diff --git a/.gitignore b/.gitignore index 5797d0cd11..771f84d72f 100644 --- a/.gitignore +++ b/.gitignore @@ -41,6 +41,9 @@ installer/x86_64/platforms/ # Misc. files asic_config_checksum files/Aboot/boot0 +files/dsc/MANIFEST +files/dsc/install_debian +files/dsc/fs.zip files/initramfs-tools/arista-convertfs files/initramfs-tools/union-mount diff --git a/build_debian.sh b/build_debian.sh index 991969040b..8a0d0be392 100755 --- a/build_debian.sh +++ b/build_debian.sh @@ -50,8 +50,8 @@ TRUSTED_GPG_DIR=$BUILD_TOOL_PATH/trusted.gpg.d echo "Error: Invalid ONIE_IMAGE_PART_SIZE in onie image config file" exit 1 } -[ -n "$ONIE_INSTALLER_PAYLOAD" ] || { - echo "Error: Invalid ONIE_INSTALLER_PAYLOAD in onie image config file" +[ -n "$INSTALLER_PAYLOAD" ] || { + echo "Error: Invalid INSTALLER_PAYLOAD in onie image config file" exit 1 } [ -n "$FILESYSTEM_SQUASHFS" ] || { @@ -753,14 +753,27 @@ sudo chroot $FILESYSTEM_ROOT update-initramfs -u ## Convert initrd image to u-boot format if [[ $TARGET_BOOTLOADER == uboot ]]; then INITRD_FILE=initrd.img-${LINUX_KERNEL_VERSION}-${CONFIGURED_ARCH} + KERNEL_FILE=vmlinuz-${LINUX_KERNEL_VERSION}-${CONFIGURED_ARCH} if [[ $CONFIGURED_ARCH == armhf ]]; then INITRD_FILE=initrd.img-${LINUX_KERNEL_VERSION}-armmp sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -A arm -O linux -T ramdisk -C gzip -d /boot/$INITRD_FILE /boot/u${INITRD_FILE} ## Overwriting the initrd image with uInitrd sudo LANG=C chroot $FILESYSTEM_ROOT mv /boot/u${INITRD_FILE} /boot/$INITRD_FILE elif [[ $CONFIGURED_ARCH == arm64 ]]; then - sudo cp -v $PLATFORM_DIR/${sonic_asic_platform}-${CONFIGURED_ARCH}/sonic_fit.its $FILESYSTEM_ROOT/boot/ - sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -f /boot/sonic_fit.its /boot/sonic_${CONFIGURED_ARCH}.fit + if [[ $CONFIGURED_PLATFORM == pensando ]]; then + ## copy device tree file into boot (XXX: need to compile dtb from dts) + sudo cp -v $PLATFORM_DIR/pensando/elba-asic-psci.dtb $FILESYSTEM_ROOT/boot/ + ## make kernel as gzip file + sudo LANG=C chroot $FILESYSTEM_ROOT gzip /boot/${KERNEL_FILE} + sudo LANG=C chroot $FILESYSTEM_ROOT mv /boot/${KERNEL_FILE}.gz /boot/${KERNEL_FILE} + ## Convert initrd image to u-boot format + sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -A arm64 -O linux -T ramdisk -C gzip -d /boot/$INITRD_FILE /boot/u${INITRD_FILE} + ## Overwriting the initrd image with uInitrd + sudo LANG=C chroot $FILESYSTEM_ROOT mv /boot/u${INITRD_FILE} /boot/$INITRD_FILE + else + sudo cp -v $PLATFORM_DIR/${sonic_asic_platform}-${CONFIGURED_ARCH}/sonic_fit.its $FILESYSTEM_ROOT/boot/ + sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -f /boot/sonic_fit.its /boot/sonic_${CONFIGURED_ARCH}.fit + fi fi fi @@ -811,7 +824,7 @@ if [[ "$CHANGE_DEFAULT_PASSWORD" == "y" ]]; then fi ## Compress most file system into squashfs file -sudo rm -f $ONIE_INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS +sudo rm -f $INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS ## Output the file system total size for diag purpose ## Note: -x to skip directories on different file systems, such as /proc sudo du -hsx $FILESYSTEM_ROOT @@ -856,5 +869,5 @@ fi pushd $FILESYSTEM_ROOT && sudo tar -I $GZ_COMPRESS_PROGRAM -cf $OLDPWD/$FILESYSTEM_DOCKERFS -C ${DOCKERFS_PATH}var/lib/docker .; popd ## Compress together with /boot, /var/lib/docker and $PLATFORM_DIR as an installer payload zip file -pushd $FILESYSTEM_ROOT && sudo tar -I $GZ_COMPRESS_PROGRAM -cf platform.tar.gz -C $PLATFORM_DIR . 
&& sudo zip -n .gz $OLDPWD/$ONIE_INSTALLER_PAYLOAD -r boot/ platform.tar.gz; popd
-sudo zip -g -n .squashfs:.gz $ONIE_INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS $FILESYSTEM_DOCKERFS
+pushd $FILESYSTEM_ROOT && sudo tar -I $GZ_COMPRESS_PROGRAM -cf platform.tar.gz -C $PLATFORM_DIR . && sudo zip -n .gz $OLDPWD/$INSTALLER_PAYLOAD -r boot/ platform.tar.gz; popd
+sudo zip -g -n .squashfs:.gz $INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS $FILESYSTEM_DOCKERFS
diff --git a/build_image.sh b/build_image.sh
index 0b735f612e..7fafba29ef 100755
--- a/build_image.sh
+++ b/build_image.sh
@@ -18,8 +18,8 @@ fi
     echo "Error: Invalid ONIE_IMAGE_PART_SIZE in onie image config file"
     exit 1
 }
-[ -n "$ONIE_INSTALLER_PAYLOAD" ] || {
-    echo "Error: Invalid ONIE_INSTALLER_PAYLOAD in onie image config file"
+[ -n "$INSTALLER_PAYLOAD" ] || {
+    echo "Error: Invalid INSTALLER_PAYLOAD in onie image config file"
     exit 1
 }
@@ -86,7 +86,7 @@ generate_onie_installer_image()
     ## Note: Don't leave blank between lines. It is single line command.
     ./onie-mk-demo.sh $CONFIGURED_ARCH $TARGET_MACHINE $TARGET_PLATFORM-$TARGET_MACHINE-$ONIEIMAGE_VERSION \
         installer platform/$TARGET_MACHINE/platform.conf $output_file OS $IMAGE_VERSION $ONIE_IMAGE_PART_SIZE \
-        $ONIE_INSTALLER_PAYLOAD $SECURE_UPGRADE_SIGNING_CERT $SECURE_UPGRADE_DEV_SIGNING_KEY
+        $INSTALLER_PAYLOAD $SECURE_UPGRADE_SIGNING_CERT $SECURE_UPGRADE_DEV_SIGNING_KEY
 }

 # Generate asic-specific device list
@@ -175,7 +175,7 @@ elif [ "$IMAGE_TYPE" = "aboot" ]; then
     sudo rm -f $OUTPUT_ABOOT_IMAGE
     sudo rm -f $ABOOT_BOOT_IMAGE
     ## Add main payload
-    cp $ONIE_INSTALLER_PAYLOAD $OUTPUT_ABOOT_IMAGE
+    cp $INSTALLER_PAYLOAD $OUTPUT_ABOOT_IMAGE
     ## Add Aboot boot0 file
     j2 -f env files/Aboot/boot0.j2 ./onie-image.conf > files/Aboot/boot0
     sed -i -e "s/%%IMAGE_VERSION%%/$IMAGE_VERSION/g" files/Aboot/boot0
@@ -213,6 +213,38 @@ elif [ "$IMAGE_TYPE" = "aboot" ]; then
         [ -f "$CA_CERT" ] && cp "$CA_CERT" "$TARGET_CA_CERT"
         ./scripts/sign_image.sh -i "$OUTPUT_ABOOT_IMAGE" -k "$SIGNING_KEY" -c "$SIGNING_CERT" -a "$TARGET_CA_CERT"
     fi
+
+elif [ "$IMAGE_TYPE" = "dsc" ]; then
+    echo "Build DSC installer"
+
+    dsc_installer_dir=files/dsc
+    dsc_installer=$dsc_installer_dir/install_debian
+    dsc_installer_manifest=$dsc_installer_dir/MANIFEST
+
+    mkdir -p `dirname $OUTPUT_DSC_IMAGE`
+    sudo rm -f $OUTPUT_DSC_IMAGE
+
+    source ./onie-image.conf
+
+    j2 $dsc_installer.j2 > $dsc_installer
+    export installer_sha=$(sha512sum "$dsc_installer" | awk '{print $1}')
+
+    export build_date=$(date -u)
+    export build_user=$(id -un)
+    export installer_payload_sha=$(sha512sum "$INSTALLER_PAYLOAD" | awk '{print $1}')
+    j2 $dsc_installer_manifest.j2 > $dsc_installer_manifest
+
+    cp $INSTALLER_PAYLOAD $dsc_installer_dir
+    tar cf $OUTPUT_DSC_IMAGE -C files/dsc $(basename $dsc_installer_manifest) $INSTALLER_PAYLOAD $(basename $dsc_installer)
+
+    echo "Build ONIE installer"
+    mkdir -p `dirname $OUTPUT_ONIE_IMAGE`
+    sudo rm -f $OUTPUT_ONIE_IMAGE
+
+    generate_device_list "./installer/platforms_asic"
+
+    generate_onie_installer_image
+
 else
     echo "Error: Non supported image type $IMAGE_TYPE"
     exit 1
diff --git a/device/pensando/arm64-elba-asic-r0/Pensando-elba/port_config.ini b/device/pensando/arm64-elba-asic-r0/Pensando-elba/port_config.ini
new file mode 100644
index 0000000000..70d1ec4f53
--- /dev/null
+++ b/device/pensando/arm64-elba-asic-r0/Pensando-elba/port_config.ini
@@ -0,0 +1,3 @@
+# name        lanes      alias        speed     autoneg    fec
+Ethernet1     0,1,2,3    Ethernet1    100000    on         rs
+Ethernet2     4,5,6,7    Ethernet2    100000    on         rs
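Note on the dsc image flow above: MANIFEST.j2 records a sha512 for each
payload member (fs.zip today), and check_package in
files/dsc/install_debian.j2 replays those hashes on the DSC before
installing. The same check can be run off-target against a built image.
A minimal sketch, assuming the arm64 output name target/sonic-pensando.tar
for $OUTPUT_DSC_IMAGE:

    # pull MANIFEST out of the package, then re-hash each member listed
    # in its "shas" map, mirroring check_package
    tar xf target/sonic-pensando.tar MANIFEST
    for f in $(jq -r '.shas | keys[]' MANIFEST); do
        want=$(jq -r ".shas.\"$f\"" MANIFEST)
        got=$(tar xfO target/sonic-pensando.tar "$f" | sha512sum | awk '{print $1}')
        [ "$want" = "$got" ] && echo "$f OK" || echo "$f MISMATCH"
    done
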
diff --git a/device/pensando/arm64-elba-asic-r0/Pensando-elba/sai.profile b/device/pensando/arm64-elba-asic-r0/Pensando-elba/sai.profile
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/device/pensando/arm64-elba-asic-r0/default_sku b/device/pensando/arm64-elba-asic-r0/default_sku
new file mode 100644
index 0000000000..732fd35092
--- /dev/null
+++ b/device/pensando/arm64-elba-asic-r0/default_sku
@@ -0,0 +1 @@
+Pensando-elba t1
diff --git a/device/pensando/arm64-elba-asic-r0/platform_asic b/device/pensando/arm64-elba-asic-r0/platform_asic
new file mode 100644
index 0000000000..a2396ec3d5
--- /dev/null
+++ b/device/pensando/arm64-elba-asic-r0/platform_asic
@@ -0,0 +1 @@
+pensando
diff --git a/device/pensando/arm64-elba-asic-r0/plugins/ssd_util.py b/device/pensando/arm64-elba-asic-r0/plugins/ssd_util.py
new file mode 100644
index 0000000000..6ca016e20b
--- /dev/null
+++ b/device/pensando/arm64-elba-asic-r0/plugins/ssd_util.py
@@ -0,0 +1,100 @@
+#
+# ssd_util.py
+#
+# eMMC implementation of the SSD health API for the Pensando Elba DSC.
+# Model, serial, firmware, and health are read from the mmc sysfs
+# attributes (name, serial, fwrev, life_time) rather than from the
+# SATA SSD vendor tools used by the generic implementation.
+
+try:
+    import re
+    import subprocess
+    from sonic_platform_base.sonic_ssd.ssd_base import SsdBase
+except ImportError as e:
+    raise ImportError (str(e) + "- required module not found")
+
+NOT_AVAILABLE = "N/A"
+MMC_DATA_PATH = "/sys/class/mmc_host/mmc0/mmc0:0001/{}"
+
+class SsdUtil(SsdBase):
+    """
+    eMMC-backed implementation of the SSD health API
+    """
+    model = NOT_AVAILABLE
+    serial = NOT_AVAILABLE
+    firmware = NOT_AVAILABLE
+    temperature = NOT_AVAILABLE
+    health = NOT_AVAILABLE
+    ssd_info = NOT_AVAILABLE
+    vendor_ssd_info = NOT_AVAILABLE
+
+    def __init__(self, diskdev):
+
+        self.dev = diskdev
+        try:
+            self.model = ("emmc {}".format(open(MMC_DATA_PATH.format("name")).read())).replace("\n", "")
+            self.serial = open(MMC_DATA_PATH.format("serial")).read().replace("\n", "")
+            self.firmware = open(MMC_DATA_PATH.format("fwrev")).read().replace("\n", "")
+            # life_time reports two hex wear estimates (type A and B) in 10%
+            # steps; health is derived from the worse of the two
+            value = open(MMC_DATA_PATH.format("life_time")).read().replace("\n", "")
+            [lifetime_a, lifetime_b] = [int(val, 16) for val in value.split()]
+            lifetime = lifetime_a if lifetime_a >= lifetime_b else lifetime_b
+            self.health = float(100 - (lifetime*10))
+        except:
+            # leave the N/A defaults if the sysfs attributes are missing
+            pass
+
+    def get_health(self):
+        """
+        Retrieves current disk health in percentages
+
+        Returns:
+            A float number of current ssd health
+            e.g. 83.5
+        """
+        return self.health
+
+    def get_temperature(self):
+        """
+        Retrieves current disk temperature in Celsius
+
+        Returns:
+            A float number of current temperature in Celsius
+            e.g.
40.1 + """ + return self.temperature + + def get_model(self): + """ + Retrieves model for the given disk device + + Returns: + A string holding disk model as provided by the manufacturer + """ + return self.model + + def get_firmware(self): + """ + Retrieves firmware version for the given disk device + + Returns: + A string holding disk firmware version as provided by the manufacturer + """ + return self.firmware + + def get_serial(self): + """ + Retrieves serial number for the given disk device + + Returns: + A string holding disk serial number as provided by the manufacturer + """ + return self.serial + + def get_vendor_output(self): + """ + Retrieves vendor specific data for the given disk device + + Returns: + A string holding some vendor specific disk information + """ + return self.vendor_ssd_info \ No newline at end of file diff --git a/device/pensando/arm64-elba-asic-r0/pmon_daemon_control.json b/device/pensando/arm64-elba-asic-r0/pmon_daemon_control.json new file mode 100644 index 0000000000..f8dfa401ad --- /dev/null +++ b/device/pensando/arm64-elba-asic-r0/pmon_daemon_control.json @@ -0,0 +1,10 @@ +{ + "skip_thermalctld": true, + "skip_fancontrol": true, + "skip_ledd": true, + "skip_psud": true, + "skip_syseepromd": false, + "skip_xcvrd": true, + "skip_chassis_db_init": false, + "skip_pcied": true +} \ No newline at end of file diff --git a/dockers/docker-orchagent/orchagent.sh b/dockers/docker-orchagent/orchagent.sh index f407e266ca..883864dbd9 100755 --- a/dockers/docker-orchagent/orchagent.sh +++ b/dockers/docker-orchagent/orchagent.sh @@ -64,6 +64,9 @@ elif [ "$platform" == "mellanox" ]; then ORCHAGENT_ARGS+="" elif [ "$platform" == "innovium" ]; then ORCHAGENT_ARGS+="-m $MAC_ADDRESS" +elif [ "$platform" == "pensando" ]; then + MAC_ADDRESS=$(ip link property add dev oob_mnic0 altname eth0; ip link show oob_mnic0 | grep ether | awk '{print $2}') + ORCHAGENT_ARGS+="-m $MAC_ADDRESS" else # Should we use the fallback MAC in case it is not found in Device.Metadata ORCHAGENT_ARGS+="-m $MAC_ADDRESS" diff --git a/files/build_templates/sonic_debian_extension.j2 b/files/build_templates/sonic_debian_extension.j2 index 75bc1eca50..ed52539e2a 100644 --- a/files/build_templates/sonic_debian_extension.j2 +++ b/files/build_templates/sonic_debian_extension.j2 @@ -31,6 +31,7 @@ IMAGE_DISTRO=$3 set -x -e CONFIGURED_ARCH=$([ -f .arch ] && cat .arch || echo amd64) +CONFIGURED_PLATFORM=$([ -f .platform ] && cat .platform || echo generic) . 
functions.sh BUILD_SCRIPTS_DIR=files/build_scripts @@ -762,6 +763,14 @@ sudo LANG=C DOCKER_HOST="$DOCKER_HOST" chroot $FILESYSTEM_ROOT docker tag {{imag fi {% endfor %} +if [[ $CONFIGURED_PLATFORM == pensando ]]; then +#Disable rc.local +sudo LANG=C chroot $FILESYSTEM_ROOT chmod -x /etc/rc.local +sudo cp files/dsc/dpu.service $FILESYSTEM_ROOT_USR_LIB_SYSTEMD_SYSTEM/ +sudo cp files/dsc/dpu.init $FILESYSTEM_ROOT/etc/init.d/dpu +sudo LANG=C chroot $FILESYSTEM_ROOT systemctl enable dpu.service +fi + SONIC_PACKAGE_MANAGER_FOLDER="/var/lib/sonic-package-manager/" sudo mkdir -p $FILESYSTEM_ROOT/$SONIC_PACKAGE_MANAGER_FOLDER target_machine="$TARGET_MACHINE" j2 $BUILD_TEMPLATES/packages.json.j2 | sudo tee $FILESYSTEM_ROOT/$SONIC_PACKAGE_MANAGER_FOLDER/packages.json diff --git a/files/dsc/MANIFEST.j2 b/files/dsc/MANIFEST.j2 new file mode 100644 index 0000000000..d6aadf1980 --- /dev/null +++ b/files/dsc/MANIFEST.j2 @@ -0,0 +1,20 @@ +{ + "metadata_version": 1, + "package_version": 2, + "asic_compat": "elba", + "build_date": "{{ build_date }}", + "build_user": "{{ build_user }}", + "installer": { + "name": "install_debian", + "verify": { + "algorithm": "sha512", + "hash": "{{ installer_sha }}" + } + }, + "shas": { + "fs.zip": "{{ installer_payload_sha }}" + }, + "package_compat": { + "board_policy": "accept" + } +} diff --git a/files/dsc/dpu.init b/files/dsc/dpu.init new file mode 100755 index 0000000000..09e6562c50 --- /dev/null +++ b/files/dsc/dpu.init @@ -0,0 +1,85 @@ +#!/bin/bash + +# {C} Copyright 2023 AMD Systems Inc. All rights reserved + +# This script starts/stops dpu sw + + +### BEGIN INIT INFO +# Provides: load-dpu +# Required-Start: +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: S +# Default-Stop: 0 6 +# Short-Description: Load dpu sw +### END INIT INFO +ACTIVE_FILE="/boot/active.txt" +NIC_MOUNT="" +LOG_FILE="/tmp/active_nic" +TAG="latest" +HOST_DIR=/host/dpu + +function start_dpu() +{ + modprobe ionic_mnic + modprobe mnet_uio_pdrv_genirq + modprobe mdev + + mkdir -p $HOST_DIR/update + mkdir -p $HOST_DIR/sysconfig/config0 + mkdir -p $HOST_DIR/sysconfig/config1 + mkdir -p $HOST_DIR/obfl + mkdir -p $HOST_DIR/data + mkdir -p $HOST_DIR/tmpfsshare + mkdir -p $HOST_DIR/runfs + mkdir -p $HOST_DIR/logfs + mount -t tmpfs -o size=20M,mode=1777 tmpfs $HOST_DIR/tmpfsshare + mount -t tmpfs -o size=20M,mode=0755 runs $HOST_DIR/runfs + mount -t tmpfs -o size=20M,mode=0755 logfs $HOST_DIR/logfs + + if [ -f "$ACTIVE_FILE" ]; then + ACTIVE_CONTENTS=$(cat "$ACTIVE_FILE") + ACTIVE_NIC=$(echo "$ACTIVE_CONTENTS" | cut -d " " -f 8-) + if [ "$ACTIVE_NIC" = "/boot/nicA" ]; then + NIC_MOUNT="-v /dev/shm:/dev/shm -v /boot/nicA/nic_core:/nic -v /boot/nicA/shared/conf/gen:/nic/conf/gen" + elif [ "$ACTIVE_NIC" = "/boot/nicB" ]; then + NIC_MOUNT="-v /dev/shm:/dev/shm -v /boot/nicB/nic_core:/nic -v /boot/nicB/shared/conf/gen:/nic/conf/gen" + fi + else + echo "/boot/active.txt not present" > $LOG_FILE + fi + echo "Active Nic: $ACTIVE_NIC" >> $LOG_FILE + echo "NIC_MOUNT: $NIC_MOUNT" >> $LOG_FILE + + docker ps -a --format "{{.ID}}\t{{.Image}}" | grep "docker-dpu:latest" | awk '{print $1}' | xargs -I {} docker rm {} + + docker run -v $HOST_DIR/update:/update -v $HOST_DIR/sysconfig/config0:/sysconfig/config0 -v $HOST_DIR/sysconfig/config1:/sysconfig/config1 -v $HOST_DIR/obfl:/obfl -v $HOST_DIR/data:/data -v $HOST_DIR/tmpfsshare:/tmp -v $HOST_DIR/runfs:/run -v $HOST_DIR/logfs:/var/log -v /sys:/sys $NIC_MOUNT --net=host --name=dpu --privileged docker-dpu:$TAG +} + +case "$1" in +start) + echo -n "Start 
dpu... " + + start_dpu + + echo "done." + ;; + +stop) + echo "Not supported" + ;; + +force-reload|restart) + echo "Not supported" + ;; + +*) + echo "Usage: /etc/init.d/dpu.init {start}" + exit 1 + ;; +esac + +exit 0 + diff --git a/files/dsc/dpu.service b/files/dsc/dpu.service new file mode 100644 index 0000000000..ff6e5fc7db --- /dev/null +++ b/files/dsc/dpu.service @@ -0,0 +1,17 @@ +[Unit] +Description=dpu sw +Requires=docker.service +After=docker.service +Requires=local-fs.target +After=local-fs.target +Requires=ionic-modules.service +After=ionic-modules.service + +[Service] +Type=oneshot +ExecStart=/etc/init.d/dpu start +ExecStop=/etc/init.d/dpu stop +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/files/dsc/install_debian.j2 b/files/dsc/install_debian.j2 new file mode 100755 index 0000000000..4cb6d24631 --- /dev/null +++ b/files/dsc/install_debian.j2 @@ -0,0 +1,331 @@ +#!/bin/sh + +R="" +export LD_LIBRARY_PATH=/platform/lib:/nic/lib:$LD_LIBRARY_PATH +export PATH=/platform/bin:$PATH +if [ $(arch) != aarch64 ]; then + export PATH=$(pwd)/bin:$PATH + R=$(pwd)/data +fi +root_mnt=$R/mnt + +image_version={{ SONIC_IMAGE_VERSION }} + +image_dir=image-$image_version + +INSTALLER_PAYLOAD=fs.zip +DOCKERFS_DIR=docker +FILESYSTEM_DOCKERFS=dockerfs.tar.gz +BL_CONF=boot.conf + +DATA_PARTUUID=6ED62003-DD8D-44B8-9538-0A2B7C7E628F +ROOT_PARTUUID=C7F48DD2-C265-404B-959D-C64D21D49168 + +ROOT_PARTSIZE=24G + +exec 0< /dev/tty 1> /dev/tty 2> /dev/tty + +PKG="" +ACTION="" + +root_pn=0 +data_pn=0 + +REPART_NEEDED=0 + +set -e + +fatal() +{ + echo "FATAL: $1" >&2 + exit 1 +} + +hash() +{ + sha512sum $1 | awk '{ print $1 }' +} + +check_running_goldfw() +{ + local fw=$(expr "$(cat $R/proc/cmdline)" : '.*FW_NAME=\([a-z]\+\)') + if [ "$fw" != "goldfw" ]; then + fatal "Installer can only be run from goldfw" + fi +} + +check_package() +{ + local f want_hash got_hash + + echo "==> Checking package" + for f in $(jq -r ".shas | keys | join(\" \") | tostring" MANIFEST); do + echo -n "${f}..." + want_hash=$(jq -r .shas.\"$f\" MANIFEST) + got_hash=$(tar xfO $PKG $f | hash /dev/stdin) + if [ "$got_hash" != "$want_hash" ]; then + echo " BAD" + echo "WANT: $want_hash" + echo "GOT: $got_hash" + fatal "Package file error" + else + echo " OK" + fi + done +} + +get_install_mode() +{ + local r + + while [ -z "$install_mode" ]; do + echo "### Select install mode:" + echo "### 1. Whole disk (wipe all Pensando filesystems)" + echo "### 2. Half of /data (lose /data contents)" + echo "### 3. Half of /data (save/restore /data contents)" + read -p '### Selection: ' r + case "X-$r" in + X-1) install_mode=FULL_DISK; ;; + X-2) install_mode=DATA_LOSE; ;; + X-3) install_mode=DATA_KEEP; ;; + *) fatal "ABORTED"; ;; + esac + done +} + +check_existing_parts() +{ + local nparts i partuuid boot_partsize boot_lastsec data_firstsec + + nparts=$(sgdisk -p /dev/mmcblk0 | grep '^[ ]*[1-9]' | wc -l) + for i in $(seq $nparts); do + partuuid=$(sgdisk -i $i /dev/mmcblk0 | awk '/Partition unique GUID/ { print $NF }') + case "$partuuid" in + $DATA_PARTUUID) data_pn=$i; ;; + $ROOT_PARTUUID) root_pn=$i; ;; + esac + done + + if [ $install_mode != FULL_DISK ]; then + if [ $root_pn -ne 0 ]; then + boot_partsize=$(sgdisk -i $root_pn /dev/mmcblk0 | awk -F '[( ]' '/Partition size/ {print int($6)}') + boot_lastsec=$(sgdisk -i $root_pn /dev/mmcblk0 | awk '/Last sector/ {print $3}') + if [ ${boot_partsize}G = $ROOT_PARTSIZE ]; then + echo "SONiC root partitions already present with requested size. 
No repartition, only formatting"
+            else
+                echo "SONiC root partition already present with mismatched size ${boot_partsize}G. Repartition needed"
+                REPART_NEEDED=1
+            fi
+        fi
+
+        if [ $data_pn -eq 0 ]; then
+            echo "Data partition not found; Repartition needed"
+            REPART_NEEDED=1
+        elif [ $data_pn -ne $nparts ]; then
+            fatal "Data partition is not the last partition; exiting." >&2
+        else
+            data_firstsec=$(sgdisk -i $data_pn /dev/mmcblk0 | awk '/First sector/ {print $3}')
+            if [ $data_firstsec -ne $((boot_lastsec+1)) ]; then
+                echo "Data partition not contiguous with boot partition. Repartition needed"
+                REPART_NEEDED=1
+            fi
+        fi
+    fi
+}
+
+save_data()
+{
+    echo "==> Saving /data"
+    mount /dev/mmcblk0p$data_pn $R/mnt
+    tar cvf $R/root/data.tar -C $R/mnt .
+    umount $R/mnt
+}
+
+setup_partitions_full()
+{
+    local i
+
+    echo "==> Setting up partitions..."
+    root_pn=1
+
+    set +e
+    sgdisk -Z /dev/mmcblk0 >/dev/null 2>&1
+    partprobe >/dev/null 2>&1
+    sgdisk \
+        -n $root_pn:+0:+$ROOT_PARTSIZE -t $root_pn:8300 \
+        -u $root_pn:$ROOT_PARTUUID -c $root_pn:"SONiC Root Filesystem" \
+        /dev/mmcblk0 >/dev/null
+    while true; do
+        partprobe
+        if [ ! -e $R/dev/mmcblk0p3 ]; then
+            break
+        fi
+        sleep 1
+    done
+
+    echo "==> Creating filesystems"
+    for i in $root_pn; do
+        mkfs.ext4 -F -q /dev/mmcblk0p$i >/dev/null
+    done
+    set -e
+}
+
+setup_partitions_multi()
+{
+    echo "==> Setting up partitions..."
+
+    set +e
+    if [ $REPART_NEEDED -eq 0 ]; then
+        mkfs.ext4 -F -q /dev/mmcblk0p$root_pn >/dev/null
+    else
+
+        if [ $root_pn -ne 0 ]; then
+            sgdisk -d $root_pn /dev/mmcblk0 >/dev/null
+        fi
+        [ $data_pn -ne 0 ] && sgdisk -d $data_pn /dev/mmcblk0 >/dev/null
+
+        if [ $root_pn -eq 0 ]; then
+            root_pn=10
+            data_pn=$(($root_pn + 1))
+        fi
+
+        if [ $data_pn -eq 0 ]; then
+            data_pn=$(($root_pn + 1))
+        fi
+
+        sgdisk \
+            -n $root_pn:+0:+$ROOT_PARTSIZE -t $root_pn:8300 \
+            -u $root_pn:$ROOT_PARTUUID -c $root_pn:"SONiC Root Filesystem" \
+            -n $data_pn:+0:0 -t $data_pn:8300 -u $data_pn:$DATA_PARTUUID \
+            -c $data_pn:"Data Filesystem" \
+            /dev/mmcblk0 >/dev/null
+        sgdisk -U R /dev/mmcblk0 >/dev/null
+
+        while true; do
+            partprobe
+            if [ -e $R/dev/mmcblk0p$data_pn ]; then
+                break
+            fi
+            sleep 1
+        done
+
+        echo "==> Creating filesystems"
+        for i in $root_pn $data_pn; do
+            mkfs.ext4 -F -q /dev/mmcblk0p$i >/dev/null
+        done
+    fi
+    set -e
+}
+
+setup_partitions()
+{
+    if [ $install_mode = FULL_DISK ]; then
+        setup_partitions_full
+    else
+        setup_partitions_multi
+    fi
+}
+
+restore_data()
+{
+    echo "==> Restoring /data"
+    if [ -f $R/root/data.tar ]; then
+        mount /dev/mmcblk0p$data_pn $R/mnt
+        tar xpvf $R/root/data.tar -C $R/mnt
+        umount $R/mnt
+    fi
+}
+
+create_bootloader_conf()
+{
+    echo "==> Create bootloader config"
+
+cat <<EOF >> $root_mnt/$BL_CONF
+default main
+
+label main
+    kernel /$image_dir/boot/vmlinuz-6.1.0-11-2-arm64
+    initrd /$image_dir/boot/initrd.img-6.1.0-11-2-arm64
+    devicetree /$image_dir/boot/elba-asic-psci.dtb
+    append softdog.soft_panic=1 FW_NAME=mainfwa root=/dev/mmcblk0p10 rw rootwait rootfstype=ext4 loopfstype=squashfs loop=/$image_dir/fs.squashfs
+EOF
+}
+
+install_root_filesystem()
+{
+    echo "==> Installing root filesystem"
+
+    mount /dev/mmcblk0p$root_pn $root_mnt
+    mkdir -p $root_mnt/$image_dir
+
+    # Decompress the file for the file system directly to the partition
+    if [ x"$docker_inram" = x"on" ]; then
+        # when disk is small, keep dockerfs.tar.gz in disk, expand it into ramfs during initrd
+        tar xfO $PKG $INSTALLER_PAYLOAD | unzip -o - -x "platform.tar.gz" -d $root_mnt/$image_dir
+    else
+        tar xfO $PKG $INSTALLER_PAYLOAD
| unzip -o - -x "$FILESYSTEM_DOCKERFS" "platform.tar.gz" -d $root_mnt/$image_dir + + TAR_EXTRA_OPTION="--numeric-owner" + mkdir -p $root_mnt/$image_dir/$DOCKERFS_DIR + tar xfO $PKG $INSTALLER_PAYLOAD | unzip -op - "$FILESYSTEM_DOCKERFS" | tar xz $TAR_EXTRA_OPTION -f - -C $root_mnt/$image_dir/$DOCKERFS_DIR + fi + + create_bootloader_conf + + umount $root_mnt +} + +set_boot_command() +{ + local pn + #set to mainfwa where sonic is installed + mtd=/dev/$(grep fwsel /proc/mtd | sed -e 's/:.*//') + flash_erase -q $mtd 0 1 + echo -n mainfwa | dd of=$mtd status=none + + echo "==> Setting u-boot environment for Debian Boot" + pn=$(printf "%x" $root_pn) + fwenv -E \ + -s kernel_comp_addr_r 88000000 \ + -s kernel_comp_size 8000000 \ + -s kernel_addr_r a0000000 \ + -s fdt_addr_r bb100000 \ + -s ramdisk_addr_r a4000000 \ + -s bootcmd "sysboot mmc 0:$pn any bf000000 /$BL_CONF" +} + +main() +{ + while getopts ":p:i:" opt; do + case "$opt" in + p) PKG=$OPTARG; ;; + i) ACTION=INSTALL; ;; + *) true; ;; + esac + done + + if [ "$PKG" = "" -o "$ACTION" != "INSTALL" ]; then + fatal "Needs -p filename -i all" + fi + + tar xf $PKG MANIFEST + + check_running_goldfw + check_package + # get_install_mode + install_mode=DATA_KEEP + check_existing_parts + if [ $install_mode = DATA_KEEP -a $REPART_NEEDED -eq 1 -a $data_pn -ne 0 ]; then + save_data + fi + setup_partitions + if [ $install_mode = DATA_KEEP -a $REPART_NEEDED -eq 1 ]; then + restore_data + fi + install_root_filesystem + set_boot_command + echo "==> Installation complete" +} + +main "$@" diff --git a/installer/install.sh b/installer/install.sh index 4d8ad75a16..0b03cf98f4 100755 --- a/installer/install.sh +++ b/installer/install.sh @@ -218,9 +218,9 @@ fi # Decompress the file for the file system directly to the partition if [ x"$docker_inram" = x"on" ]; then # when disk is small, keep dockerfs.tar.gz in disk, expand it into ramfs during initrd - unzip -o $ONIE_INSTALLER_PAYLOAD -x "platform.tar.gz" -d $demo_mnt/$image_dir + unzip -o $INSTALLER_PAYLOAD -x "platform.tar.gz" -d $demo_mnt/$image_dir else - unzip -o $ONIE_INSTALLER_PAYLOAD -x "$FILESYSTEM_DOCKERFS" "platform.tar.gz" -d $demo_mnt/$image_dir + unzip -o $INSTALLER_PAYLOAD -x "$FILESYSTEM_DOCKERFS" "platform.tar.gz" -d $demo_mnt/$image_dir if [ "$install_env" = "onie" ]; then TAR_EXTRA_OPTION="--numeric-owner" @@ -228,11 +228,11 @@ else TAR_EXTRA_OPTION="--numeric-owner --warning=no-timestamp" fi mkdir -p $demo_mnt/$image_dir/$DOCKERFS_DIR - unzip -op $ONIE_INSTALLER_PAYLOAD "$FILESYSTEM_DOCKERFS" | tar xz $TAR_EXTRA_OPTION -f - -C $demo_mnt/$image_dir/$DOCKERFS_DIR + unzip -op $INSTALLER_PAYLOAD "$FILESYSTEM_DOCKERFS" | tar xz $TAR_EXTRA_OPTION -f - -C $demo_mnt/$image_dir/$DOCKERFS_DIR fi mkdir -p $demo_mnt/$image_dir/platform -unzip -op $ONIE_INSTALLER_PAYLOAD "platform.tar.gz" | tar xz $TAR_EXTRA_OPTION -f - -C $demo_mnt/$image_dir/platform +unzip -op $INSTALLER_PAYLOAD "platform.tar.gz" | tar xz $TAR_EXTRA_OPTION -f - -C $demo_mnt/$image_dir/platform if [ "$install_env" = "onie" ]; then # Store machine description in target file system diff --git a/onie-image-arm64.conf b/onie-image-arm64.conf index 95dcd9ec11..b8a199c70b 100644 --- a/onie-image-arm64.conf +++ b/onie-image-arm64.conf @@ -19,7 +19,7 @@ FILESYSTEM_ROOT=./fsroot-${TARGET_MACHINE} FILESYSTEM_SQUASHFS=fs.squashfs ## Filename for onie installer payload, will be the main part of onie installer -ONIE_INSTALLER_PAYLOAD=fs.zip +INSTALLER_PAYLOAD=fs.zip ## Filename for docker file system FILESYSTEM_DOCKERFS=dockerfs.tar.gz @@ -50,3 +50,6 @@ 
OUTPUT_ABOOT_IMAGE=target/sonic-aboot-$TARGET_MACHINE.swi ## Aboot boot image name ABOOT_BOOT_IMAGE=.sonic-boot.swi + +## Output file name for dsc installer +OUTPUT_DSC_IMAGE=target/sonic-$TARGET_MACHINE.tar diff --git a/onie-image-armhf.conf b/onie-image-armhf.conf index 69b08fac13..2672187d95 100644 --- a/onie-image-armhf.conf +++ b/onie-image-armhf.conf @@ -19,7 +19,7 @@ FILESYSTEM_ROOT=./fsroot-${TARGET_MACHINE} FILESYSTEM_SQUASHFS=fs.squashfs ## Filename for onie installer payload, will be the main part of onie installer -ONIE_INSTALLER_PAYLOAD=fs.zip +INSTALLER_PAYLOAD=fs.zip ## Filename for docker file system FILESYSTEM_DOCKERFS=dockerfs.tar.gz diff --git a/onie-image.conf b/onie-image.conf index b0e2796a8d..c61008f7f0 100644 --- a/onie-image.conf +++ b/onie-image.conf @@ -19,7 +19,7 @@ FILESYSTEM_ROOT=./fsroot-${TARGET_MACHINE} FILESYSTEM_SQUASHFS=fs.squashfs ## Filename for onie installer payload, will be the main part of onie installer -ONIE_INSTALLER_PAYLOAD=fs.zip +INSTALLER_PAYLOAD=fs.zip ## Filename for docker file system FILESYSTEM_DOCKERFS=dockerfs.tar.gz @@ -56,3 +56,6 @@ OUTPUT_KVM_4ASIC_IMAGE=target/sonic-4asic-$TARGET_MACHINE.img ### Output file name for 6-asic kvm image OUTPUT_KVM_6ASIC_IMAGE=target/sonic-6asic-$TARGET_MACHINE.img + +## Output file name for dsc installer +OUTPUT_DSC_IMAGE=target/sonic-$TARGET_MACHINE.tar diff --git a/platform/pensando/docker-dpu-base.dep b/platform/pensando/docker-dpu-base.dep new file mode 100644 index 0000000000..fbc438fd3b --- /dev/null +++ b/platform/pensando/docker-dpu-base.dep @@ -0,0 +1,4 @@ +DEP_FILES := rules/docker-dpu-base.dep rules/docker-dpu-base.mk + +$(DOCKER_DPU_BASE)_CACHE_MODE := none +$(DOCKER_DPU_BASE)_DEP_FILES := $(DEP_FILES) diff --git a/platform/pensando/docker-dpu-base.mk b/platform/pensando/docker-dpu-base.mk new file mode 100644 index 0000000000..129cba00e2 --- /dev/null +++ b/platform/pensando/docker-dpu-base.mk @@ -0,0 +1,10 @@ +# docker dpu image for load + +DOCKER_DPU_BASE_STEM = docker-dpu-base + +DOCKER_DPU_BASE = $(DOCKER_DPU_BASE_STEM).gz + +$(DOCKER_DPU_BASE)_URL = https://github.com/pensando/dsc-artifacts/blob/main/docker-dpu-base.gz?raw=true + +DOWNLOADED_DOCKER_IMAGES += $(DOCKER_DPU_BASE) + diff --git a/platform/pensando/docker-dpu.dep b/platform/pensando/docker-dpu.dep new file mode 100644 index 0000000000..7611a05cdf --- /dev/null +++ b/platform/pensando/docker-dpu.dep @@ -0,0 +1,7 @@ +DOCKER_DPU_STEM = docker-dpu +DOCKER_DPU = $(DOCKER_DPU_STEM).gz +DPATH := $($(DOCKER_DPU)_PATH) +DEP_FILES := platform/pensando/docker-dpu.dep platform/pensando/docker-dpu.mk + +$(DOCKER_DPU)_CACHE_MODE := none +$(DOCKER_DPU)_DEP_FILES := $(DEP_FILES) diff --git a/platform/pensando/docker-dpu.mk b/platform/pensando/docker-dpu.mk new file mode 100644 index 0000000000..7eafdb6806 --- /dev/null +++ b/platform/pensando/docker-dpu.mk @@ -0,0 +1,20 @@ +# docker dpu image for load +DOCKER_DPU_STEM = docker-dpu + +DOCKER_DPU = $(DOCKER_DPU_STEM).gz + +$(DOCKER_DPU)_SQUASH = n +$(DOCKER_DPU)_PATH = $(PLATFORM_PATH)/$(DOCKER_DPU_STEM) + +$(DOCKER_DPU)_LOAD_DOCKERS = $(DOCKER_DPU_BASE) + +SONIC_DOCKER_IMAGES += $(DOCKER_DPU) + +$(DOCKER_DPU)_LOAD_DOCKERS += $(DOCKER_CONFIG_ENGINE_BULLSEYE) + +$(DOCKER_DPU)_PACKAGE_NAME = dpu +$(DOCKER_DPU)_CONTAINER_NAME = dpu +$(DOCKER_DPU)_VERSION = 1.0.0 + +SONIC_BULLSEYE_DOCKERS += $(DOCKER_DPU) +SONIC_INSTALL_DOCKER_IMAGES += $(DOCKER_DPU) diff --git a/platform/pensando/docker-dpu/Dockerfile.j2 b/platform/pensando/docker-dpu/Dockerfile.j2 new file mode 100644 index 
0000000000..a8e285361f
--- /dev/null
+++ b/platform/pensando/docker-dpu/Dockerfile.j2
@@ -0,0 +1,2 @@
+FROM docker-dpu-base
+# SKIP_HOOK
diff --git a/platform/pensando/docker-syncd-pensando.mk b/platform/pensando/docker-syncd-pensando.mk
new file mode 100644
index 0000000000..e3ba335aed
--- /dev/null
+++ b/platform/pensando/docker-syncd-pensando.mk
@@ -0,0 +1,20 @@
+# docker image for pensando syncd
+
+DOCKER_SYNCD_PLATFORM_CODE = pensando
+include $(PLATFORM_PATH)/../template/docker-syncd-bullseye.mk
+
+$(DOCKER_SYNCD_BASE)_DEPENDS += $(SYNCD)
+
+$(DOCKER_SYNCD_BASE)_DBG_DEPENDS += $(SYNCD_DBG) \
+                                $(LIBSWSSCOMMON_DBG) \
+                                $(LIBSAIMETADATA_DBG) \
+                                $(LIBSAIREDIS_DBG)
+
+$(DOCKER_SYNCD_BASE)_VERSION = 1.0.0
+$(DOCKER_SYNCD_BASE)_PACKAGE_NAME = syncd
+
+$(DOCKER_SYNCD_BASE)_RUN_OPT += --privileged -t
+$(DOCKER_SYNCD_BASE)_RUN_OPT += -v /host/warmboot:/var/warmboot
+$(DOCKER_SYNCD_BASE)_RUN_OPT += -v /host/machine.conf:/etc/machine.conf
+$(DOCKER_SYNCD_BASE)_RUN_OPT += -v /var/run/docker-syncd:/var/run/sswsyncd
+$(DOCKER_SYNCD_BASE)_RUN_OPT += -v /etc/sonic:/etc/sonic:ro
diff --git a/platform/pensando/docker-syncd-pensando/Dockerfile.j2 b/platform/pensando/docker-syncd-pensando/Dockerfile.j2
new file mode 100755
index 0000000000..eae830dd50
--- /dev/null
+++ b/platform/pensando/docker-syncd-pensando/Dockerfile.j2
@@ -0,0 +1,34 @@
+FROM docker-config-engine-bullseye-{{DOCKER_USERNAME}}:{{DOCKER_USERTAG}}
+
+ARG docker_container_name
+
+## Make apt-get non-interactive
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update
+
+RUN apt-get install -y protobuf-compiler libprotobuf-dev libgrpc++-dev
+
+COPY \
+{% for deb in docker_syncd_pensando_debs.split(' ') -%}
+debs/{{ deb }}{{' '}}
+{%- endfor -%}
+debs/
+
+RUN dpkg -i \
+{% for deb in docker_syncd_pensando_debs.split(' ') -%}
+debs/{{ deb }}{{' '}}
+{%- endfor %}
+
+## TODO: add kmod into Depends
+RUN apt-get install -yf kmod
+
+COPY ["supervisord.conf", "/etc/supervisor/conf.d/"]
+COPY ["files/supervisor-proc-exit-listener", "/usr/bin/"]
+COPY ["critical_processes", "/etc/supervisor/"]
+
+## Clean up
+RUN apt-get clean -y; apt-get autoclean -y; apt-get autoremove -y
+RUN rm -rf /debs
+
+ENTRYPOINT ["/usr/local/bin/supervisord"]
diff --git a/platform/pensando/docker-syncd-pensando/critical_processes b/platform/pensando/docker-syncd-pensando/critical_processes
new file mode 100644
index 0000000000..bdd6903c56
--- /dev/null
+++ b/platform/pensando/docker-syncd-pensando/critical_processes
@@ -0,0 +1 @@
+program:syncd
diff --git a/platform/pensando/docker-syncd-pensando/supervisord.conf b/platform/pensando/docker-syncd-pensando/supervisord.conf
new file mode 100644
index 0000000000..56d8b29de1
--- /dev/null
+++ b/platform/pensando/docker-syncd-pensando/supervisord.conf
@@ -0,0 +1,39 @@
+[supervisord]
+logfile_maxbytes=1MB
+logfile_backups=2
+nodaemon=true
+
+[eventlistener:dependent-startup]
+command=python3 -m supervisord_dependent_startup
+autostart=true
+autorestart=unexpected
+startretries=0
+exitcodes=0,3
+events=PROCESS_STATE
+buffer_size=1024
+
+[eventlistener:supervisor-proc-exit-listener]
+command=python3 /usr/bin/supervisor-proc-exit-listener --container-name syncd
+events=PROCESS_STATE_EXITED,PROCESS_STATE_RUNNING
+autostart=true
+autorestart=unexpected
+buffer_size=1024
+
+[program:rsyslogd]
+command=/usr/sbin/rsyslogd -n -iNONE
+priority=1
+autostart=false
+autorestart=unexpected
+stdout_logfile=syslog
+stderr_logfile=syslog
+dependent_startup=true
+
+[program:syncd]
+command=/usr/bin/syncd_start.sh
+priority=2
+autostart=false
+autorestart=false +stdout_logfile=syslog +stderr_logfile=syslog +dependent_startup=true +dependent_startup_wait_for=rsyslogd:running diff --git a/platform/pensando/dsc-drivers.mk b/platform/pensando/dsc-drivers.mk new file mode 100644 index 0000000000..21f20e8a2a --- /dev/null +++ b/platform/pensando/dsc-drivers.mk @@ -0,0 +1,9 @@ +# Pensando Elba kernel modules + +IONIC_MODULE_VERSION = 22.11.1-001 + +IONIC_MODULE = ionic-modules_$(IONIC_MODULE_VERSION)_arm64.deb +$(IONIC_MODULE)_SRC_PATH = $(PLATFORM_PATH)/dsc-drivers +$(IONIC_MODULE)_DEPENDS += $(LINUX_HEADERS) $(LINUX_HEADERS_COMMON) +$(IONIC_MODULE)_MACHINE = pensando +SONIC_DPKG_DEBS += $(IONIC_MODULE) diff --git a/platform/pensando/dsc-drivers/debian/changelog b/platform/pensando/dsc-drivers/debian/changelog new file mode 100644 index 0000000000..29adf1ddf0 --- /dev/null +++ b/platform/pensando/dsc-drivers/debian/changelog @@ -0,0 +1,5 @@ +ionic (22.11.1-001) unstable; urgency=medium + + * Initial packaging + + -- Guohan Lu Tue, 19 APR 2022 12:30:00 +0000 diff --git a/platform/pensando/dsc-drivers/debian/compat b/platform/pensando/dsc-drivers/debian/compat new file mode 100644 index 0000000000..f599e28b8a --- /dev/null +++ b/platform/pensando/dsc-drivers/debian/compat @@ -0,0 +1 @@ +10 diff --git a/platform/pensando/dsc-drivers/debian/control b/platform/pensando/dsc-drivers/debian/control new file mode 100644 index 0000000000..342424ccbf --- /dev/null +++ b/platform/pensando/dsc-drivers/debian/control @@ -0,0 +1,14 @@ +Source: ionic +Section: main +Priority: extra +Maintainer: Shantanu Shrivastava +Build-Depends: debhelper (>= 8.0.0), bzip2 +Standards-Version: 3.9.3 +#Vcs-Git: git://git.debian.org/collab-maint/bcmsdk.git +#Vcs-Browser: http://git.debian.org/?p=collab-maint/bcmsdk.git;a=summary + +Package: ionic-modules +Architecture: arm64 +Section: main +Depends: linux-image-6.1.0-11-2-arm64-unsigned +Description: kernel modules for pensando elba diff --git a/platform/pensando/dsc-drivers/debian/ionic-modules.install b/platform/pensando/dsc-drivers/debian/ionic-modules.install new file mode 100644 index 0000000000..cad27cea25 --- /dev/null +++ b/platform/pensando/dsc-drivers/debian/ionic-modules.install @@ -0,0 +1,4 @@ +src/drivers/linux/build/mdev.ko lib/modules/6.1.0-11-2-arm64/extra +src/drivers/linux/build/mnet_uio_pdrv_genirq.ko lib/modules/6.1.0-11-2-arm64/extra +src/drivers/linux/build/ionic_mnic.ko lib/modules/6.1.0-11-2-arm64/extra +systemd/ionic-modules.service lib/systemd/system diff --git a/platform/pensando/dsc-drivers/debian/rules b/platform/pensando/dsc-drivers/debian/rules new file mode 100755 index 0000000000..5db1096c2c --- /dev/null +++ b/platform/pensando/dsc-drivers/debian/rules @@ -0,0 +1,65 @@ +#!/usr/bin/make -f +# -*- makefile -*- +# Sample debian/rules that uses debhelper. +# This file was originally written by Joey Hess and Craig Small. +# As a special exception, when this file is copied by dh-make into a +# dh-make output file, you may use that output file without restriction. +# This special exception was added by Craig Small in version 0.37 of dh-make. + +include /usr/share/dpkg/pkg-info.mk + +# Uncomment this to turn on verbose mode. 
+#export DH_VERBOSE=1 + +export INSTALL_MOD_DIR:=extra + +PACKAGE_PRE_NAME := ionic-modules +KVERSION ?= $(shell uname -r) +KERNEL_SRC := /lib/modules/$(KVERSION) +MOD_SRC_DIR:= $(shell pwd) + +%: + dh $@ + +clean: + dh_testdir + dh_testroot + dh_clean + ARCH=aarch64 KSRC=/lib/modules/$(KVERSION)/build KMOD_OUT_DIR=$(CURDIR)/src/drivers/linux/build KMOD_SRC_DIR=$(CURDIR)/src/drivers/linux make -C $(CURDIR)/src/drivers/linux clean + +build: + ARCH=aarch64 KSRC=/lib/modules/$(KVERSION)/build KMOD_OUT_DIR=$(CURDIR)/src/drivers/linux/build KMOD_SRC_DIR=$(CURDIR)/src/drivers/linux make -C $(CURDIR)/src/drivers/linux + +binary: binary-arch binary-indep + # Nothing to do + +binary-arch: + # Nothing to do + +#install: build + #dh_testdir + #dh_testroot + #dh_clean -k + #dh_installdirs + +binary-indep: + dh_testdir + dh_installdirs + + # Resuming debhelper scripts + dh_testroot + dh_install + dh_installchangelogs + dh_installdocs + dh_systemd_enable + dh_installinit + dh_systemd_start + dh_link + dh_fixperms + dh_compress + dh_strip + dh_installdeb + dh_gencontrol + dh_md5sums + dh_builddeb +.PHONY: build binary binary-arch binary-indep clean diff --git a/platform/pensando/dsc-drivers/src/README.md b/platform/pensando/dsc-drivers/src/README.md new file mode 100644 index 0000000000..c9ca9e37cf --- /dev/null +++ b/platform/pensando/dsc-drivers/src/README.md @@ -0,0 +1,167 @@ +# dsc-drivers + +## Overview + +This directory holds the three drivers that are used for device support +inside the Pensando DSC: ionic/ionic_mnic, mnet, and mnet_uio_pdrv_genirq. +These are all out-of-tree drivers, not used in the standard kernel tree. +However, a variant of the ionic driver is in the upstream kernel, but does +not support the internal DSC platform. + +When building for the host, only the "ionic" driver is built, +and uses ionic_bus_pci.c. In tandem with the kconfig files, this +driver can be built on a number of different Linux distributions and +kernel versions. When building for the DSC, "ionic_mnic" is built, with +ionic_bus_platform.c, along with mnet and mnet_uio_pdrv_genirq drivers. +The mnet and mnet_uio_pdrv_genirq drivers are only built on the DSC +Linux kernel, so don't make use of the kcompat facilities. + +In the DSC build the driver .ko files are found in /platform/drivers, +and are loaded by /nic/tools/sysinit.sh. Later, the nicmgr process reads +its device description file, e.g. /platform/etc/nicmgrd/device.json, +to determine what network interface ports are to be created. It then uses +ioctl() calls into the mnet driver to instantiate those network interfaces. + +## Drivers + +drivers/common: + API description files for communication between drivers + and the DSC. + +drivers/linux/eth/ionic: + Driver that supports standard network interface ports. + +drivers/linux/mnet: + Driver that listens for ioctl() commands from userland to start + and stop the network interface ports. + +drivers/linux/mnet_uio_pdrv_genirq: + UIO interface driver for supporting userspace I/O platform drivers. + +## Building + +The Makefile in drivers/linux will build all three drivers when +ARCH=aarch64, else will build the host version of ionic. Simply cd to +the drivers/linux directory and type 'make'. + +Well, okay maybe not that simple any more - it should be, but some things +changed in the makefiles internally, and it's a little more complex. Also, +we wanted to keep this archive closer to what is used internally. 
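+
+A quick way to tell which of the invocations below applies is to check
+whether the Pensando config symbols are already set in the kernel config
+you are building against. A minimal sketch (the grep pattern just lists
+the CONFIG_* switches used in the make commands below):
+
+    grep -E 'CONFIG_(IONIC|IONIC_MNIC|MDEV|MNET_UIO_PDRV_GENIRQ)=' \
+        /lib/modules/`uname -r`/build/.config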
+
+If the headers for your current Linux kernel are findable under
+/lib/modules with kernel config values defined, this should work:
+    make M=`pwd` KCFLAGS="-Werror -Ddrv_ver=\\\"1.15.9.7\\\"" modules
+
+If the kernel config file doesn't have the Pensando configuration strings
+set in it, you can add them in the make line.
+
+For Naples drivers:
+    make M=`pwd` KCFLAGS="-Werror -Ddrv_ver=\\\"1.15.9.7\\\"" CONFIG_IONIC_MNIC=m CONFIG_MDEV=m CONFIG_MNET_UIO_PDRV_GENIRQ=m modules
+
+For the host driver:
+    make M=`pwd` KCFLAGS="-Werror -Ddrv_ver=\\\"1.15.9.7\\\"" CONFIG_IONIC=m modules
+
+As usual, if the Linux headers are elsewhere, add the appropriate -C magic:
+    make -C <path-to-kernel-headers> M=`pwd` ...
+
+## History
+
+2020-07-07 - initial drivers using 1.8.0-E-48
+
+2021-01-08 - driver updates to 1.15.3-C-14
+ - FW update fixes
+ - Makefile cleanups
+ - Add support for choosing individual Tx and Rx interrupts rather than paired
+ - Fix memory leaks and timing issues
+ - Kcompat fixes for newer upstream and Red Hat kernels
+ - Add interrupt affinity option for mnic_ionic use
+ - Other optimizations and stability fixes
+
+2021-02-02 - driver updates to 1.15.4-C-8
+ - Added support for PTP
+ - Dropped support for macvlan offload
+ - Cleaned some 'sparse' complaints
+ - Add support for devlink firmware update
+ - Dynamic interrupt coalescing
+ - Add support for separate Tx interrupts
+ - Rework queue reconfiguration for better memory handling
+ - Reorder some configuration steps to remove race conditions
+ - Changes to napi handling for better performance
+
+2021-02-24 - driver updates to 1.15.5-C-4
+ - Add weak links for PTP api for compile and load on DSC kernel without PTP support
+ - Don't set up PTP in ionic_mnic if PTP bar is not available
+ - Closed a small window to prevent starting queues when in FW reset
+ - Other small bug fixes to PTP support
+ - Compat fixes for compiling on Linux v5.11
+ - Guard against adminq use after free
+
+2021-03-29 - driver updates to 1.15.6-C-8
+ - better error case handling
+ - bug fixes for PTP support and error handling
+ - clean up mnet code to upstream code format standards
+ - updates for compiling under v5.10
+
+2021-04-30 - driver updates to 1.15.7-C-3
+ - Copyright updates
+ - Minor code cleanups to better match upstream drivers
+ - Renamed mnet to mdev to be more generic
+ - Added support in mdev for future mcrypt devices
+
+2021-05-19 - driver updates to 1.15.8-C-12
+ - added support for cmb-rings - Tx/Rx descriptor rings allocated in
+   DSC Controller Memory Buffers rather than on host
+ - rx_mode locking to block thread race
+ - struct ionic_lif rework for better cache line layout
+
+2021-06-30 - driver updates for 1.15.9-C-7
+ - monitoring fw status generation for fw restart hints
+ - catch lack of PTP support earlier in service routine
+ - makefile fixes for sles 15 sp3
+ - lower page splitting limit to better account for headers
+ - VF stats area fix for PF
+ - better thread-safe rx_mode work
+
+2021-08-04 - driver updates for 1.15.9-C-21
+ - Added watchdog to platform for closer tracking of FW updates
+   and crash recycle
+ - Fixed dynamic interrupt management accounting
+ - Fixes for mac filter management
+
+2021-08-16 - driver updates for 1.15.9-C-26
+ - Add work-around for Elba doorbell issue
+
+2021-08-19 - driver updates for 1.15.9-C-28
+ - Additional queue config locking for stress timing issue
+ - Suppressed unnecessary log message
+
+2021-08-25 - driver update for 1.15.9-C-32
+ - added use of reserved memory region for
dma + +2022-02-02 - driver update for 1.15.9-C-64 + - Remove an unnecessary kcompat macro + +2022-02-03 - driver update for 1.15.9-C-65 + - add vlan filter management to mac filter management + - update filter management for handling overflow + - updates for recent upstream kernels and distros + - better handling of various FW recovery scenarios + +2022-06-20 - driver update for 1.15.9-C-100 + - various code cleanups + - add debugfs support to count number of Tx/Rx allocations + - better memory handling + - minor bug fixes + +2022-12-05 - driver update for 22.11.1-001 + - update ionic drivers to 22.11.1-001; version numbers now follow + the driver release numbers rather than the DSC firmware release version + - enable tunnel offloads + - support for changes in MTU, queue count, and ring length while CMB is active + - set random VF mac addresses by default + - better oprom debugging support + - Rx/Tx performance tuning + - fixes imported from upstream driver + - bug fixes diff --git a/platform/pensando/dsc-drivers/src/drivers/common/ionic_if.h b/platform/pensando/dsc-drivers/src/drivers/common/ionic_if.h new file mode 100644 index 0000000000..9b2f3b7175 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/common/ionic_if.h @@ -0,0 +1,3602 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */ +/* Copyright (c) 2017 - 2021 Pensando Systems, Inc. All rights reserved. */ + +#ifndef _IONIC_IF_H_ +#define _IONIC_IF_H_ + +#define IONIC_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */ +#define IONIC_DEV_INFO_VERSION 1 +#define IONIC_IFNAMSIZ 16 + +#ifdef __CHECKER__ +#define IONIC_CHECK_CMD_LENGTH(X) +#define IONIC_CHECK_COMP_LENGTH(X) +#define IONIC_CHECK_CMD_DATA_LENGTH(X) +#define IONIC_CHECK_OPROM_LENGTH(X) +#define IONIC_CHECK_DEV_INFO_REGS_LENGTH(X) +#else +#define IONIC_SIZE_CHECK(type, N, X) enum ionic_static_assert_enum_##X \ + { ionic_static_assert_##X = (N) / (sizeof(type X) == (N)) } +#define IONIC_CHECK_CMD_LENGTH(X) IONIC_SIZE_CHECK(struct, 64, X) +#define IONIC_CHECK_COMP_LENGTH(X) IONIC_SIZE_CHECK(struct, 16, X) +#define IONIC_CHECK_CMD_DATA_LENGTH(X) IONIC_SIZE_CHECK(union, 1912, X) +#define IONIC_CHECK_OPROM_LENGTH(X) IONIC_SIZE_CHECK(struct, 32, X) +#define IONIC_CHECK_DEV_INFO_REGS_LENGTH(X) IONIC_SIZE_CHECK(union, 2048, X) +#endif + +/** + * enum ionic_cmd_opcode - Device commands + */ +enum ionic_cmd_opcode { + IONIC_CMD_NOP = 0, + + /* Device commands */ + IONIC_CMD_IDENTIFY = 1, + IONIC_CMD_INIT = 2, + IONIC_CMD_RESET = 3, + IONIC_CMD_GETATTR = 4, + IONIC_CMD_SETATTR = 5, + IONIC_CMD_DEBUG = 6, + + /* Port commands */ + IONIC_CMD_PORT_IDENTIFY = 10, + IONIC_CMD_PORT_INIT = 11, + IONIC_CMD_PORT_RESET = 12, + IONIC_CMD_PORT_GETATTR = 13, + IONIC_CMD_PORT_SETATTR = 14, + + /* LIF commands */ + IONIC_CMD_LIF_IDENTIFY = 20, + IONIC_CMD_LIF_INIT = 21, + IONIC_CMD_LIF_RESET = 22, + IONIC_CMD_LIF_GETATTR = 23, + IONIC_CMD_LIF_SETATTR = 24, + IONIC_CMD_LIF_SETPHC = 25, + + IONIC_CMD_RX_MODE_SET = 30, + IONIC_CMD_RX_FILTER_ADD = 31, + IONIC_CMD_RX_FILTER_DEL = 32, + + /* Queue commands */ + IONIC_CMD_Q_IDENTIFY = 39, + IONIC_CMD_Q_INIT = 40, + IONIC_CMD_Q_CONTROL = 41, + + /* RDMA commands */ + IONIC_CMD_RDMA_RESET_LIF = 50, + IONIC_CMD_RDMA_CREATE_EQ = 51, + IONIC_CMD_RDMA_CREATE_CQ = 52, + IONIC_CMD_RDMA_CREATE_ADMINQ = 53, + + /* SR/IOV commands */ + IONIC_CMD_VF_GETATTR = 60, + IONIC_CMD_VF_SETATTR = 61, + IONIC_CMD_VF_CTRL = 62, + + /* UPT command */ + IONIC_CMD_UPT_MESSAGE = 100, + + /* UEFI HII commands */ + IONIC_CMD_HII_IDENTIFY = 235, + 
IONIC_CMD_HII_GETATTR = 236, + IONIC_CMD_HII_SETATTR = 237, + IONIC_CMD_HII_INIT = 238, + IONIC_CMD_HII_RESET = 239, + + /* QoS commands */ + IONIC_CMD_QOS_CLASS_IDENTIFY = 240, + IONIC_CMD_QOS_CLASS_INIT = 241, + IONIC_CMD_QOS_CLASS_RESET = 242, + IONIC_CMD_QOS_CLASS_UPDATE = 243, + IONIC_CMD_QOS_CLEAR_STATS = 244, + IONIC_CMD_QOS_RESET = 245, + + /* Firmware commands */ + IONIC_CMD_FW_DOWNLOAD = 252, + IONIC_CMD_FW_CONTROL = 253, + IONIC_CMD_FW_DOWNLOAD_V1 = 254, + IONIC_CMD_FW_CONTROL_V1 = 255, +}; + +/** + * enum ionic_status_code - Device command return codes + */ +enum ionic_status_code { + IONIC_RC_SUCCESS = 0, /* Success */ + IONIC_RC_EVERSION = 1, /* Incorrect version for request */ + IONIC_RC_EOPCODE = 2, /* Invalid cmd opcode */ + IONIC_RC_EIO = 3, /* I/O error */ + IONIC_RC_EPERM = 4, /* Permission denied */ + IONIC_RC_EQID = 5, /* Bad qid */ + IONIC_RC_EQTYPE = 6, /* Bad qtype */ + IONIC_RC_ENOENT = 7, /* No such element */ + IONIC_RC_EINTR = 8, /* operation interrupted */ + IONIC_RC_EAGAIN = 9, /* Try again */ + IONIC_RC_ENOMEM = 10, /* Out of memory */ + IONIC_RC_EFAULT = 11, /* Bad address */ + IONIC_RC_EBUSY = 12, /* Device or resource busy */ + IONIC_RC_EEXIST = 13, /* object already exists */ + IONIC_RC_EINVAL = 14, /* Invalid argument */ + IONIC_RC_ENOSPC = 15, /* No space left or alloc failure */ + IONIC_RC_ERANGE = 16, /* Parameter out of range */ + IONIC_RC_BAD_ADDR = 17, /* Descriptor contains a bad ptr */ + IONIC_RC_DEV_CMD = 18, /* Device cmd attempted on AdminQ */ + IONIC_RC_ENOSUPP = 19, /* Operation not supported */ + IONIC_RC_ERROR = 29, /* Generic error */ + IONIC_RC_ERDMA = 30, /* Generic RDMA error */ + IONIC_RC_EVFID = 31, /* VF ID does not exist */ + IONIC_RC_BAD_FW = 32, /* FW file is invalid or corrupted */ +}; + +enum ionic_notifyq_opcode { + IONIC_EVENT_LINK_CHANGE = 1, + IONIC_EVENT_RESET = 2, + IONIC_EVENT_HEARTBEAT = 3, + IONIC_EVENT_LOG = 4, + IONIC_EVENT_XCVR = 5, +}; + +/** + * struct ionic_upt_cmd - command format for all UPT commands + * @opcode: Opcode for the command + * @vf_index: VF Index. + * @upt_cmd_data: UPT specific command bytes + */ + +struct ionic_upt_cmd { + u8 opcode; + u8 rsvd; + __le16 vf_index; + u8 rsvd1[4]; + u8 upt_cmd_data[56]; +}; +IONIC_CHECK_CMD_LENGTH(ionic_upt_cmd); + +/** + * struct ionic_upt_comp - UPT command completion. 
+ * @status: Status of the command (enum ionic_status_code) + */ +struct ionic_upt_comp { + u8 status; + u8 rsvd[15]; +}; +IONIC_CHECK_COMP_LENGTH(ionic_upt_comp); + +/** + * struct ionic_admin_cmd - General admin command format + * @opcode: Opcode for the command + * @lif_index: LIF index + * @cmd_data: Opcode-specific command bytes + */ +struct ionic_admin_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 cmd_data[60]; +}; + +/** + * struct ionic_admin_comp - General admin command completion format + * @status: Status of the command (enum ionic_status_code) + * @comp_index: Index in the descriptor ring for which this is the completion + * @cmd_data: Command-specific bytes + * @color: Color bit (Always 0 for commands issued to the + * Device Cmd Registers) + */ +struct ionic_admin_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + u8 cmd_data[11]; + u8 color; +#define IONIC_COMP_COLOR_MASK 0x80 +}; + +static inline u8 color_match(u8 color, u8 done_color) +{ + return (!!(color & IONIC_COMP_COLOR_MASK)) == done_color; +} + +/** + * struct ionic_nop_cmd - NOP command + * @opcode: opcode + */ +struct ionic_nop_cmd { + u8 opcode; + u8 rsvd[63]; +}; + +/** + * struct ionic_nop_comp - NOP command completion + * @status: Status of the command (enum ionic_status_code) + */ +struct ionic_nop_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct ionic_dev_init_cmd - Device init command + * @opcode: opcode + * @type: Device type + */ +struct ionic_dev_init_cmd { + u8 opcode; + u8 type; + u8 rsvd[62]; +}; + +/** + * struct ionic_dev_init_comp - Device init command completion + * @status: Status of the command (enum ionic_status_code) + */ +struct ionic_dev_init_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct ionic_dev_reset_cmd - Device reset command + * @opcode: opcode + */ +struct ionic_dev_reset_cmd { + u8 opcode; + u8 rsvd[63]; +}; + +/** + * struct ionic_dev_reset_comp - Reset command completion + * @status: Status of the command (enum ionic_status_code) + */ +struct ionic_dev_reset_comp { + u8 status; + u8 rsvd[15]; +}; + +#define IONIC_IDENTITY_VERSION_1 1 +#define IONIC_DEV_IDENTITY_VERSION_1 IONIC_IDENTITY_VERSION_1 +#define IONIC_DEV_IDENTITY_VERSION_2 (IONIC_DEV_IDENTITY_VERSION_1 + 1) + +/** + * struct ionic_dev_identify_cmd - Driver/device identify command + * @opcode: opcode + * @ver: Highest version of identify supported by driver + */ +struct ionic_dev_identify_cmd { + u8 opcode; + u8 ver; + u8 rsvd[62]; +}; + +/** + * struct ionic_dev_identify_comp - Driver/device identify command completion + * @status: Status of the command (enum ionic_status_code) + * @ver: Version of identify returned by device + */ +struct ionic_dev_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +enum ionic_debug_type { + IONIC_DEBUG_TYPE_MSG = 1, +}; +/** + * struct ionic_dev_debug_cmd - Driver/device debug command + * @opcode: opcode + * @type: debug_type (enum ionic_debug_type) + */ +struct ionic_dev_debug_cmd { + u8 opcode; + u8 debug_type; + u8 rsvd[62]; +}; +IONIC_CHECK_CMD_LENGTH(ionic_dev_debug_cmd); + +/** + * struct ionic_dev_debug_comp - Driver/device debug command completion + * @status: Status of the command (enum ionic_status_code) + */ +struct ionic_dev_debug_comp { + u8 status; + u8 rsvd[15]; +}; +IONIC_CHECK_COMP_LENGTH(ionic_dev_debug_comp); + +enum ionic_os_type { + IONIC_OS_TYPE_LINUX = 1, + IONIC_OS_TYPE_WIN = 2, + IONIC_OS_TYPE_DPDK = 3, + IONIC_OS_TYPE_FREEBSD = 4, + IONIC_OS_TYPE_IPXE = 5, + IONIC_OS_TYPE_ESXI = 6, +}; + +/** + * union ionic_drv_identity - 
driver identity information + * @os_type: OS type (see enum ionic_os_type) + * @os_dist: OS distribution, numeric format + * @os_dist_str: OS distribution, string format + * @kernel_ver: Kernel version, numeric format + * @kernel_ver_str: Kernel version, string format + * @driver_ver_str: Driver version, string format + */ +union ionic_drv_identity { + struct { + __le32 os_type; + __le32 os_dist; + char os_dist_str[128]; + __le32 kernel_ver; + char kernel_ver_str[32]; + char driver_ver_str[32]; + }; + __le32 words[478]; +}; + +/** + * enum ionic_dev_capability - Device capabilities + * @IONIC_DEV_CAP_VF_CTRL: Device supports VF ctrl operations + */ +enum ionic_dev_capability { + IONIC_DEV_CAP_VF_CTRL = BIT(0), +}; + +/** + * union ionic_dev_identity - device identity information + * @version: Version of device identify + * @type: Identify type (0 for now) + * @nports: Number of ports provisioned + * @nlifs: Number of LIFs provisioned + * @nintrs: Number of interrupts provisioned + * @ndbpgs_per_lif: Number of doorbell pages per LIF + * @intr_coal_mult: Interrupt coalescing multiplication factor + * Scale user-supplied interrupt coalescing + * value in usecs to device units using: + * device units = usecs * mult / div + * @intr_coal_div: Interrupt coalescing division factor + * Scale user-supplied interrupt coalescing + * value in usecs to device units using: + * device units = usecs * mult / div + * @eq_count: Number of shared event queues + * @hwstamp_mask: Bitmask for subtraction of hardware tick values. + * @hwstamp_mult: Hardware tick to nanosecond multiplier. + * @hwstamp_shift: Hardware tick to nanosecond divisor (power of two). + * @capabilities: Device capabilities + */ +union ionic_dev_identity { + struct { + u8 version; + u8 type; + u8 rsvd[2]; + u8 nports; + u8 rsvd2[3]; + __le32 nlifs; + __le32 nintrs; + __le32 ndbpgs_per_lif; + __le32 intr_coal_mult; + __le32 intr_coal_div; + __le32 eq_count; + __le64 hwstamp_mask; + __le32 hwstamp_mult; + __le32 hwstamp_shift; + __le64 capabilities; + }; + __le32 words[478]; +}; + +enum ionic_lif_type { + IONIC_LIF_TYPE_CLASSIC = 0, + IONIC_LIF_TYPE_MACVLAN = 1, + IONIC_LIF_TYPE_NETQUEUE = 2, +}; + +#define IONIC_LIF_IDENTITY_VERSION_1 IONIC_DEV_IDENTITY_VERSION_1 + +/** + * struct ionic_lif_identify_cmd - LIF identify command + * @opcode: opcode + * @type: LIF type (enum ionic_lif_type) + * @ver: Version of identify returned by device + */ +struct ionic_lif_identify_cmd { + u8 opcode; + u8 type; + u8 ver; + u8 rsvd[61]; +}; + +/** + * struct ionic_lif_identify_comp - LIF identify command completion + * @status: Status of the command (enum ionic_status_code) + * @ver: Version of identify returned by device + */ +struct ionic_lif_identify_comp { + u8 status; + u8 ver; + u8 rsvd2[14]; +}; + +/** + * enum ionic_lif_capability - LIF capabilities + * @IONIC_LIF_CAP_ETH: LIF supports Ethernet + * @IONIC_LIF_CAP_RDMA: LIF supports RDMA + */ +enum ionic_lif_capability { + IONIC_LIF_CAP_ETH = BIT(0), + IONIC_LIF_CAP_RDMA = BIT(1), +}; + +/** + * enum ionic_logical_qtype - Logical Queue Types + * @IONIC_QTYPE_ADMINQ: Administrative Queue + * @IONIC_QTYPE_NOTIFYQ: Notify Queue + * @IONIC_QTYPE_RXQ: Receive Queue + * @IONIC_QTYPE_TXQ: Transmit Queue + * @IONIC_QTYPE_EQ: Event Queue + * @IONIC_QTYPE_MAX: Max queue type supported + */ +enum ionic_logical_qtype { + IONIC_QTYPE_ADMINQ = 0, + IONIC_QTYPE_NOTIFYQ = 1, + IONIC_QTYPE_RXQ = 2, + IONIC_QTYPE_TXQ = 3, + IONIC_QTYPE_EQ = 4, + IONIC_QTYPE_MAX = 16, +}; + +/** + * enum ionic_q_feature - Common 
Features for most queue types + * + * Common features use bits 0-15. Per-queue-type features use higher bits. + * + * @IONIC_QIDENT_F_CQ: Queue has completion ring + * @IONIC_QIDENT_F_SG: Queue has scatter/gather ring + * @IONIC_QIDENT_F_EQ: Queue can use event queue + * @IONIC_QIDENT_F_CMB: Queue is in cmb bar + * @IONIC_Q_F_2X_DESC: Double main descriptor size + * @IONIC_Q_F_2X_CQ_DESC: Double cq descriptor size + * @IONIC_Q_F_2X_SG_DESC: Double sg descriptor size + * @IONIC_Q_F_4X_DESC: Quadruple main descriptor size + * @IONIC_Q_F_4X_CQ_DESC: Quadruple cq descriptor size + * @IONIC_Q_F_4X_SG_DESC: Quadruple sg descriptor size + */ +enum ionic_q_feature { + IONIC_QIDENT_F_CQ = BIT_ULL(0), + IONIC_QIDENT_F_SG = BIT_ULL(1), + IONIC_QIDENT_F_EQ = BIT_ULL(2), + IONIC_QIDENT_F_CMB = BIT_ULL(3), + IONIC_Q_F_2X_DESC = BIT_ULL(4), + IONIC_Q_F_2X_CQ_DESC = BIT_ULL(5), + IONIC_Q_F_2X_SG_DESC = BIT_ULL(6), + IONIC_Q_F_4X_DESC = BIT_ULL(7), + IONIC_Q_F_4X_CQ_DESC = BIT_ULL(8), + IONIC_Q_F_4X_SG_DESC = BIT_ULL(9), +}; + +/** + * enum ionic_rxq_feature - RXQ-specific Features + * + * Per-queue-type features use bits 16 and higher. + * + * @IONIC_RXQ_F_HWSTAMP: Queue supports Hardware Timestamping + */ +enum ionic_rxq_feature { + IONIC_RXQ_F_HWSTAMP = BIT_ULL(16), +}; + +/** + * enum ionic_txq_feature - TXQ-specific Features + * + * Per-queue-type features use bits 16 and higher. + * + * @IONIC_TXQ_F_HWSTAMP: Queue supports Hardware Timestamping + */ +enum ionic_txq_feature { + IONIC_TXQ_F_HWSTAMP = BIT_ULL(16), +}; + +/** + * struct ionic_hwstamp_bits - Hardware timestamp decoding bits + * @IONIC_HWSTAMP_INVALID: Invalid hardware timestamp value + * @IONIC_HWSTAMP_CQ_NEGOFFSET: Timestamp field negative offset + * from the base cq descriptor. + */ +enum ionic_hwstamp_bits { + IONIC_HWSTAMP_INVALID = ~0ull, + IONIC_HWSTAMP_CQ_NEGOFFSET = 8, +}; + +/** + * struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type + * @qtype: Hardware Queue Type + * @qid_count: Number of Queue IDs of the logical type + * @qid_base: Minimum Queue ID of the logical type + */ +struct ionic_lif_logical_qtype { + u8 qtype; + u8 rsvd[3]; + __le32 qid_count; + __le32 qid_base; +}; + +/** + * enum ionic_lif_state - LIF state + * @IONIC_LIF_DISABLE: LIF disabled + * @IONIC_LIF_ENABLE: LIF enabled + * @IONIC_LIF_QUIESCE: LIF Quiesced + */ +enum ionic_lif_state { + IONIC_LIF_QUIESCE = 0, + IONIC_LIF_ENABLE = 1, + IONIC_LIF_DISABLE = 2, +}; + +/** + * union ionic_lif_config - LIF configuration + * @state: LIF state (enum ionic_lif_state) + * @name: LIF name + * @mtu: MTU + * @mac: Station MAC address + * @vlan: Default Vlan ID + * @features: Features (enum ionic_eth_hw_features) + * @queue_count: Queue counts per queue-type + */ +union ionic_lif_config { + struct { + u8 state; + u8 rsvd[3]; + char name[IONIC_IFNAMSIZ]; + __le32 mtu; + u8 mac[6]; + __le16 vlan; + __le64 features; + __le32 queue_count[IONIC_QTYPE_MAX]; + } __attribute__((packed)); + __le32 words[64]; +}; + +/** + * struct ionic_lif_identity - LIF identity information (type-specific) + * + * @capabilities: LIF capabilities + * + * @eth: Ethernet identify structure + * @version: Ethernet identify structure version + * @max_ucast_filters: Number of perfect unicast addresses supported + * @max_mcast_filters: Number of perfect multicast addresses supported + * @min_frame_size: Minimum size of frames to be sent + * @max_frame_size: Maximum size of frames to be sent + * @hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode) + * @hwstamp_rx_filters: 
/**
+ * struct ionic_lif_identity - LIF identity information (type-specific)
+ *
+ * @capabilities: LIF capabilities
+ *
+ * @eth:               Ethernet identify structure
+ * @version:           Ethernet identify structure version
+ * @max_ucast_filters: Number of perfect unicast addresses supported
+ * @max_mcast_filters: Number of perfect multicast addresses supported
+ * @rss_ind_tbl_sz:    Size of the RSS indirection table
+ * @min_frame_size:    Minimum size of frames to be sent
+ * @max_frame_size:    Maximum size of frames to be sent
+ * @hwstamp_tx_modes:  Bitmask of BIT_ULL(enum ionic_txstamp_mode)
+ * @hwstamp_rx_filters: Bitmask of enum ionic_pkt_class
+ * @config:            LIF config struct with features, mtu, mac, q counts
+ *
+ * @rdma:              RDMA identify structure
+ * @version:           RDMA version of opcodes and queue descriptors
+ * @qp_opcodes:        Number of RDMA queue pair opcodes supported
+ * @admin_opcodes:     Number of RDMA admin opcodes supported
+ * @npts_per_lif:      Page table size per LIF
+ * @nmrs_per_lif:      Number of memory regions per LIF
+ * @nahs_per_lif:      Number of address handles per LIF
+ * @max_stride:        Max work request stride
+ * @cl_stride:         Cache line stride
+ * @pte_stride:        Page table entry stride
+ * @rrq_stride:        Remote RQ work request stride
+ * @rsq_stride:        Remote SQ work request stride
+ * @dcqcn_profiles:    Number of DCQCN profiles
+ * @aq_qtype:          RDMA Admin Qtype
+ * @sq_qtype:          RDMA Send Qtype
+ * @rq_qtype:          RDMA Receive Qtype
+ * @cq_qtype:          RDMA Completion Qtype
+ * @eq_qtype:          RDMA Event Qtype
+ */
+union ionic_lif_identity {
+	struct {
+		__le64 capabilities;
+
+		struct {
+			u8     version;
+			u8     rsvd[3];
+			__le32 max_ucast_filters;
+			__le32 max_mcast_filters;
+			__le16 rss_ind_tbl_sz;
+			__le32 min_frame_size;
+			__le32 max_frame_size;
+			u8     rsvd2[2];
+			__le64 hwstamp_tx_modes;
+			__le64 hwstamp_rx_filters;
+			u8     rsvd3[88];
+			union ionic_lif_config config;
+		} __attribute__((packed)) eth;
+
+		struct {
+			u8     version;
+			u8     qp_opcodes;
+			u8     admin_opcodes;
+			u8     rsvd;
+			__le32 npts_per_lif;
+			__le32 nmrs_per_lif;
+			__le32 nahs_per_lif;
+			u8     max_stride;
+			u8     cl_stride;
+			u8     pte_stride;
+			u8     rrq_stride;
+			u8     rsq_stride;
+			u8     dcqcn_profiles;
+			u8     rsvd_dimensions[10];
+			struct ionic_lif_logical_qtype aq_qtype;
+			struct ionic_lif_logical_qtype sq_qtype;
+			struct ionic_lif_logical_qtype rq_qtype;
+			struct ionic_lif_logical_qtype cq_qtype;
+			struct ionic_lif_logical_qtype eq_qtype;
+		} __attribute__((packed)) rdma;
+	} __attribute__((packed));
+	__le32 words[478];
+};
+
+/**
+ * struct ionic_lif_init_cmd - LIF init command
+ * @opcode:  Opcode
+ * @type:    LIF type (enum ionic_lif_type)
+ * @index:   LIF index
+ * @info_pa: Destination address for LIF info (struct ionic_lif_info)
+ */
+struct ionic_lif_init_cmd {
+	u8     opcode;
+	u8     type;
+	__le16 index;
+	__le32 rsvd;
+	__le64 info_pa;
+	u8     rsvd2[48];
+};
+
+/**
+ * struct ionic_lif_init_comp - LIF init command completion
+ * @status:   Status of the command (enum ionic_status_code)
+ * @hw_index: Hardware index of the initialized LIF
+ */
+struct ionic_lif_init_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 hw_index;
+	u8     rsvd2[12];
+};
+
+/**
+ * struct ionic_q_identify_cmd - queue identify command
+ * @opcode:   opcode
+ * @lif_type: LIF type (enum ionic_lif_type)
+ * @type:     Logical queue type (enum ionic_logical_qtype)
+ * @ver:      Highest queue type version that the driver supports
+ */
+struct ionic_q_identify_cmd {
+	u8     opcode;
+	u8     rsvd;
+	__le16 lif_type;
+	u8     type;
+	u8     ver;
+	u8     rsvd2[58];
+};
+
+/**
+ * struct ionic_q_identify_comp - queue identify command completion
+ * @status:     Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @ver:        Queue type version that can be used with FW
+ */
+struct ionic_q_identify_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+	u8     ver;
+	u8     rsvd2[11];
+};
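
As a sketch of the version negotiation this command pair implies (not part of the patch): the driver asks for the newest queue layout it understands and the device completes with the version it will actually use. `IONIC_CMD_Q_IDENTIFY` is assumed to be the opcode constant defined elsewhere in this header.

/* Editor's sketch: request the newest Tx queue layout the driver supports. */
static void example_fill_q_identify(struct ionic_q_identify_cmd *cmd, u8 max_ver)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = IONIC_CMD_Q_IDENTIFY;
	cmd->lif_type = cpu_to_le16(IONIC_LIF_TYPE_CLASSIC);
	cmd->type = IONIC_QTYPE_TXQ;
	cmd->ver = max_ver;	/* device completes with ver <= max_ver */
}
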
/**
+ * union ionic_q_identity - queue identity information
+ * @version:        Queue type version that can be used with FW
+ * @supported:      Bitfield of queue versions, first bit = ver 0
+ * @features:       Queue features (enum ionic_q_feature, etc)
+ * @desc_sz:        Descriptor size
+ * @comp_sz:        Completion descriptor size
+ * @sg_desc_sz:     Scatter/Gather descriptor size
+ * @max_sg_elems:   Maximum number of Scatter/Gather elements
+ * @sg_desc_stride: Number of Scatter/Gather elements per descriptor
+ */
+union ionic_q_identity {
+	struct {
+		u8     version;
+		u8     supported;
+		u8     rsvd[6];
+		__le64 features;
+		__le16 desc_sz;
+		__le16 comp_sz;
+		__le16 sg_desc_sz;
+		__le16 max_sg_elems;
+		__le16 sg_desc_stride;
+	};
+	__le32 words[478];
+};
+
+/**
+ * struct ionic_q_init_cmd - Queue init command
+ * @opcode:       opcode
+ * @type:         Logical queue type
+ * @ver:          Queue type version
+ * @lif_index:    LIF index
+ * @index:        (LIF, qtype) relative admin queue index
+ * @intr_index:   Interrupt control register index, or Event queue index
+ * @pid:          Process ID
+ * @flags:
+ *    IRQ: Interrupt requested on completion
+ *    ENA: Enable the queue. If ENA=0 the queue is initialized
+ *         but remains disabled, to be later enabled with the
+ *         Queue Enable command. If ENA=1, then queue is
+ *         initialized and then enabled.
+ *    SG:  Enable Scatter-Gather on the queue.
+ *    EQ:  Enable the Event Queue
+ * @cos:          Class of service for this queue
+ * @ring_size:    Queue ring size, encoded as a log2(size), in number
+ *                of descs. The actual ring size is (1 << ring_size).
+ *                For example, to select a ring size of 64 descriptors
+ *                write ring_size = 6. The minimum ring_size value is 2
+ *                for a ring size of 4 descriptors. The maximum
+ *                ring_size value is 16 for a ring size of 64k
+ *                descriptors. Values of ring_size <2 and >16 are
+ *                reserved.
+ * @ring_base:    Queue ring base address
+ * @cq_ring_base: Completion queue ring base address
+ * @sg_ring_base: Scatter/Gather ring base address
+ * @features:     Mask of queue features to enable, if not in the flags above.
+ */
+struct ionic_q_init_cmd {
+	u8     opcode;
+	u8     rsvd;
+	__le16 lif_index;
+	u8     type;
+	u8     ver;
+	u8     rsvd1[2];
+	__le32 index;
+	__le16 pid;
+	__le16 intr_index;
+	__le16 flags;
+#define IONIC_QINIT_F_IRQ	0x01	/* Request interrupt on completion */
+#define IONIC_QINIT_F_ENA	0x02	/* Enable the queue */
+#define IONIC_QINIT_F_SG	0x04	/* Enable scatter/gather on the queue */
+#define IONIC_QINIT_F_EQ	0x08	/* Enable event queue */
+#define IONIC_QINIT_F_CMB	0x10	/* Enable cmb-based queue */
+#define IONIC_QINIT_F_DEBUG	0x80	/* Enable queue debugging */
+	u8     cos;
+	u8     ring_size;
+	__le64 ring_base;
+	__le64 cq_ring_base;
+	__le64 sg_ring_base;
+	u8     rsvd2[12];
+	__le64 features;
+} __attribute__((packed));
+
+IONIC_CHECK_CMD_LENGTH(ionic_q_init_cmd);
+
+/**
+ * struct ionic_q_init_comp - Queue init command completion
+ * @status:     Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @hw_index:   Hardware Queue ID
+ * @hw_type:    Hardware Queue type
+ * @color:      Color
+ */
+struct ionic_q_init_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+	__le32 hw_index;
+	u8     hw_type;
+	u8     rsvd2[6];
+	u8     color;
+};
+
+/* the device's internal addressing uses up to 52 bits */
+#define IONIC_ADDR_LEN		52
+#define IONIC_ADDR_MASK		(BIT_ULL(IONIC_ADDR_LEN) - 1)
+
+enum ionic_txq_desc_opcode {
+	IONIC_TXQ_DESC_OPCODE_CSUM_NONE = 0,
+	IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL = 1,
+	IONIC_TXQ_DESC_OPCODE_CSUM_HW = 2,
+	IONIC_TXQ_DESC_OPCODE_TSO = 3,
+};
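
A one-line sketch of the ring_size encoding described above (not part of the patch); `ilog2` is the standard kernel log2 helper:

/* Editor's sketch: the ring must be a power of two between 4 descriptors
 * (ring_size = 2) and 64k descriptors (ring_size = 16).
 */
static void example_set_ring_size(struct ionic_q_init_cmd *cmd,
				  unsigned int num_descs)
{
	cmd->ring_size = ilog2(num_descs);	/* e.g. 1024 -> 10 */
}
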
/**
+ * struct ionic_txq_desc - Ethernet Tx queue descriptor format
+ * @cmd: Tx operation, see IONIC_TXQ_DESC_OPCODE_*:
+ *
+ *          IONIC_TXQ_DESC_OPCODE_CSUM_NONE:
+ *             Non-offload send. No segmentation,
+ *             fragmentation or checksum calc/insertion is
+ *             performed by device; packet is prepared
+ *             to send by software stack and requires
+ *             no further manipulation from device.
+ *
+ *          IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL:
+ *             Offload 16-bit L4 checksum
+ *             calculation/insertion. The device will
+ *             calculate the L4 checksum value and
+ *             insert the result in the packet's L4
+ *             header checksum field. The L4 checksum
+ *             is calculated starting at @csum_start bytes
+ *             into the packet to the end of the packet.
+ *             The checksum insertion position is given
+ *             in @csum_offset, which is the offset from
+ *             @csum_start to the checksum field in the L4
+ *             header. This feature is only applicable to
+ *             protocols such as TCP, UDP and ICMP where a
+ *             standard (i.e. the 'IP-style' checksum)
+ *             one's complement 16-bit checksum is used,
+ *             using an IP pseudo-header to seed the
+ *             calculation. Software will preload the L4
+ *             checksum field with the IP pseudo-header
+ *             checksum.
+ *
+ *             For tunnel encapsulation, @csum_start and
+ *             @csum_offset refer to the inner L4
+ *             header. Supported tunnel encapsulations
+ *             are: IPIP, GRE, and UDP. If @encap
+ *             is clear, no further processing by the
+ *             device is required; software will
+ *             calculate the outer header checksums. If
+ *             @encap is set, the device will
+ *             offload the outer header checksums using
+ *             LCO (local checksum offload) (see
+ *             Documentation/networking/checksum-offloads.rst
+ *             for more info).
+ *
+ *          IONIC_TXQ_DESC_OPCODE_CSUM_HW:
+ *             Offload 16-bit checksum computation to hardware.
+ *             If @csum_l3 is set then the packet's L3 checksum is
+ *             updated. Similarly, if @csum_l4 is set then the L4
+ *             checksum is updated. If @encap is set then encap header
+ *             checksums are also updated.
+ *
+ *          IONIC_TXQ_DESC_OPCODE_TSO:
+ *             Device performs TCP segmentation offload
+ *             (TSO). @hdr_len is the number of bytes
+ *             to the end of TCP header (the offset to
+ *             the TCP payload). @mss is the desired
+ *             MSS, the TCP payload length for each
+ *             segment. The device will calculate/
+ *             insert IP (IPv4 only) and TCP checksums
+ *             for each segment. In the first data
+ *             buffer containing the header template,
+ *             the driver will set IPv4 checksum to 0
+ *             and preload TCP checksum with the IP
+ *             pseudo header calculated with IP length = 0.
+ *
+ *             Supported tunnel encapsulations are IPIP,
+ *             layer-3 GRE, and UDP. @hdr_len includes
+ *             both outer and inner headers. The driver
+ *             will set IPv4 checksum to zero and
+ *             preload TCP checksum with IP pseudo
+ *             header on the inner header.
+ *
+ *             TCP ECN offload is supported. The device
+ *             will set CWR flag in the first segment if
+ *             CWR is set in the template header, and
+ *             clear CWR in remaining segments.
+ * @flags:
+ *          vlan:
+ *             Insert an L2 VLAN header using @vlan_tci
+ *          encap:
+ *             Calculate encap header checksum
+ *          csum_l3:
+ *             Compute L3 header checksum
+ *          csum_l4:
+ *             Compute L4 header checksum
+ *          tso_sot:
+ *             TSO start
+ *          tso_eot:
+ *             TSO end
+ * @num_sg_elems: Number of scatter-gather elements in SG descriptor
+ * @addr:         First data buffer's DMA address
+ *                (Subsequent data buffers are on txq_sg_desc)
+ * @len:          First data buffer's length, in bytes
+ * @vlan_tci:     VLAN tag to insert in the packet (if requested
+ *                by @V-bit). Includes .1p and .1q tags
+ * @hdr_len:      Length of packet headers, including
+ *                encapsulating outer header, if applicable.
+ *                Valid for opcodes IONIC_TXQ_DESC_OPCODE_CSUM_HW and
+ *                IONIC_TXQ_DESC_OPCODE_TSO. Should be set to zero for
+ *                all other modes. For IONIC_TXQ_DESC_OPCODE_CSUM_HW,
+ *                @hdr_len is length of headers up to inner-most L4 header.
+ *                For IONIC_TXQ_DESC_OPCODE_TSO, @hdr_len is up to
+ *                inner-most L4 payload, so inclusive of inner-most L4 header.
+ * @mss:          Desired MSS value for TSO; only applicable for
+ *                IONIC_TXQ_DESC_OPCODE_TSO
+ * @csum_start:   Offset from packet to first byte checked in L4 checksum
+ * @csum_offset:  Offset from csum_start to L4 checksum field
+ */
+struct ionic_txq_desc {
+	__le64 cmd;
+#define IONIC_TXQ_DESC_OPCODE_MASK		0xf
+#define IONIC_TXQ_DESC_OPCODE_SHIFT		4
+#define IONIC_TXQ_DESC_FLAGS_MASK		0xf
+#define IONIC_TXQ_DESC_FLAGS_SHIFT		0
+#define IONIC_TXQ_DESC_NSGE_MASK		0xf
+#define IONIC_TXQ_DESC_NSGE_SHIFT		8
+#define IONIC_TXQ_DESC_ADDR_MASK		(BIT_ULL(IONIC_ADDR_LEN) - 1)
+#define IONIC_TXQ_DESC_ADDR_SHIFT		12
+
+/* common flags */
+#define IONIC_TXQ_DESC_FLAG_VLAN		0x1
+#define IONIC_TXQ_DESC_FLAG_ENCAP		0x2
+
+/* flags for csum_hw opcode */
+#define IONIC_TXQ_DESC_FLAG_CSUM_L3		0x4
+#define IONIC_TXQ_DESC_FLAG_CSUM_L4		0x8
+
+/* flags for tso opcode */
+#define IONIC_TXQ_DESC_FLAG_TSO_SOT		0x4
+#define IONIC_TXQ_DESC_FLAG_TSO_EOT		0x8
+
+	__le16 len;
+	union {
+		__le16 vlan_tci;
+		__le16 hword0;
+	};
+	union {
+		__le16 csum_start;
+		__le16 hdr_len;
+		__le16 hword1;
+	};
+	union {
+		__le16 csum_offset;
+		__le16 mss;
+		__le16 hword2;
+	};
+};
+
+static inline u64 encode_txq_desc_cmd(u8 opcode, u8 flags,
+				      u8 nsge, u64 addr)
+{
+	u64 cmd;
+
+	cmd = (opcode & IONIC_TXQ_DESC_OPCODE_MASK) << IONIC_TXQ_DESC_OPCODE_SHIFT;
+	cmd |= (flags & IONIC_TXQ_DESC_FLAGS_MASK) << IONIC_TXQ_DESC_FLAGS_SHIFT;
+	cmd |= (nsge & IONIC_TXQ_DESC_NSGE_MASK) << IONIC_TXQ_DESC_NSGE_SHIFT;
+	cmd |= (addr & IONIC_TXQ_DESC_ADDR_MASK) << IONIC_TXQ_DESC_ADDR_SHIFT;
+
+	return cmd;
+}
+
+static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
+				       u8 *nsge, u64 *addr)
+{
+	*opcode = (cmd >> IONIC_TXQ_DESC_OPCODE_SHIFT) & IONIC_TXQ_DESC_OPCODE_MASK;
+	*flags = (cmd >> IONIC_TXQ_DESC_FLAGS_SHIFT) & IONIC_TXQ_DESC_FLAGS_MASK;
+	*nsge = (cmd >> IONIC_TXQ_DESC_NSGE_SHIFT) & IONIC_TXQ_DESC_NSGE_MASK;
+	*addr = (cmd >> IONIC_TXQ_DESC_ADDR_SHIFT) & IONIC_TXQ_DESC_ADDR_MASK;
+}
+
+/**
+ * struct ionic_txq_sg_elem - Transmit scatter-gather (SG) descriptor element
+ * @addr: DMA address of SG element data buffer
+ * @len:  Length of SG element data buffer, in bytes
+ */
+struct ionic_txq_sg_elem {
+	__le64 addr;
+	__le16 len;
+	__le16 rsvd[3];
+};
+
+/**
+ * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
+ * @elems: Scatter-gather elements
+ */
+struct ionic_txq_sg_desc {
+#define IONIC_TX_MAX_SG_ELEMS		8
+#define IONIC_TX_SG_DESC_STRIDE		8
+	struct ionic_txq_sg_elem elems[IONIC_TX_MAX_SG_ELEMS];
+};
+
+struct ionic_txq_sg_desc_v1 {
+#define IONIC_TX_MAX_SG_ELEMS_V1	15
+#define IONIC_TX_SG_DESC_STRIDE_V1	16
+	struct ionic_txq_sg_elem elems[IONIC_TX_SG_DESC_STRIDE_V1];
+};
+
+/**
+ * struct ionic_txq_comp - Ethernet transmit queue completion descriptor
+ * @status:     Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @color:      Color bit
+ */
+struct ionic_txq_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+	u8     rsvd2[11];
+	u8     color;
+};
+
+enum ionic_rxq_desc_opcode {
+	IONIC_RXQ_DESC_OPCODE_SIMPLE = 0,
+	IONIC_RXQ_DESC_OPCODE_SG = 1,
+};
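
Putting the Tx descriptor pieces together, a minimal sketch (not part of the patch) of a one-fragment checksum-offload send; the buffer address and offsets are placeholders:

/* Editor's sketch: one-fragment TCP packet with partial L4 csum offload. */
static void example_fill_tx_desc(struct ionic_txq_desc *desc,
				 dma_addr_t buf, u16 pkt_len,
				 u16 l4_off, u16 csum_off)
{
	/* encode returns host order; the descriptor field is little-endian */
	desc->cmd = cpu_to_le64(encode_txq_desc_cmd(
			IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
			0 /* flags */, 0 /* no SG elems */, buf));
	desc->len = cpu_to_le16(pkt_len);
	desc->csum_start = cpu_to_le16(l4_off);    /* start of L4 header */
	desc->csum_offset = cpu_to_le16(csum_off); /* e.g. 16 for TCP */
}
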
/**
+ * struct ionic_rxq_desc - Ethernet Rx queue descriptor format
+ * @opcode: Rx operation, see IONIC_RXQ_DESC_OPCODE_*:
+ *
+ *          IONIC_RXQ_DESC_OPCODE_SIMPLE:
+ *             Receive full packet into data buffer
+ *             starting at @addr. Results of
+ *             receive, including actual bytes received,
+ *             are recorded in Rx completion descriptor.
+ *
+ * @len:  Data buffer's length, in bytes
+ * @addr: Data buffer's DMA address
+ */
+struct ionic_rxq_desc {
+	u8     opcode;
+	u8     rsvd[5];
+	__le16 len;
+	__le64 addr;
+};
+
+/**
+ * struct ionic_rxq_sg_elem - Receive scatter-gather (SG) descriptor element
+ * @addr: DMA address of SG element data buffer
+ * @len:  Length of SG element data buffer, in bytes
+ */
+struct ionic_rxq_sg_elem {
+	__le64 addr;
+	__le16 len;
+	__le16 rsvd[3];
+};
+
+/**
+ * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
+ * @elems: Scatter-gather elements
+ */
+struct ionic_rxq_sg_desc {
+#define IONIC_RX_MAX_SG_ELEMS		8
+#define IONIC_RX_SG_DESC_STRIDE		8
+	struct ionic_rxq_sg_elem elems[IONIC_RX_SG_DESC_STRIDE];
+};
+
+/**
+ * struct ionic_rxq_comp - Ethernet receive queue completion descriptor
+ * @status:       Status of the command (enum ionic_status_code)
+ * @num_sg_elems: Number of SG elements used by this descriptor
+ * @comp_index:   Index in the descriptor ring for which this is the completion
+ * @rss_hash:     32-bit RSS hash
+ * @csum:         16-bit sum of the packet's L2 payload
+ *                If the packet's L2 payload is odd length, an extra
+ *                zero-value byte is included in the @csum calculation but
+ *                not included in @len.
+ * @vlan_tci:     VLAN tag stripped from the packet. Valid if @VLAN is
+ *                set. Includes .1p and .1q tags.
+ * @len:          Received packet length, in bytes. Excludes FCS.
+ * @csum_calc:    L2 payload checksum is computed or not
+ * @csum_flags:   See IONIC_RXQ_COMP_CSUM_F_*:
+ *
+ *          IONIC_RXQ_COMP_CSUM_F_TCP_OK:
+ *             The TCP checksum calculated by the device
+ *             matched the checksum in the receive packet's
+ *             TCP header.
+ *
+ *          IONIC_RXQ_COMP_CSUM_F_TCP_BAD:
+ *             The TCP checksum calculated by the device did
+ *             not match the checksum in the receive packet's
+ *             TCP header.
+ *
+ *          IONIC_RXQ_COMP_CSUM_F_UDP_OK:
+ *             The UDP checksum calculated by the device
+ *             matched the checksum in the receive packet's
+ *             UDP header
+ *
+ *          IONIC_RXQ_COMP_CSUM_F_UDP_BAD:
+ *             The UDP checksum calculated by the device did
+ *             not match the checksum in the receive packet's
+ *             UDP header.
+ *
+ *          IONIC_RXQ_COMP_CSUM_F_IP_OK:
+ *             The IPv4 checksum calculated by the device
+ *             matched the checksum in the receive packet's
+ *             first IPv4 header. If the receive packet
+ *             contains both a tunnel IPv4 header and a
+ *             transport IPv4 header, the device validates the
+ *             checksum for the both IPv4 headers.
+ *
+ *          IONIC_RXQ_COMP_CSUM_F_IP_BAD:
+ *             The IPv4 checksum calculated by the device did
+ *             not match the checksum in the receive packet's
+ *             first IPv4 header. If the receive packet
+ *             contains both a tunnel IPv4 header and a
+ *             transport IPv4 header, the device validates the
+ *             checksum for both IP headers.
+ *
+ *          IONIC_RXQ_COMP_CSUM_F_VLAN:
+ *             The VLAN header was stripped and placed in @vlan_tci.
+ *
+ *          IONIC_RXQ_COMP_CSUM_F_CALC:
+ *             The checksum was calculated by the device.
+ *
+ * @pkt_type_color: Packet type and color bit; see IONIC_RXQ_COMP_PKT_TYPE_MASK
+ */
+struct ionic_rxq_comp {
+	u8     status;
+	u8     num_sg_elems;
+	__le16 comp_index;
+	__le32 rss_hash;
+	__le16 csum;
+	__le16 vlan_tci;
+	__le16 len;
+	u8     csum_flags;
+#define IONIC_RXQ_COMP_CSUM_F_TCP_OK	0x01
+#define IONIC_RXQ_COMP_CSUM_F_TCP_BAD	0x02
+#define IONIC_RXQ_COMP_CSUM_F_UDP_OK	0x04
+#define IONIC_RXQ_COMP_CSUM_F_UDP_BAD	0x08
+#define IONIC_RXQ_COMP_CSUM_F_IP_OK	0x10
+#define IONIC_RXQ_COMP_CSUM_F_IP_BAD	0x20
+#define IONIC_RXQ_COMP_CSUM_F_VLAN	0x40
+#define IONIC_RXQ_COMP_CSUM_F_CALC	0x80
+	u8     pkt_type_color;
+#define IONIC_RXQ_COMP_PKT_TYPE_MASK	0x7f
+};
+
+enum ionic_pkt_type {
+	IONIC_PKT_TYPE_NON_IP		= 0x00,
+	IONIC_PKT_TYPE_IPV4		= 0x01,
+	IONIC_PKT_TYPE_IPV4_TCP		= 0x03,
+	IONIC_PKT_TYPE_IPV4_UDP		= 0x05,
+	IONIC_PKT_TYPE_IPV6		= 0x08,
+	IONIC_PKT_TYPE_IPV6_TCP		= 0x18,
+	IONIC_PKT_TYPE_IPV6_UDP		= 0x28,
+	/* below types are only used if encap offloads are enabled on lif */
+	IONIC_PKT_TYPE_ENCAP_NON_IP	= 0x40,
+	IONIC_PKT_TYPE_ENCAP_IPV4	= 0x41,
+	IONIC_PKT_TYPE_ENCAP_IPV4_TCP	= 0x43,
+	IONIC_PKT_TYPE_ENCAP_IPV4_UDP	= 0x45,
+	IONIC_PKT_TYPE_ENCAP_IPV6	= 0x48,
+	IONIC_PKT_TYPE_ENCAP_IPV6_TCP	= 0x58,
+	IONIC_PKT_TYPE_ENCAP_IPV6_UDP	= 0x68,
+};
+
+enum ionic_eth_hw_features {
+	IONIC_ETH_HW_VLAN_TX_TAG	= BIT(0),
+	IONIC_ETH_HW_VLAN_RX_STRIP	= BIT(1),
+	IONIC_ETH_HW_VLAN_RX_FILTER	= BIT(2),
+	IONIC_ETH_HW_RX_HASH		= BIT(3),
+	IONIC_ETH_HW_RX_CSUM		= BIT(4),
+	IONIC_ETH_HW_TX_SG		= BIT(5),
+	IONIC_ETH_HW_RX_SG		= BIT(6),
+	IONIC_ETH_HW_TX_CSUM		= BIT(7),
+	IONIC_ETH_HW_TSO		= BIT(8),
+	IONIC_ETH_HW_TSO_IPV6		= BIT(9),
+	IONIC_ETH_HW_TSO_ECN		= BIT(10),
+	IONIC_ETH_HW_TSO_GRE		= BIT(11),
+	IONIC_ETH_HW_TSO_GRE_CSUM	= BIT(12),
+	IONIC_ETH_HW_TSO_IPXIP4		= BIT(13),
+	IONIC_ETH_HW_TSO_IPXIP6		= BIT(14),
+	IONIC_ETH_HW_TSO_UDP		= BIT(15),
+	IONIC_ETH_HW_TSO_UDP_CSUM	= BIT(16),
+	IONIC_ETH_HW_RX_CSUM_GENEVE	= BIT(17),
+	IONIC_ETH_HW_TX_CSUM_GENEVE	= BIT(18),
+	IONIC_ETH_HW_TSO_GENEVE		= BIT(19),
+	IONIC_ETH_HW_TIMESTAMP		= BIT(20),
+};
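
A short sketch (not part of the patch) of how the packed `pkt_type_color` byte and the checksum flags decode on the Rx completion path:

/* Editor's sketch: pull the packet type and color bit out of a Rx completion. */
static void example_parse_rx_comp(const struct ionic_rxq_comp *comp,
				  u8 *pkt_type, bool *color, bool *csum_ok)
{
	*pkt_type = comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	*color = !!(comp->pkt_type_color & ~IONIC_RXQ_COMP_PKT_TYPE_MASK);
	/* "ok" here means the device computed a checksum and flagged no error */
	*csum_ok = (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) &&
		   !(comp->csum_flags & (IONIC_RXQ_COMP_CSUM_F_TCP_BAD |
					 IONIC_RXQ_COMP_CSUM_F_UDP_BAD |
					 IONIC_RXQ_COMP_CSUM_F_IP_BAD));
}
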
/**
+ * enum ionic_pkt_class - Packet classification mask.
+ *
+ * Used with rx steering filter, packets indicated by the mask can be steered
+ * toward a specific receive queue.
+ *
+ * @IONIC_PKT_CLS_NTP_ALL:      All NTP packets.
+ * @IONIC_PKT_CLS_PTP1_SYNC:    PTPv1 sync
+ * @IONIC_PKT_CLS_PTP1_DREQ:    PTPv1 delay-request
+ * @IONIC_PKT_CLS_PTP1_ALL:     PTPv1 all packets
+ * @IONIC_PKT_CLS_PTP2_L4_SYNC: PTPv2-UDP sync
+ * @IONIC_PKT_CLS_PTP2_L4_DREQ: PTPv2-UDP delay-request
+ * @IONIC_PKT_CLS_PTP2_L4_ALL:  PTPv2-UDP all packets
+ * @IONIC_PKT_CLS_PTP2_L2_SYNC: PTPv2-ETH sync
+ * @IONIC_PKT_CLS_PTP2_L2_DREQ: PTPv2-ETH delay-request
+ * @IONIC_PKT_CLS_PTP2_L2_ALL:  PTPv2-ETH all packets
+ * @IONIC_PKT_CLS_PTP2_SYNC:    PTPv2 sync
+ * @IONIC_PKT_CLS_PTP2_DREQ:    PTPv2 delay-request
+ * @IONIC_PKT_CLS_PTP2_ALL:     PTPv2 all packets
+ * @IONIC_PKT_CLS_PTP_SYNC:     PTP sync
+ * @IONIC_PKT_CLS_PTP_DREQ:     PTP delay-request
+ * @IONIC_PKT_CLS_PTP_ALL:      PTP all packets
+ */
+enum ionic_pkt_class {
+	IONIC_PKT_CLS_NTP_ALL		= BIT(0),
+
+	IONIC_PKT_CLS_PTP1_SYNC		= BIT(1),
+	IONIC_PKT_CLS_PTP1_DREQ		= BIT(2),
+	IONIC_PKT_CLS_PTP1_ALL		= BIT(3) |
+		IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP1_DREQ,
+
+	IONIC_PKT_CLS_PTP2_L4_SYNC	= BIT(4),
+	IONIC_PKT_CLS_PTP2_L4_DREQ	= BIT(5),
+	IONIC_PKT_CLS_PTP2_L4_ALL	= BIT(6) |
+		IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L4_DREQ,
+
+	IONIC_PKT_CLS_PTP2_L2_SYNC	= BIT(7),
+	IONIC_PKT_CLS_PTP2_L2_DREQ	= BIT(8),
+	IONIC_PKT_CLS_PTP2_L2_ALL	= BIT(9) |
+		IONIC_PKT_CLS_PTP2_L2_SYNC | IONIC_PKT_CLS_PTP2_L2_DREQ,
+
+	IONIC_PKT_CLS_PTP2_SYNC =
+		IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L2_SYNC,
+	IONIC_PKT_CLS_PTP2_DREQ =
+		IONIC_PKT_CLS_PTP2_L4_DREQ | IONIC_PKT_CLS_PTP2_L2_DREQ,
+	IONIC_PKT_CLS_PTP2_ALL =
+		IONIC_PKT_CLS_PTP2_L4_ALL | IONIC_PKT_CLS_PTP2_L2_ALL,
+
+	IONIC_PKT_CLS_PTP_SYNC =
+		IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP2_SYNC,
+	IONIC_PKT_CLS_PTP_DREQ =
+		IONIC_PKT_CLS_PTP1_DREQ | IONIC_PKT_CLS_PTP2_DREQ,
+	IONIC_PKT_CLS_PTP_ALL =
+		IONIC_PKT_CLS_PTP1_ALL | IONIC_PKT_CLS_PTP2_ALL,
+};
+
+/**
+ * struct ionic_q_control_cmd - Queue control command
+ * @opcode:    opcode
+ * @type:      Queue type
+ * @lif_index: LIF index
+ * @index:     Queue index
+ * @oper:      Operation (enum ionic_q_control_oper)
+ */
+struct ionic_q_control_cmd {
+	u8     opcode;
+	u8     type;
+	__le16 lif_index;
+	__le32 index;
+	u8     oper;
+	u8     rsvd[55];
+};
+
+typedef struct ionic_admin_comp ionic_q_control_comp;
+
+enum ionic_q_control_oper {
+	IONIC_Q_DISABLE		= 0,
+	IONIC_Q_ENABLE		= 1,
+	IONIC_Q_HANG_RESET	= 2,
+};
+
+/**
+ * enum ionic_phy_type - Physical connection type
+ * @IONIC_PHY_TYPE_NONE:   No PHY installed
+ * @IONIC_PHY_TYPE_COPPER: Copper PHY
+ * @IONIC_PHY_TYPE_FIBER:  Fiber PHY
+ */
+enum ionic_phy_type {
+	IONIC_PHY_TYPE_NONE	= 0,
+	IONIC_PHY_TYPE_COPPER	= 1,
+	IONIC_PHY_TYPE_FIBER	= 2,
+};
+
+/**
+ * enum ionic_xcvr_state - Transceiver status
+ * @IONIC_XCVR_STATE_REMOVED:        Transceiver removed
+ * @IONIC_XCVR_STATE_INSERTED:       Transceiver inserted
+ * @IONIC_XCVR_STATE_PENDING:        Transceiver pending
+ * @IONIC_XCVR_STATE_SPROM_READ:     Transceiver data read
+ * @IONIC_XCVR_STATE_SPROM_READ_ERR: Transceiver data read error
+ */
+enum ionic_xcvr_state {
+	IONIC_XCVR_STATE_REMOVED	 = 0,
+	IONIC_XCVR_STATE_INSERTED	 = 1,
+	IONIC_XCVR_STATE_PENDING	 = 2,
+	IONIC_XCVR_STATE_SPROM_READ	 = 3,
+	IONIC_XCVR_STATE_SPROM_READ_ERR	 = 4,
+};
+
+/**
+ * enum ionic_xcvr_pid - Supported link modes
+ */
+enum ionic_xcvr_pid {
+	IONIC_XCVR_PID_UNKNOWN		= 0,
+
+	/* CU */
+	IONIC_XCVR_PID_QSFP_100G_CR4	= 1,
+	IONIC_XCVR_PID_QSFP_40GBASE_CR4	= 2,
+	IONIC_XCVR_PID_SFP_25GBASE_CR_S	= 3,
+	IONIC_XCVR_PID_SFP_25GBASE_CR_L	= 4,
+	IONIC_XCVR_PID_SFP_25GBASE_CR_N	= 5,
+
+	/* Fiber */
+	IONIC_XCVR_PID_QSFP_100G_AOC	= 50,
+	IONIC_XCVR_PID_QSFP_100G_ACC	= 51,
+
IONIC_XCVR_PID_QSFP_100G_SR4 = 52, + IONIC_XCVR_PID_QSFP_100G_LR4 = 53, + IONIC_XCVR_PID_QSFP_100G_ER4 = 54, + IONIC_XCVR_PID_QSFP_40GBASE_ER4 = 55, + IONIC_XCVR_PID_QSFP_40GBASE_SR4 = 56, + IONIC_XCVR_PID_QSFP_40GBASE_LR4 = 57, + IONIC_XCVR_PID_QSFP_40GBASE_AOC = 58, + IONIC_XCVR_PID_SFP_25GBASE_SR = 59, + IONIC_XCVR_PID_SFP_25GBASE_LR = 60, + IONIC_XCVR_PID_SFP_25GBASE_ER = 61, + IONIC_XCVR_PID_SFP_25GBASE_AOC = 62, + IONIC_XCVR_PID_SFP_10GBASE_SR = 63, + IONIC_XCVR_PID_SFP_10GBASE_LR = 64, + IONIC_XCVR_PID_SFP_10GBASE_LRM = 65, + IONIC_XCVR_PID_SFP_10GBASE_ER = 66, + IONIC_XCVR_PID_SFP_10GBASE_AOC = 67, + IONIC_XCVR_PID_SFP_10GBASE_CU = 68, + IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69, + IONIC_XCVR_PID_QSFP_100G_PSM4 = 70, + IONIC_XCVR_PID_SFP_25GBASE_ACC = 71, + IONIC_XCVR_PID_SFP_10GBASE_T = 72, + IONIC_XCVR_PID_SFP_1000BASE_T = 73, +}; + +/** + * enum ionic_port_admin_state - Port config state + * @IONIC_PORT_ADMIN_STATE_NONE: Port admin state not configured + * @IONIC_PORT_ADMIN_STATE_DOWN: Port admin disabled + * @IONIC_PORT_ADMIN_STATE_UP: Port admin enabled + */ +enum ionic_port_admin_state { + IONIC_PORT_ADMIN_STATE_NONE = 0, + IONIC_PORT_ADMIN_STATE_DOWN = 1, + IONIC_PORT_ADMIN_STATE_UP = 2, +}; + +/** + * enum ionic_port_oper_status - Port operational status + * @IONIC_PORT_OPER_STATUS_NONE: Port disabled + * @IONIC_PORT_OPER_STATUS_UP: Port link status up + * @IONIC_PORT_OPER_STATUS_DOWN: Port link status down + */ +enum ionic_port_oper_status { + IONIC_PORT_OPER_STATUS_NONE = 0, + IONIC_PORT_OPER_STATUS_UP = 1, + IONIC_PORT_OPER_STATUS_DOWN = 2, +}; + +/** + * enum ionic_port_fec_type - Ethernet Forward error correction (FEC) modes + * @IONIC_PORT_FEC_TYPE_NONE: FEC Disabled + * @IONIC_PORT_FEC_TYPE_FC: FireCode FEC + * @IONIC_PORT_FEC_TYPE_RS: ReedSolomon FEC + */ +enum ionic_port_fec_type { + IONIC_PORT_FEC_TYPE_NONE = 0, + IONIC_PORT_FEC_TYPE_FC = 1, + IONIC_PORT_FEC_TYPE_RS = 2, +}; + +/** + * enum ionic_port_pause_type - Ethernet pause (flow control) modes + * @IONIC_PORT_PAUSE_TYPE_NONE: Disable Pause + * @IONIC_PORT_PAUSE_TYPE_LINK: Link level pause + * @IONIC_PORT_PAUSE_TYPE_PFC: Priority-Flow Control + */ +enum ionic_port_pause_type { + IONIC_PORT_PAUSE_TYPE_NONE = 0, + IONIC_PORT_PAUSE_TYPE_LINK = 1, + IONIC_PORT_PAUSE_TYPE_PFC = 2, +}; + +/** + * enum ionic_port_loopback_mode - Loopback modes + * @IONIC_PORT_LOOPBACK_MODE_NONE: Disable loopback + * @IONIC_PORT_LOOPBACK_MODE_MAC: MAC loopback + * @IONIC_PORT_LOOPBACK_MODE_PHY: PHY/SerDes loopback + */ +enum ionic_port_loopback_mode { + IONIC_PORT_LOOPBACK_MODE_NONE = 0, + IONIC_PORT_LOOPBACK_MODE_MAC = 1, + IONIC_PORT_LOOPBACK_MODE_PHY = 2, +}; + +/** + * struct ionic_xcvr_status - Transceiver Status information + * @state: Transceiver status (enum ionic_xcvr_state) + * @phy: Physical connection type (enum ionic_phy_type) + * @pid: Transceiver link mode (enum ionic_xcvr_pid) + * @sprom: Transceiver sprom contents + */ +struct ionic_xcvr_status { + u8 state; + u8 phy; + __le16 pid; + u8 sprom[256]; +}; + +/** + * union ionic_port_config - Port configuration + * @speed: port speed (in Mbps) + * @mtu: mtu + * @state: port admin state (enum ionic_port_admin_state) + * @an_enable: autoneg enable + * @fec_type: fec type (enum ionic_port_fec_type) + * @pause_type: pause type (enum ionic_port_pause_type) + * @loopback_mode: loopback mode (enum ionic_port_loopback_mode) + */ +union ionic_port_config { + struct { +#define IONIC_SPEED_100G 100000 /* 100G in Mbps */ +#define IONIC_SPEED_50G 50000 /* 50G in Mbps */ +#define 
IONIC_SPEED_40G		40000	/* 40G in Mbps */
+#define IONIC_SPEED_25G		25000	/* 25G in Mbps */
+#define IONIC_SPEED_10G		10000	/* 10G in Mbps */
+#define IONIC_SPEED_1G		1000	/* 1G in Mbps */
+		__le32 speed;
+		__le32 mtu;
+		u8     state;
+		u8     an_enable;
+		u8     fec_type;
+#define IONIC_PAUSE_TYPE_MASK		0x0f
+#define IONIC_PAUSE_FLAGS_MASK		0xf0
+#define IONIC_PAUSE_F_TX		0x10
+#define IONIC_PAUSE_F_RX		0x20
+		u8     pause_type;
+		u8     loopback_mode;
+	};
+	__le32 words[64];
+};
+
+/**
+ * struct ionic_port_status - Port Status information
+ * @status:          link status (enum ionic_port_oper_status)
+ * @id:              port id
+ * @speed:           link speed (in Mbps)
+ * @link_down_count: number of times link went from up to down
+ * @fec_type:        fec type (enum ionic_port_fec_type)
+ * @xcvr:            transceiver status
+ */
+struct ionic_port_status {
+	__le32 id;
+	__le32 speed;
+	u8     status;
+	__le16 link_down_count;
+	u8     fec_type;
+	u8     rsvd[48];
+	struct ionic_xcvr_status xcvr;
+} __attribute__((packed));
+
+/**
+ * struct ionic_port_identify_cmd - Port identify command
+ * @opcode: opcode
+ * @index:  port index
+ * @ver:    Highest version of identify supported by driver
+ */
+struct ionic_port_identify_cmd {
+	u8 opcode;
+	u8 index;
+	u8 ver;
+	u8 rsvd[61];
+};
+
+/**
+ * struct ionic_port_identify_comp - Port identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver:    Version of identify returned by device
+ */
+struct ionic_port_identify_comp {
+	u8 status;
+	u8 ver;
+	u8 rsvd[14];
+};
+
+/**
+ * struct ionic_port_init_cmd - Port initialization command
+ * @opcode:  opcode
+ * @index:   port index
+ * @info_pa: destination address for port info (struct ionic_port_info)
+ */
+struct ionic_port_init_cmd {
+	u8     opcode;
+	u8     index;
+	u8     rsvd[6];
+	__le64 info_pa;
+	u8     rsvd2[48];
+};
+
+/**
+ * struct ionic_port_init_comp - Port initialization command completion
+ * @status: Status of the command (enum ionic_status_code)
+ */
+struct ionic_port_init_comp {
+	u8 status;
+	u8 rsvd[15];
+};
+
+/**
+ * struct ionic_port_reset_cmd - Port reset command
+ * @opcode: opcode
+ * @index:  port index
+ */
+struct ionic_port_reset_cmd {
+	u8 opcode;
+	u8 index;
+	u8 rsvd[62];
+};
+
+/**
+ * struct ionic_port_reset_comp - Port reset command completion
+ * @status: Status of the command (enum ionic_status_code)
+ */
+struct ionic_port_reset_comp {
+	u8 status;
+	u8 rsvd[15];
+};
+
+/**
+ * enum ionic_stats_ctl_cmd - List of commands for stats control
+ * @IONIC_STATS_CTL_RESET: Reset statistics
+ */
+enum ionic_stats_ctl_cmd {
+	IONIC_STATS_CTL_RESET		= 0,
+};
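
A small sketch (not part of the patch) of the pause_type encoding above: the low nibble carries the pause type, the high nibble the direction flags.

/* Editor's sketch: request link-level pause, enabled in both directions. */
static void example_set_pause(union ionic_port_config *pc)
{
	pc->pause_type = IONIC_PORT_PAUSE_TYPE_LINK |
			 IONIC_PAUSE_F_TX | IONIC_PAUSE_F_RX;
}
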
/**
+ * enum ionic_txstamp_mode - List of TX Timestamping Modes
+ * @IONIC_TXSTAMP_OFF:          Disable TX hardware timestamping.
+ * @IONIC_TXSTAMP_ON:           Enable local TX hardware timestamping.
+ * @IONIC_TXSTAMP_ONESTEP_SYNC: Modify TX PTP Sync packets.
+ * @IONIC_TXSTAMP_ONESTEP_P2P:  Modify TX PTP Sync and PDelayResp.
+ */
+enum ionic_txstamp_mode {
+	IONIC_TXSTAMP_OFF		= 0,
+	IONIC_TXSTAMP_ON		= 1,
+	IONIC_TXSTAMP_ONESTEP_SYNC	= 2,
+	IONIC_TXSTAMP_ONESTEP_P2P	= 3,
+};
+
+/**
+ * enum ionic_port_attr - List of port attributes
+ * @IONIC_PORT_ATTR_STATE:      Port state attribute
+ * @IONIC_PORT_ATTR_SPEED:      Port speed attribute
+ * @IONIC_PORT_ATTR_MTU:        Port MTU attribute
+ * @IONIC_PORT_ATTR_AUTONEG:    Port autonegotiation attribute
+ * @IONIC_PORT_ATTR_FEC:        Port FEC attribute
+ * @IONIC_PORT_ATTR_PAUSE:      Port pause attribute
+ * @IONIC_PORT_ATTR_LOOPBACK:   Port loopback attribute
+ * @IONIC_PORT_ATTR_STATS_CTRL: Port statistics control attribute
+ */
+enum ionic_port_attr {
+	IONIC_PORT_ATTR_STATE		= 0,
+	IONIC_PORT_ATTR_SPEED		= 1,
+	IONIC_PORT_ATTR_MTU		= 2,
+	IONIC_PORT_ATTR_AUTONEG		= 3,
+	IONIC_PORT_ATTR_FEC		= 4,
+	IONIC_PORT_ATTR_PAUSE		= 5,
+	IONIC_PORT_ATTR_LOOPBACK	= 6,
+	IONIC_PORT_ATTR_STATS_CTRL	= 7,
+};
+
+/**
+ * struct ionic_port_setattr_cmd - Set port attributes on the NIC
+ * @opcode:        Opcode
+ * @index:         Port index
+ * @attr:          Attribute type (enum ionic_port_attr)
+ * @state:         Port state
+ * @speed:         Port speed
+ * @mtu:           Port MTU
+ * @an_enable:     Port autonegotiation setting
+ * @fec_type:      Port FEC type setting
+ * @pause_type:    Port pause type setting
+ * @loopback_mode: Port loopback mode
+ * @stats_ctl:     Port stats setting
+ */
+struct ionic_port_setattr_cmd {
+	u8 opcode;
+	u8 index;
+	u8 attr;
+	u8 rsvd;
+	union {
+		u8     state;
+		__le32 speed;
+		__le32 mtu;
+		u8     an_enable;
+		u8     fec_type;
+		u8     pause_type;
+		u8     loopback_mode;
+		u8     stats_ctl;
+		u8     rsvd2[60];
+	};
+};
+
+/**
+ * struct ionic_port_setattr_comp - Port set attr command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @color:  Color bit
+ */
+struct ionic_port_setattr_comp {
+	u8 status;
+	u8 rsvd[14];
+	u8 color;
+};
+
+/**
+ * struct ionic_port_getattr_cmd - Get port attributes from the NIC
+ * @opcode: Opcode
+ * @index:  port index
+ * @attr:   Attribute type (enum ionic_port_attr)
+ */
+struct ionic_port_getattr_cmd {
+	u8 opcode;
+	u8 index;
+	u8 attr;
+	u8 rsvd[61];
+};
+
+/**
+ * struct ionic_port_getattr_comp - Port get attr command completion
+ * @status:        Status of the command (enum ionic_status_code)
+ * @state:         Port state
+ * @speed:         Port speed
+ * @mtu:           Port MTU
+ * @an_enable:     Port autonegotiation setting
+ * @fec_type:      Port FEC type setting
+ * @pause_type:    Port pause type setting
+ * @loopback_mode: Port loopback mode
+ * @color:         Color bit
+ */
+struct ionic_port_getattr_comp {
+	u8 status;
+	u8 rsvd[3];
+	union {
+		u8     state;
+		__le32 speed;
+		__le32 mtu;
+		u8     an_enable;
+		u8     fec_type;
+		u8     pause_type;
+		u8     loopback_mode;
+		u8     rsvd2[11];
+	} __attribute__((packed));
+	u8 color;
+};
+
+/**
+ * struct ionic_lif_status - LIF status register
+ * @eid:             most recent NotifyQ event id
+ * @port_num:        port the LIF is connected to
+ * @link_status:     port status (enum ionic_port_oper_status)
+ * @link_speed:      speed of link in Mbps
+ * @link_down_count: number of times link went from up to down
+ */
+struct ionic_lif_status {
+	__le64 eid;
+	u8     port_num;
+	u8     rsvd;
+	__le16 link_status;
+	__le32 link_speed;	/* units of 1Mbps: eg 10000 = 10Gbps */
+	__le16 link_down_count;
+	u8     rsvd2[46];
+};
+
+/**
+ * struct ionic_lif_reset_cmd - LIF reset command
+ * @opcode: opcode
+ * @index:  LIF index
+ */
+struct ionic_lif_reset_cmd {
+	u8     opcode;
+	u8     rsvd;
+	__le16 index;
+	__le32 rsvd2[15];
+};
+
+typedef struct ionic_admin_comp ionic_lif_reset_comp;
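
For illustration, a minimal sketch (not part of the patch) of the set-attribute pattern these structs implement; `IONIC_CMD_PORT_SETATTR` is assumed to be the opcode constant defined elsewhere in this header.

/* Editor's sketch: set the port MTU through the attribute interface. */
static void example_set_port_mtu(struct ionic_port_setattr_cmd *cmd, u32 mtu)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = IONIC_CMD_PORT_SETATTR;
	cmd->index = 0;			/* first (only) port */
	cmd->attr = IONIC_PORT_ATTR_MTU;
	cmd->mtu = cpu_to_le32(mtu);	/* union member selected by attr */
}
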
enum ionic_dev_state {
+	IONIC_DEV_DISABLE	= 0,
+	IONIC_DEV_ENABLE	= 1,
+	IONIC_DEV_HANG_RESET	= 2,
+};
+
+/**
+ * enum ionic_dev_attr - List of device attributes
+ * @IONIC_DEV_ATTR_STATE:    Device state attribute
+ * @IONIC_DEV_ATTR_NAME:     Device name attribute
+ * @IONIC_DEV_ATTR_FEATURES: Device feature attributes
+ */
+enum ionic_dev_attr {
+	IONIC_DEV_ATTR_STATE    = 0,
+	IONIC_DEV_ATTR_NAME     = 1,
+	IONIC_DEV_ATTR_FEATURES = 2,
+};
+
+/**
+ * struct ionic_dev_setattr_cmd - Set Device attributes on the NIC
+ * @opcode:   Opcode
+ * @attr:     Attribute type (enum ionic_dev_attr)
+ * @state:    Device state (enum ionic_dev_state)
+ * @name:     The bus info, e.g. PCI slot-device-function, 0 terminated
+ * @features: Device features
+ */
+struct ionic_dev_setattr_cmd {
+	u8     opcode;
+	u8     attr;
+	__le16 rsvd;
+	union {
+		u8     state;
+		char   name[IONIC_IFNAMSIZ];
+		__le64 features;
+		u8     rsvd2[60];
+	} __attribute__((packed));
+};
+
+/**
+ * struct ionic_dev_setattr_comp - Device set attr command completion
+ * @status:   Status of the command (enum ionic_status_code)
+ * @features: Device features
+ * @color:    Color bit
+ */
+struct ionic_dev_setattr_comp {
+	u8 status;
+	u8 rsvd[3];
+	union {
+		__le64 features;
+		u8     rsvd2[11];
+	} __attribute__((packed));
+	u8 color;
+};
+
+/**
+ * struct ionic_dev_getattr_cmd - Get Device attributes from the NIC
+ * @opcode: opcode
+ * @attr:   Attribute type (enum ionic_dev_attr)
+ */
+struct ionic_dev_getattr_cmd {
+	u8 opcode;
+	u8 attr;
+	u8 rsvd[62];
+};
+
+/**
+ * struct ionic_dev_getattr_comp - Device get attr command completion
+ * @status:   Status of the command (enum ionic_status_code)
+ * @features: Device features
+ * @color:    Color bit
+ */
+struct ionic_dev_getattr_comp {
+	u8 status;
+	u8 rsvd[3];
+	union {
+		__le64 features;
+		u8     rsvd2[11];
+	} __attribute__((packed));
+	u8 color;
+};
+
+/**
+ * RSS parameters
+ */
+#define IONIC_RSS_HASH_KEY_SIZE		40
+
+enum ionic_rss_hash_types {
+	IONIC_RSS_TYPE_IPV4	= BIT(0),
+	IONIC_RSS_TYPE_IPV4_TCP	= BIT(1),
+	IONIC_RSS_TYPE_IPV4_UDP	= BIT(2),
+	IONIC_RSS_TYPE_IPV6	= BIT(3),
+	IONIC_RSS_TYPE_IPV6_TCP	= BIT(4),
+	IONIC_RSS_TYPE_IPV6_UDP	= BIT(5),
+};
+
+/**
+ * enum ionic_lif_attr - List of LIF attributes
+ * @IONIC_LIF_ATTR_STATE:      LIF state attribute
+ * @IONIC_LIF_ATTR_NAME:       LIF name attribute
+ * @IONIC_LIF_ATTR_MTU:        LIF MTU attribute
+ * @IONIC_LIF_ATTR_MAC:        LIF MAC attribute
+ * @IONIC_LIF_ATTR_FEATURES:   LIF features attribute
+ * @IONIC_LIF_ATTR_RSS:        LIF RSS attribute
+ * @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
+ * @IONIC_LIF_ATTR_TXSTAMP:    LIF TX timestamping mode
+ */
+enum ionic_lif_attr {
+	IONIC_LIF_ATTR_STATE		= 0,
+	IONIC_LIF_ATTR_NAME		= 1,
+	IONIC_LIF_ATTR_MTU		= 2,
+	IONIC_LIF_ATTR_MAC		= 3,
+	IONIC_LIF_ATTR_FEATURES		= 4,
+	IONIC_LIF_ATTR_RSS		= 5,
+	IONIC_LIF_ATTR_STATS_CTRL	= 6,
+	IONIC_LIF_ATTR_TXSTAMP		= 7,
+};
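
A quick sketch (not part of the patch) of the device-level variant of the same pattern; `IONIC_CMD_DEV_SETATTR` is assumed to be defined elsewhere in this header.

/* Editor's sketch: tell the device its bus identity via the NAME attribute. */
static void example_set_dev_name(struct ionic_dev_setattr_cmd *cmd,
				 const char *bus_info)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = IONIC_CMD_DEV_SETATTR;
	cmd->attr = IONIC_DEV_ATTR_NAME;
	strscpy(cmd->name, bus_info, sizeof(cmd->name));
}
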
/**
+ * struct ionic_lif_setattr_cmd - Set LIF attributes on the NIC
+ * @opcode:       Opcode
+ * @attr:         Attribute type (enum ionic_lif_attr)
+ * @index:        LIF index
+ * @state:        LIF state (enum ionic_lif_state)
+ * @name:         The netdev name string, 0 terminated
+ * @mtu:          Mtu
+ * @mac:          Station mac
+ * @features:     Features (enum ionic_eth_hw_features)
+ * @rss:          RSS properties
+ *     @types:    The hash types to enable (see rss_hash_types)
+ *     @key:      The hash secret key
+ *     @addr:     Address for the indirection table shared memory
+ * @stats_ctl:    stats control commands (enum ionic_stats_ctl_cmd)
+ * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode)
+ */
+struct ionic_lif_setattr_cmd {
+	u8     opcode;
+	u8     attr;
+	__le16 index;
+	union {
+		u8     state;
+		char   name[IONIC_IFNAMSIZ];
+		__le32 mtu;
+		u8     mac[6];
+		__le64 features;
+		struct {
+			__le16 types;
+			u8     key[IONIC_RSS_HASH_KEY_SIZE];
+			u8     rsvd[6];
+			__le64 addr;
+		} rss;
+		u8     stats_ctl;
+		__le16 txstamp_mode;
+		u8     rsvd[60];
+	} __attribute__((packed));
+};
+
+/**
+ * struct ionic_lif_setattr_comp - LIF set attr command completion
+ * @status:     Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @features:   features (enum ionic_eth_hw_features)
+ * @color:      Color bit
+ */
+struct ionic_lif_setattr_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+	union {
+		__le64 features;
+		u8     rsvd2[11];
+	} __attribute__((packed));
+	u8 color;
+};
+
+/**
+ * struct ionic_lif_getattr_cmd - Get LIF attributes from the NIC
+ * @opcode: Opcode
+ * @attr:   Attribute type (enum ionic_lif_attr)
+ * @index:  LIF index
+ */
+struct ionic_lif_getattr_cmd {
+	u8     opcode;
+	u8     attr;
+	__le16 index;
+	u8     rsvd[60];
+};
+
+/**
+ * struct ionic_lif_getattr_comp - LIF get attr command completion
+ * @status:       Status of the command (enum ionic_status_code)
+ * @comp_index:   Index in the descriptor ring for which this is the completion
+ * @state:        LIF state (enum ionic_lif_state)
+ * @name:         The netdev name string, 0 terminated
+ * @mtu:          Mtu
+ * @mac:          Station mac
+ * @features:     Features (enum ionic_eth_hw_features)
+ * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @color:        Color bit
+ */
+struct ionic_lif_getattr_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+	union {
+		u8     state;
+		__le32 mtu;
+		u8     mac[6];
+		__le64 features;
+		__le16 txstamp_mode;
+		u8     rsvd2[11];
+	} __attribute__((packed));
+	u8 color;
+};
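
A sketch (not part of the patch) of configuring RSS through this command; the indirection table is assumed to already live in DMA memory at `ind_tbl_pa`, and `IONIC_CMD_LIF_SETATTR` is assumed to be defined elsewhere in this header.

/* Editor's sketch: enable IPv4/IPv6 TCP RSS on a LIF. */
static void example_set_rss(struct ionic_lif_setattr_cmd *cmd, u16 lif_index,
			    const u8 *key, dma_addr_t ind_tbl_pa)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = IONIC_CMD_LIF_SETATTR;
	cmd->attr = IONIC_LIF_ATTR_RSS;
	cmd->index = cpu_to_le16(lif_index);
	cmd->rss.types = cpu_to_le16(IONIC_RSS_TYPE_IPV4_TCP |
				     IONIC_RSS_TYPE_IPV6_TCP);
	memcpy(cmd->rss.key, key, IONIC_RSS_HASH_KEY_SIZE);
	cmd->rss.addr = cpu_to_le64(ind_tbl_pa);
}
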
/**
+ * struct ionic_lif_setphc_cmd - Set LIF PTP Hardware Clock
+ * @opcode:    Opcode
+ * @lif_index: LIF index
+ * @tick:      Hardware stamp tick of an instant in time.
+ * @nsec:      Nanosecond stamp of the same instant.
+ * @frac:      Fractional nanoseconds at the same instant.
+ * @mult:      Cycle to nanosecond multiplier.
+ * @shift:     Cycle to nanosecond divisor (power of two).
+ */
+struct ionic_lif_setphc_cmd {
+	u8     opcode;
+	u8     rsvd1;
+	__le16 lif_index;
+	u8     rsvd2[4];
+	__le64 tick;
+	__le64 nsec;
+	__le64 frac;
+	__le32 mult;
+	__le32 shift;
+	u8     rsvd3[24];
+};
+
+IONIC_CHECK_CMD_LENGTH(ionic_lif_setphc_cmd);
+
+enum ionic_rx_mode {
+	IONIC_RX_MODE_F_UNICAST		= BIT(0),
+	IONIC_RX_MODE_F_MULTICAST	= BIT(1),
+	IONIC_RX_MODE_F_BROADCAST	= BIT(2),
+	IONIC_RX_MODE_F_PROMISC		= BIT(3),
+	IONIC_RX_MODE_F_ALLMULTI	= BIT(4),
+	IONIC_RX_MODE_F_RDMA_SNIFFER	= BIT(5),
+};
+
+/**
+ * struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command
+ * @opcode:    opcode
+ * @lif_index: LIF index
+ * @rx_mode:   Rx mode flags:
+ *     IONIC_RX_MODE_F_UNICAST:      Accept known unicast packets
+ *     IONIC_RX_MODE_F_MULTICAST:    Accept known multicast packets
+ *     IONIC_RX_MODE_F_BROADCAST:    Accept broadcast packets
+ *     IONIC_RX_MODE_F_PROMISC:      Accept any packets
+ *     IONIC_RX_MODE_F_ALLMULTI:     Accept any multicast packets
+ *     IONIC_RX_MODE_F_RDMA_SNIFFER: Sniff RDMA packets
+ */
+struct ionic_rx_mode_set_cmd {
+	u8     opcode;
+	u8     rsvd;
+	__le16 lif_index;
+	__le16 rx_mode;
+	__le16 rsvd2[29];
+};
+
+typedef struct ionic_admin_comp ionic_rx_mode_set_comp;
+
+enum ionic_rx_filter_match_type {
+	IONIC_RX_FILTER_MATCH_VLAN	= 0x0,
+	IONIC_RX_FILTER_MATCH_MAC	= 0x1,
+	IONIC_RX_FILTER_MATCH_MAC_VLAN	= 0x2,
+	IONIC_RX_FILTER_STEER_PKTCLASS	= 0x10,
+};
+
+/**
+ * struct ionic_rx_filter_add_cmd - Add LIF Rx filter command
+ * @opcode:    opcode
+ * @qtype:     Queue type
+ * @lif_index: LIF index
+ * @qid:       Queue ID
+ * @match:     Rx filter match type (see IONIC_RX_FILTER_MATCH_xxx)
+ * @vlan:      VLAN filter
+ *     @vlan:  VLAN ID
+ * @mac:       MAC filter
+ *     @addr:  MAC address (network-byte order)
+ * @mac_vlan:  MACVLAN filter
+ *     @vlan:  VLAN ID
+ *     @addr:  MAC address (network-byte order)
+ * @pkt_class: Packet classification filter
+ */
+struct ionic_rx_filter_add_cmd {
+	u8     opcode;
+	u8     qtype;
+	__le16 lif_index;
+	__le32 qid;
+	__le16 match;
+	union {
+		struct {
+			__le16 vlan;
+		} vlan;
+		struct {
+			u8 addr[6];
+		} mac;
+		struct {
+			__le16 vlan;
+			u8     addr[6];
+		} mac_vlan;
+		__le64 pkt_class;
+		u8     rsvd[54];
+	} __attribute__((packed));
+};
+
+/**
+ * struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
+ * @status:     Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @filter_id:  Filter ID
+ * @color:      Color bit
+ */
+struct ionic_rx_filter_add_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+	__le32 filter_id;
+	u8     rsvd2[7];
+	u8     color;
+};
+
+/**
+ * struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command
+ * @opcode:    opcode
+ * @lif_index: LIF index
+ * @filter_id: Filter ID
+ */
+struct ionic_rx_filter_del_cmd {
+	u8     opcode;
+	u8     rsvd;
+	__le16 lif_index;
+	__le32 filter_id;
+	u8     rsvd2[56];
+};
+
+typedef struct ionic_admin_comp ionic_rx_filter_del_comp;
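
A sketch (not part of the patch) of adding a VLAN-match Rx filter; `IONIC_CMD_RX_FILTER_ADD` is assumed to be the opcode constant defined elsewhere in this header.

/* Editor's sketch: accept a VLAN by installing a VLAN-match filter. */
static void example_add_vlan_filter(struct ionic_rx_filter_add_cmd *cmd,
				    u16 lif_index, u16 vid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = IONIC_CMD_RX_FILTER_ADD;
	cmd->lif_index = cpu_to_le16(lif_index);
	cmd->match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN);
	cmd->vlan.vlan = cpu_to_le16(vid);
	/* the completion returns a filter_id for a later delete command */
}
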
enum ionic_vf_attr {
+	IONIC_VF_ATTR_SPOOFCHK	= 1,
+	IONIC_VF_ATTR_TRUST	= 2,
+	IONIC_VF_ATTR_MAC	= 3,
+	IONIC_VF_ATTR_LINKSTATE	= 4,
+	IONIC_VF_ATTR_VLAN	= 5,
+	IONIC_VF_ATTR_RATE	= 6,
+	IONIC_VF_ATTR_STATSADDR	= 7,
+};
+
+/**
+ * enum ionic_vf_link_status - Virtual Function link status
+ * @IONIC_VF_LINK_STATUS_AUTO: Use link state of the uplink
+ * @IONIC_VF_LINK_STATUS_UP:   Link always up
+ * @IONIC_VF_LINK_STATUS_DOWN: Link always down
+ */
+enum ionic_vf_link_status {
+	IONIC_VF_LINK_STATUS_AUTO = 0,
+	IONIC_VF_LINK_STATUS_UP   = 1,
+	IONIC_VF_LINK_STATUS_DOWN = 2,
+};
+
+/**
+ * struct ionic_vf_setattr_cmd - Set VF attributes on the NIC
+ * @opcode:    Opcode
+ * @attr:      Attribute type (enum ionic_vf_attr)
+ * @vf_index:  VF index
+ * @macaddr:   mac address
+ * @vlanid:    vlan ID
+ * @maxrate:   max Tx rate in Mbps
+ * @spoofchk:  enable address spoof checking
+ * @trust:     enable VF trust
+ * @linkstate: set link up or down
+ * @stats_pa:  set DMA address for VF stats
+ */
+struct ionic_vf_setattr_cmd {
+	u8     opcode;
+	u8     attr;
+	__le16 vf_index;
+	union {
+		u8     macaddr[6];
+		__le16 vlanid;
+		__le32 maxrate;
+		u8     spoofchk;
+		u8     trust;
+		u8     linkstate;
+		__le64 stats_pa;
+		u8     pad[60];
+	} __attribute__((packed));
+};
+
+struct ionic_vf_setattr_comp {
+	u8     status;
+	u8     attr;
+	__le16 vf_index;
+	__le16 comp_index;
+	u8     rsvd[9];
+	u8     color;
+};
+
+/**
+ * struct ionic_vf_getattr_cmd - Get VF attributes from the NIC
+ * @opcode:   Opcode
+ * @attr:     Attribute type (enum ionic_vf_attr)
+ * @vf_index: VF index
+ */
+struct ionic_vf_getattr_cmd {
+	u8     opcode;
+	u8     attr;
+	__le16 vf_index;
+	u8     rsvd[60];
+};
+
+struct ionic_vf_getattr_comp {
+	u8     status;
+	u8     attr;
+	__le16 vf_index;
+	union {
+		u8     macaddr[6];
+		__le16 vlanid;
+		__le32 maxrate;
+		u8     spoofchk;
+		u8     trust;
+		u8     linkstate;
+		__le64 stats_pa;
+		u8     pad[11];
+	} __attribute__((packed));
+	u8 color;
+};
+
+enum ionic_vf_ctrl_opcode {
+	IONIC_VF_CTRL_START_ALL	= 0,
+	IONIC_VF_CTRL_START	= 1,
+};
+
+/**
+ * struct ionic_vf_ctrl_cmd - VF control command
+ * @opcode:      Opcode for the command
+ * @vf_index:    VF Index. It is unused if op START_ALL is used.
+ * @ctrl_opcode: VF control operation type
+ */
+struct ionic_vf_ctrl_cmd {
+	u8     opcode;
+	u8     ctrl_opcode;
+	__le16 vf_index;
+	u8     rsvd1[60];
+};
+IONIC_CHECK_CMD_LENGTH(ionic_vf_ctrl_cmd);
+
+/**
+ * struct ionic_vf_ctrl_comp - VF_CTRL command completion.
+ * @status: Status of the command (enum ionic_status_code)
+ */
+struct ionic_vf_ctrl_comp {
+	u8 status;
+	u8 rsvd[15];
+};
+IONIC_CHECK_COMP_LENGTH(ionic_vf_ctrl_comp);
+
+/**
+ * struct ionic_qos_identify_cmd - QoS identify command
+ * @opcode: opcode
+ * @ver:    Highest version of identify supported by driver
+ *
+ */
+struct ionic_qos_identify_cmd {
+	u8 opcode;
+	u8 ver;
+	u8 rsvd[62];
+};
+
+/**
+ * struct ionic_qos_identify_comp - QoS identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver:    Version of identify returned by device
+ */
+struct ionic_qos_identify_comp {
+	u8 status;
+	u8 ver;
+	u8 rsvd[14];
+};
+
+#define IONIC_QOS_TC_MAX		8
+#define IONIC_QOS_ALL_TC		0xFF
+/* Capri max supported, should be renamed.
*/ +#define IONIC_QOS_CLASS_MAX 7 +#define IONIC_QOS_PCP_MAX 8 +#define IONIC_QOS_CLASS_NAME_SZ 32 +#define IONIC_QOS_DSCP_MAX 64 +#define IONIC_QOS_ALL_PCP 0xFF +#define IONIC_DSCP_BLOCK_SIZE 8 + +/** + * enum ionic_qos_class + */ +enum ionic_qos_class { + IONIC_QOS_CLASS_DEFAULT = 0, + IONIC_QOS_CLASS_USER_DEFINED_1 = 1, + IONIC_QOS_CLASS_USER_DEFINED_2 = 2, + IONIC_QOS_CLASS_USER_DEFINED_3 = 3, + IONIC_QOS_CLASS_USER_DEFINED_4 = 4, + IONIC_QOS_CLASS_USER_DEFINED_5 = 5, + IONIC_QOS_CLASS_USER_DEFINED_6 = 6, +}; + +/** + * enum ionic_qos_class_type - Traffic classification criteria + * @IONIC_QOS_CLASS_TYPE_NONE: No QoS + * @IONIC_QOS_CLASS_TYPE_PCP: Dot1Q PCP + * @IONIC_QOS_CLASS_TYPE_DSCP: IP DSCP + */ +enum ionic_qos_class_type { + IONIC_QOS_CLASS_TYPE_NONE = 0, + IONIC_QOS_CLASS_TYPE_PCP = 1, + IONIC_QOS_CLASS_TYPE_DSCP = 2, +}; + +/** + * enum ionic_qos_sched_type - QoS class scheduling type + * @IONIC_QOS_SCHED_TYPE_STRICT: Strict priority + * @IONIC_QOS_SCHED_TYPE_DWRR: Deficit weighted round-robin + */ +enum ionic_qos_sched_type { + IONIC_QOS_SCHED_TYPE_STRICT = 0, + IONIC_QOS_SCHED_TYPE_DWRR = 1, +}; + +/** + * union ionic_qos_config - QoS configuration structure + * @flags: Configuration flags + * IONIC_QOS_CONFIG_F_ENABLE enable + * IONIC_QOS_CONFIG_F_NO_DROP drop/nodrop + * IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite + * IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite + * IONIC_QOS_CONFIG_F_NON_DISRUPTIVE Non-disruptive TC update + * @sched_type: QoS class scheduling type (enum ionic_qos_sched_type) + * @class_type: QoS class type (enum ionic_qos_class_type) + * @pause_type: QoS pause type (enum ionic_qos_pause_type) + * @name: QoS class name + * @mtu: MTU of the class + * @pfc_cos: Priority-Flow Control class of service + * @dwrr_weight: QoS class scheduling weight + * @strict_rlmt: Rate limit for strict priority scheduling + * @rw_dot1q_pcp: Rewrite dot1q pcp to value (valid iff F_RW_DOT1Q_PCP) + * @rw_ip_dscp: Rewrite ip dscp to value (valid iff F_RW_IP_DSCP) + * @dot1q_pcp: Dot1q pcp value + * @ndscp: Number of valid dscp values in the ip_dscp field + * @ip_dscp: IP dscp values + */ +union ionic_qos_config { + struct { +#define IONIC_QOS_CONFIG_F_ENABLE BIT(0) +#define IONIC_QOS_CONFIG_F_NO_DROP BIT(1) +/* Used to rewrite PCP or DSCP value. */ +#define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2) +#define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3) +/* Non-disruptive TC update */ +#define IONIC_QOS_CONFIG_F_NON_DISRUPTIVE BIT(4) + u8 flags; + u8 sched_type; + u8 class_type; + u8 pause_type; + char name[IONIC_QOS_CLASS_NAME_SZ]; + __le32 mtu; + /* flow control */ + u8 pfc_cos; + /* scheduler */ + union { + u8 dwrr_weight; + __le64 strict_rlmt; + }; + /* marking */ + /* Used to rewrite PCP or DSCP value. 
*/
+	union {
+		u8 rw_dot1q_pcp;
+		u8 rw_ip_dscp;
+	};
+	/* classification */
+	union {
+		u8 dot1q_pcp;
+		struct {
+			u8 ndscp;
+			u8 ip_dscp[IONIC_QOS_DSCP_MAX];
+		};
+	};
+	} __attribute__((packed));
+	__le32 words[64];
+};
+
+/**
+ * union ionic_qos_identity - QoS identity structure
+ * @version:  Version of the identify structure
+ * @type:     QoS system type
+ * @nclasses: Number of usable QoS classes
+ * @config:   Current configuration of classes
+ */
+union ionic_qos_identity {
+	struct {
+		u8 version;
+		u8 type;
+		u8 rsvd[62];
+		union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
+	};
+	__le32 words[478];
+};
+
+/**
+ * struct ionic_qos_init_cmd - QoS config init command
+ * @opcode:  Opcode
+ * @group:   QoS class id
+ * @info_pa: destination address for qos info
+ */
+struct ionic_qos_init_cmd {
+	u8     opcode;
+	u8     group;
+	u8     rsvd[6];
+	__le64 info_pa;
+	u8     rsvd1[48];
+};
+
+typedef struct ionic_admin_comp ionic_qos_init_comp;
+
+/**
+ * struct ionic_qos_reset_cmd - QoS config reset command
+ * @opcode: Opcode
+ * @group:  QoS class id
+ */
+struct ionic_qos_reset_cmd {
+	u8 opcode;
+	u8 group;
+	u8 rsvd[62];
+};
+
+/**
+ * struct ionic_qos_clear_stats_cmd - QoS clear port stats command
+ * @opcode:       Opcode
+ * @group_bitmap: Bitmap of QoS groups to clear
+ */
+struct ionic_qos_clear_stats_cmd {
+	u8 opcode;
+	u8 group_bitmap;
+	u8 rsvd[62];
+};
+
+typedef struct ionic_admin_comp ionic_qos_reset_comp;
+
+/**
+ * struct ionic_fw_download_cmd - Firmware download command
+ * @opcode: opcode
+ * @addr:   dma address of the firmware buffer
+ * @offset: offset of the firmware buffer within the full image
+ * @length: number of valid bytes in the firmware buffer
+ */
+struct ionic_fw_download_cmd {
+	u8     opcode;
+	u8     rsvd[3];
+	__le32 offset;
+	__le64 addr;
+	__le32 length;
+};
+
+typedef struct ionic_admin_comp ionic_fw_download_comp;
+
+/**
+ * enum ionic_fw_control_oper - FW control operations
+ * @IONIC_FW_RESET:           Reset firmware
+ * @IONIC_FW_INSTALL:         Install firmware
+ * @IONIC_FW_ACTIVATE:        Activate firmware
+ * @IONIC_FW_INSTALL_ASYNC:   Install firmware asynchronously
+ * @IONIC_FW_INSTALL_STATUS:  Firmware installation status
+ * @IONIC_FW_ACTIVATE_ASYNC:  Activate firmware asynchronously
+ * @IONIC_FW_ACTIVATE_STATUS: Firmware activate status
+ */
+enum ionic_fw_control_oper {
+	IONIC_FW_RESET		 = 0,
+	IONIC_FW_INSTALL	 = 1,
+	IONIC_FW_ACTIVATE	 = 2,
+	IONIC_FW_INSTALL_ASYNC	 = 3,
+	IONIC_FW_INSTALL_STATUS	 = 4,
+	IONIC_FW_ACTIVATE_ASYNC	 = 5,
+	IONIC_FW_ACTIVATE_STATUS = 6,
+	IONIC_FW_UPDATE_CLEANUP	 = 7,
+	IONIC_FW_GET_BOOT	 = 8,
+};
+
+enum ionic_fw_slot {
+	IONIC_FW_SLOT_INVALID	= 0,
+	IONIC_FW_SLOT_A		= 1,
+	IONIC_FW_SLOT_B		= 2,
+	IONIC_FW_SLOT_GOLD	= 3,
+};
+
+/**
+ * struct ionic_fw_control_cmd - Firmware control command
+ * @opcode: opcode
+ * @oper:   firmware control operation (enum ionic_fw_control_oper)
+ * @slot:   slot to activate
+ */
+struct ionic_fw_control_cmd {
+	u8 opcode;
+	u8 rsvd[3];
+	u8 oper;
+	u8 slot;
+	u8 rsvd1[58];
+};
+
+/**
+ * struct ionic_fw_control_comp - Firmware control completion
+ * @status:     Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @slot:       Slot where the firmware was installed
+ * @color:      Color bit
+ */
+struct ionic_fw_control_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+	u8     slot;
+	u8     rsvd1[10];
+	u8     color;
+};
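
A sketch (not part of the patch) of the download flow the command above implies: the image is staged through a single DMA buffer in chunks, each described by offset/addr/length. `ionic_issue_devcmd()` and `IONIC_CMD_FW_DOWNLOAD` are hypothetical stand-ins for the submit helper and opcode constant defined elsewhere.

/* Editor's sketch: push a firmware image to the device in chunks. */
static void example_fw_download(const u8 *fw, u32 fw_len,
				dma_addr_t buf_pa, u8 *buf, u32 buf_sz)
{
	struct ionic_fw_download_cmd cmd = { .opcode = IONIC_CMD_FW_DOWNLOAD };
	u32 off, len;

	for (off = 0; off < fw_len; off += len) {
		len = min(buf_sz, fw_len - off);
		memcpy(buf, fw + off, len);	/* stage chunk in DMA buffer */
		cmd.offset = cpu_to_le32(off);
		cmd.addr = cpu_to_le64(buf_pa);
		cmd.length = cpu_to_le32(len);
		ionic_issue_devcmd(&cmd);	/* hypothetical submit helper */
	}
}
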
/******************************************************************
+ ******************* RDMA Commands ********************************
+ ******************************************************************/
+
+/**
+ * struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
+ * @opcode:    opcode
+ * @lif_index: LIF index
+ *
+ * There is no RDMA specific dev command completion struct. Completion uses
+ * the common struct ionic_admin_comp. Only the status is indicated.
+ * Nonzero status means the LIF does not support RDMA.
+ **/
+struct ionic_rdma_reset_cmd {
+	u8     opcode;
+	u8     rsvd;
+	__le16 lif_index;
+	u8     rsvd2[60];
+};
+
+/**
+ * struct ionic_rdma_queue_cmd - Create RDMA Queue command
+ * @opcode:      opcode, 52, 53
+ * @lif_index:   LIF index
+ * @qid_ver:     (qid | (RDMA version << 24))
+ * @cid:         intr, eq_id, or cq_id
+ * @dbid:        doorbell page id
+ * @depth_log2:  log base two of queue depth
+ * @stride_log2: log base two of queue stride
+ * @dma_addr:    address of the queue memory
+ *
+ * The same command struct is used to create an RDMA event queue, completion
+ * queue, or RDMA admin queue. The cid is an interrupt number for an event
+ * queue, an event queue id for a completion queue, or a completion queue id
+ * for an RDMA admin queue.
+ *
+ * The queue created via a dev command must be contiguous in dma space.
+ *
+ * The dev commands are intended only to be used during driver initialization,
+ * to create queues supporting the RDMA admin queue. Other queues, and other
+ * types of RDMA resources like memory regions, will be created and registered
+ * via the RDMA admin queue, and will support a more complete interface
+ * providing scatter gather lists for larger, scattered queue buffers and
+ * memory registration.
+ *
+ * There is no RDMA specific dev command completion struct. Completion uses
+ * the common struct ionic_admin_comp. Only the status is indicated.
+ **/
+struct ionic_rdma_queue_cmd {
+	u8     opcode;
+	u8     rsvd;
+	__le16 lif_index;
+	__le32 qid_ver;
+	__le32 cid;
+	__le16 dbid;
+	u8     depth_log2;
+	u8     stride_log2;
+	__le64 dma_addr;
+	u8     rsvd2[40];
+};
+
+/******************************************************************
+ ******************* HII Commands *********************************
+ ******************************************************************/
+#define IONIC_HII_IDENTITY_VERSION	1
+
+/**
+ * struct ionic_hii_identify_cmd - HII identify command
+ * @opcode: opcode
+ * @ver:    Highest version of identify supported by driver
+ */
+struct ionic_hii_identify_cmd {
+	u8 opcode;
+	u8 ver;
+	u8 rsvd[62];
+};
+
+IONIC_CHECK_CMD_LENGTH(ionic_hii_identify_cmd);
+
+/**
+ * struct ionic_hii_identify_comp - HII identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver:    Version of identify returned by device
+ */
+struct ionic_hii_identify_comp {
+	u8 status;
+	u8 ver;
+	u8 rsvd[14];
+};
+
+IONIC_CHECK_COMP_LENGTH(ionic_hii_identify_comp);
+
+/**
+ * enum ionic_hii_capabilities - Bitmap of HII capabilities
+ * @IONIC_HII_CAPABILITY_NCSI: NCSI is supported
+ * @IONIC_HII_CAPABILITY_OOB:  Out of band management is supported
+ */
+enum ionic_hii_capabilities {
+	IONIC_HII_CAPABILITY_NCSI = 0,
+	IONIC_HII_CAPABILITY_OOB  = 1,
+};
+
+/**
+ * union ionic_hii_dev_identity - HII identity information
+ * @ver:          HII Identify version
+ * @oob_en:       Enable out of band management
+ * @uid_led_on:   Turn on the UID led
+ * @vlan_en:      Enable pxe vlan
+ * @vlan:         Vlan id used for pxe
+ * @capabilities: Bitmap of capabilities supported by nic
+ */
+union ionic_hii_dev_identity {
+	struct {
+		u8     ver;
+		u8     oob_en;
+		u8     uid_led_on;
+		u8     vlan_en;
+		__le16 vlan;
+		__le32 capabilities;
+	};
+	__le32 words[478];
+};
+
+IONIC_CHECK_CMD_DATA_LENGTH(ionic_hii_dev_identity);
/**
+ * struct ionic_hii_init_cmd - HII initialization command
+ * @opcode:     opcode
+ * @oob_en:     Enable out of band management
+ * @uid_led_on: Turn on the UID led
+ * @vlan_en:    Enable pxe vlan
+ * @vlan:       Vlan id used for pxe
+ */
+struct ionic_hii_init_cmd {
+	u8     opcode;
+	u8     oob_en;
+	u8     uid_led_on;
+	u8     vlan_en;
+	__le16 vlan;
+	u8     rsvd[58];
+};
+
+IONIC_CHECK_CMD_LENGTH(ionic_hii_init_cmd);
+
+/**
+ * struct ionic_hii_init_comp - HII initialization command completion
+ * @status: Status of the command (enum ionic_status_code)
+ */
+struct ionic_hii_init_comp {
+	u8 status;
+	u8 rsvd[15];
+};
+
+IONIC_CHECK_COMP_LENGTH(ionic_hii_init_comp);
+
+/**
+ * enum ionic_hii_attr - List of HII attributes
+ * @IONIC_HII_ATTR_OOB_EN:  HII OOB enable attribute
+ * @IONIC_HII_ATTR_UID_LED: HII set UID led attribute
+ * @IONIC_HII_ATTR_VLAN:    HII PXE vlan attribute
+ */
+enum ionic_hii_attr {
+	IONIC_HII_ATTR_OOB_EN	= 0,
+	IONIC_HII_ATTR_UID_LED	= 1,
+	IONIC_HII_ATTR_VLAN	= 2,
+};
+
+/**
+ * struct ionic_hii_setattr_cmd - Set hii attributes on the NIC
+ * @opcode:     Opcode
+ * @attr:       Attribute type (enum ionic_hii_attr)
+ * @oob_en:     Enable out of band management
+ * @uid_led_on: Turn on the UID led
+ * @vlan:       VLAN attributes
+ *     @enable: Enable pxe vlan
+ *     @id:     Pxe vlan id
+ */
+struct ionic_hii_setattr_cmd {
+	u8 opcode;
+	u8 attr;
+	union {
+		u8 oob_en;
+		u8 uid_led_on;
+		struct {
+			u8     enable;
+			u8     rsvd;
+			__le16 id;
+		} vlan;
+		u8 rsvd2[62];
+	};
+};
+
+IONIC_CHECK_CMD_LENGTH(ionic_hii_setattr_cmd);
+
+/**
+ * struct ionic_hii_setattr_comp - Hii set attr command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @color:  Color bit
+ */
+struct ionic_hii_setattr_comp {
+	u8 status;
+	u8 rsvd[14];
+	u8 color;
+};
+
+IONIC_CHECK_COMP_LENGTH(ionic_hii_setattr_comp);
+
+/**
+ * struct ionic_hii_getattr_cmd - Get hii attributes from the NIC
+ * @opcode: Opcode
+ * @attr:   Attribute type (enum ionic_hii_attr)
+ */
+struct ionic_hii_getattr_cmd {
+	u8 opcode;
+	u8 attr;
+	u8 rsvd[62];
+};
+
+IONIC_CHECK_CMD_LENGTH(ionic_hii_getattr_cmd);
+
+/**
+ * struct ionic_hii_getattr_comp - Hii get attr command completion
+ * @status:     Status of the command (enum ionic_status_code)
+ * @oob_en:     Enable out of band management
+ * @uid_led_on: Turn on the UID led
+ * @vlan:       VLAN attributes:
+ *     @enable: Enable pxe vlan
+ *     @id:     Pxe vlan id
+ * @color:      Color bit
+ */
+struct ionic_hii_getattr_comp {
+	u8 status;
+	u8 rsvd[3];
+	union {
+		u8 oob_en;
+		u8 uid_led_on;
+		struct {
+			u8     enable;
+			u8     rsvd2;
+			__le16 id;
+		} vlan;
+		u8 rsvd3[11];
+	} __attribute__((packed));
+	u8 color;
+};
+
+#ifndef __CHECKER__
+IONIC_CHECK_COMP_LENGTH(ionic_hii_getattr_comp);
+#endif
+
+/**
+ * struct ionic_hii_reset_cmd - HII configuration reset command
+ * @opcode: opcode
+ */
+struct ionic_hii_reset_cmd {
+	u8 opcode;
+	u8 rsvd[63];
+};
+
+IONIC_CHECK_CMD_LENGTH(ionic_hii_reset_cmd);
+
+/**
+ * struct ionic_hii_reset_comp - HII reset command completion
+ * @status: Status of the command (enum ionic_status_code)
+ */
+struct ionic_hii_reset_comp {
+	u8 status;
+	u8 rsvd[15];
+};
+
+IONIC_CHECK_COMP_LENGTH(ionic_hii_reset_comp);
+
+/******************************************************************
+ ******************* Notify Events ********************************
+ ******************************************************************/
+
+/**
+ * struct ionic_notifyq_event - Generic event reporting structure
+ * @eid:   event number
+ * @ecode: event code
+ * @data:  unspecified data about the event
+ *
+ * This is the generic event report struct from which the other
+ * actual events will be formed.
/****************************************************************** + ******************* Notify Events ******************************** + ******************************************************************/ + +/** + * struct ionic_notifyq_event - Generic event reporting structure + * @eid: event number + * @ecode: event code + * @data: unspecified data about the event + * + * This is the generic event report struct from which the other + * actual events will be formed. + */ +struct ionic_notifyq_event { + __le64 eid; + __le16 ecode; + u8 data[54]; +}; + +/** + * struct ionic_link_change_event - Link change event notification + * @eid: event number + * @ecode: event code = IONIC_EVENT_LINK_CHANGE + * @link_status: link up/down, with error bits (enum ionic_port_status) + * @link_speed: speed of the network link + * + * Sent when the network link state changes between UP and DOWN + */ +struct ionic_link_change_event { + __le64 eid; + __le16 ecode; + __le16 link_status; + __le32 link_speed; /* units of 1Mbps: e.g. 10000 = 10Gbps */ + u8 rsvd[48]; +}; + +/** + * struct ionic_reset_event - Reset event notification + * @eid: event number + * @ecode: event code = IONIC_EVENT_RESET + * @reset_code: reset type + * @state: 0=pending, 1=complete, 2=error + * + * Sent when the NIC or some subsystem is going to be or + * has been reset. + */ +struct ionic_reset_event { + __le64 eid; + __le16 ecode; + u8 reset_code; + u8 state; + u8 rsvd[52]; +}; + +/** + * struct ionic_heartbeat_event - Sent periodically by NIC to indicate health + * @eid: event number + * @ecode: event code = IONIC_EVENT_HEARTBEAT + */ +struct ionic_heartbeat_event { + __le64 eid; + __le16 ecode; + u8 rsvd[54]; +}; + +/** + * struct ionic_log_event - Sent to notify the driver of an internal error + * @eid: event number + * @ecode: event code = IONIC_EVENT_LOG + * @data: log data + */ +struct ionic_log_event { + __le64 eid; + __le16 ecode; + u8 data[54]; +}; + +/** + * struct ionic_xcvr_event - Transceiver change event + * @eid: event number + * @ecode: event code = IONIC_EVENT_XCVR + */ +struct ionic_xcvr_event { + __le64 eid; + __le16 ecode; + u8 rsvd[54]; +}; + +/** + * struct ionic_port_stats - Port statistics structure + */ +struct ionic_port_stats { + __le64 frames_rx_ok; + __le64 frames_rx_all; + __le64 frames_rx_bad_fcs; + __le64 frames_rx_bad_all; + __le64 octets_rx_ok; + __le64 octets_rx_all; + __le64 frames_rx_unicast; + __le64 frames_rx_multicast; + __le64 frames_rx_broadcast; + __le64 frames_rx_pause; + __le64 frames_rx_bad_length; + __le64 frames_rx_undersized; + __le64 frames_rx_oversized; + __le64 frames_rx_fragments; + __le64 frames_rx_jabber; + __le64 frames_rx_pripause; + __le64 frames_rx_stomped_crc; + __le64 frames_rx_too_long; + __le64 frames_rx_vlan_good; + __le64 frames_rx_dropped; + __le64 frames_rx_less_than_64b; + __le64 frames_rx_64b; + __le64 frames_rx_65b_127b; + __le64 frames_rx_128b_255b; + __le64 frames_rx_256b_511b; + __le64 frames_rx_512b_1023b; + __le64 frames_rx_1024b_1518b; + __le64 frames_rx_1519b_2047b; + __le64 frames_rx_2048b_4095b; + __le64 frames_rx_4096b_8191b; + __le64 frames_rx_8192b_9215b; + __le64 frames_rx_other; + __le64 frames_tx_ok; + __le64 frames_tx_all; + __le64 frames_tx_bad; + __le64 octets_tx_ok; + __le64 octets_tx_total; + __le64 frames_tx_unicast; + __le64 frames_tx_multicast; + __le64 frames_tx_broadcast; + __le64 frames_tx_pause; + __le64 frames_tx_pripause; + __le64 frames_tx_vlan; + __le64 frames_tx_less_than_64b; + __le64 frames_tx_64b; + __le64 frames_tx_65b_127b; + __le64 frames_tx_128b_255b; + __le64 frames_tx_256b_511b; + __le64 frames_tx_512b_1023b; + __le64 frames_tx_1024b_1518b; + __le64 frames_tx_1519b_2047b; + __le64 frames_tx_2048b_4095b; + __le64 frames_tx_4096b_8191b; + __le64 frames_tx_8192b_9215b; + __le64 frames_tx_other; + __le64 frames_tx_pri_0; + __le64 frames_tx_pri_1; + __le64 frames_tx_pri_2; + __le64 frames_tx_pri_3; + __le64 frames_tx_pri_4; + __le64 frames_tx_pri_5; + __le64 frames_tx_pri_6; +
__le64 frames_tx_pri_7; + __le64 frames_rx_pri_0; + __le64 frames_rx_pri_1; + __le64 frames_rx_pri_2; + __le64 frames_rx_pri_3; + __le64 frames_rx_pri_4; + __le64 frames_rx_pri_5; + __le64 frames_rx_pri_6; + __le64 frames_rx_pri_7; + __le64 tx_pripause_0_1us_count; + __le64 tx_pripause_1_1us_count; + __le64 tx_pripause_2_1us_count; + __le64 tx_pripause_3_1us_count; + __le64 tx_pripause_4_1us_count; + __le64 tx_pripause_5_1us_count; + __le64 tx_pripause_6_1us_count; + __le64 tx_pripause_7_1us_count; + __le64 rx_pripause_0_1us_count; + __le64 rx_pripause_1_1us_count; + __le64 rx_pripause_2_1us_count; + __le64 rx_pripause_3_1us_count; + __le64 rx_pripause_4_1us_count; + __le64 rx_pripause_5_1us_count; + __le64 rx_pripause_6_1us_count; + __le64 rx_pripause_7_1us_count; + __le64 rx_pause_1us_count; + __le64 frames_tx_truncated; +}; + +struct ionic_mgmt_port_stats { + __le64 frames_rx_ok; + __le64 frames_rx_all; + __le64 frames_rx_bad_fcs; + __le64 frames_rx_bad_all; + __le64 octets_rx_ok; + __le64 octets_rx_all; + __le64 frames_rx_unicast; + __le64 frames_rx_multicast; + __le64 frames_rx_broadcast; + __le64 frames_rx_pause; + __le64 frames_rx_bad_length; + __le64 frames_rx_undersized; + __le64 frames_rx_oversized; + __le64 frames_rx_fragments; + __le64 frames_rx_jabber; + __le64 frames_rx_64b; + __le64 frames_rx_65b_127b; + __le64 frames_rx_128b_255b; + __le64 frames_rx_256b_511b; + __le64 frames_rx_512b_1023b; + __le64 frames_rx_1024b_1518b; + __le64 frames_rx_gt_1518b; + __le64 frames_rx_fifo_full; + __le64 frames_tx_ok; + __le64 frames_tx_all; + __le64 frames_tx_bad; + __le64 octets_tx_ok; + __le64 octets_tx_total; + __le64 frames_tx_unicast; + __le64 frames_tx_multicast; + __le64 frames_tx_broadcast; + __le64 frames_tx_pause; +}; + +enum ionic_pb_buffer_drop_stats { + IONIC_BUFFER_INTRINSIC_DROP = 0, + IONIC_BUFFER_DISCARDED, + IONIC_BUFFER_ADMITTED, + IONIC_BUFFER_OUT_OF_CELLS_DROP, + IONIC_BUFFER_OUT_OF_CELLS_DROP_2, + IONIC_BUFFER_OUT_OF_CREDIT_DROP, + IONIC_BUFFER_TRUNCATION_DROP, + IONIC_BUFFER_PORT_DISABLED_DROP, + IONIC_BUFFER_COPY_TO_CPU_TAIL_DROP, + IONIC_BUFFER_SPAN_TAIL_DROP, + IONIC_BUFFER_MIN_SIZE_VIOLATION_DROP, + IONIC_BUFFER_ENQUEUE_ERROR_DROP, + IONIC_BUFFER_INVALID_PORT_DROP, + IONIC_BUFFER_INVALID_OUTPUT_QUEUE_DROP, + IONIC_BUFFER_DROP_MAX, +}; + +enum ionic_oflow_drop_stats { + IONIC_OFLOW_OCCUPANCY_DROP, + IONIC_OFLOW_EMERGENCY_STOP_DROP, + IONIC_OFLOW_WRITE_BUFFER_ACK_FILL_UP_DROP, + IONIC_OFLOW_WRITE_BUFFER_ACK_FULL_DROP, + IONIC_OFLOW_WRITE_BUFFER_FULL_DROP, + IONIC_OFLOW_CONTROL_FIFO_FULL_DROP, + IONIC_OFLOW_DROP_MAX, +}; + +/** + * struct ionic_port_pb_stats - packet buffers system stats + * uses ionic_pb_buffer_drop_stats for drop_counts[] + */ +struct ionic_port_pb_stats { + __le64 sop_count_in; + __le64 eop_count_in; + __le64 sop_count_out; + __le64 eop_count_out; + __le64 drop_counts[IONIC_BUFFER_DROP_MAX]; + __le64 input_queue_buffer_occupancy[IONIC_QOS_TC_MAX]; + __le64 input_queue_port_monitor[IONIC_QOS_TC_MAX]; + __le64 output_queue_port_monitor[IONIC_QOS_TC_MAX]; + __le64 oflow_drop_counts[IONIC_OFLOW_DROP_MAX]; + __le64 input_queue_good_pkts_in[IONIC_QOS_TC_MAX]; + __le64 input_queue_good_pkts_out[IONIC_QOS_TC_MAX]; + __le64 input_queue_err_pkts_in[IONIC_QOS_TC_MAX]; + __le64 input_queue_fifo_depth[IONIC_QOS_TC_MAX]; + __le64 input_queue_max_fifo_depth[IONIC_QOS_TC_MAX]; + __le64 input_queue_peak_occupancy[IONIC_QOS_TC_MAX]; + __le64 output_queue_buffer_occupancy[IONIC_QOS_TC_MAX]; +}; + +/** + * enum ionic_port_type - Port types + * @IONIC_ETH_UNKNOWN: Port type not 
configured + * @IONIC_ETH_HOST: Port carries ethernet traffic (inband) + * @IONIC_ETH_HOST_MGMT: Port carries mgmt traffic (out-of-band) + * @IONIC_ETH_MNIC_OOB_MGMT: + * @IONIC_ETH_MNIC_INTERNAL_MGMT: + * @IONIC_ETH_MNIC_INBAND_MGMT: + * @IONIC_ETH_MNIC_CPU: + * @IONIC_ETH_MNIC_LEARN: + * @IONIC_ETH_MNIC_CONTROL: + */ +enum ionic_port_type { + IONIC_ETH_UNKNOWN, + IONIC_ETH_HOST, + IONIC_ETH_HOST_MGMT, + IONIC_ETH_MNIC_OOB_MGMT, + IONIC_ETH_MNIC_INTERNAL_MGMT, + IONIC_ETH_MNIC_INBAND_MGMT, + IONIC_ETH_MNIC_CPU, + IONIC_ETH_MNIC_LEARN, + IONIC_ETH_MNIC_CONTROL, +}; + +/** + * struct ionic_port_identity - port identity structure + * @version: identity structure version + * @type: type of port (enum ionic_port_type) + * @num_lanes: number of lanes for the port + * @autoneg: autoneg supported + * @min_frame_size: minimum frame size supported + * @max_frame_size: maximum frame size supported + * @fec_type: supported fec types + * @pause_type: supported pause types + * @loopback_mode: supported loopback mode + * @speeds: supported speeds + * @config: current port configuration + */ +union ionic_port_identity { + struct { + u8 version; + u8 type; + u8 num_lanes; + u8 autoneg; + __le32 min_frame_size; + __le32 max_frame_size; + u8 fec_type[4]; + u8 pause_type[2]; + u8 loopback_mode[2]; + __le32 speeds[16]; + u8 rsvd2[44]; + union ionic_port_config config; + }; + __le32 words[478]; +}; + +/** + * struct ionic_port_info - port info structure + * @config: Port configuration data + * @status: Port status data + * @stats: Port statistics data + * @mgmt_stats: Port management statistics data + * @port_pb_drop_stats: uplink pb drop stats + */ +struct ionic_port_info { + union ionic_port_config config; + struct ionic_port_status status; + union { + struct ionic_port_stats stats; + struct ionic_mgmt_port_stats mgmt_stats; + }; + /* room for pb_stats to start at 2k offset */ + u8 rsvd[760]; + struct ionic_port_pb_stats pb_stats; +}; + +/** + * struct ionic_lif_stats - LIF statistics structure + */ +struct ionic_lif_stats { + /* RX */ + __le64 rx_ucast_bytes; + __le64 rx_ucast_packets; + __le64 rx_mcast_bytes; + __le64 rx_mcast_packets; + __le64 rx_bcast_bytes; + __le64 rx_bcast_packets; + __le64 rsvd0; + __le64 rsvd1; + /* RX drops */ + __le64 rx_ucast_drop_bytes; + __le64 rx_ucast_drop_packets; + __le64 rx_mcast_drop_bytes; + __le64 rx_mcast_drop_packets; + __le64 rx_bcast_drop_bytes; + __le64 rx_bcast_drop_packets; + __le64 rx_dma_error; + __le64 rsvd2; + /* TX */ + __le64 tx_ucast_bytes; + __le64 tx_ucast_packets; + __le64 tx_mcast_bytes; + __le64 tx_mcast_packets; + __le64 tx_bcast_bytes; + __le64 tx_bcast_packets; + __le64 rsvd3; + __le64 rsvd4; + /* TX drops */ + __le64 tx_ucast_drop_bytes; + __le64 tx_ucast_drop_packets; + __le64 tx_mcast_drop_bytes; + __le64 tx_mcast_drop_packets; + __le64 tx_bcast_drop_bytes; + __le64 tx_bcast_drop_packets; + __le64 tx_dma_error; + __le64 rsvd5; + /* Rx Queue/Ring drops */ + __le64 rx_queue_disabled; + __le64 rx_queue_empty; + __le64 rx_queue_error; + __le64 rx_desc_fetch_error; + __le64 rx_desc_data_error; + __le64 rsvd6; + __le64 rsvd7; + __le64 rsvd8; + /* Tx Queue/Ring drops */ + __le64 tx_queue_disabled; + __le64 tx_queue_error; + __le64 tx_desc_fetch_error; + __le64 tx_desc_data_error; + __le64 tx_queue_empty; + __le64 rsvd10; + __le64 rsvd11; + __le64 rsvd12; + + /* RDMA/ROCE TX */ + __le64 tx_rdma_ucast_bytes; + __le64 tx_rdma_ucast_packets; + __le64 tx_rdma_mcast_bytes; + __le64 tx_rdma_mcast_packets; + __le64 tx_rdma_cnp_packets; + __le64 rsvd13; + 
__le64 rsvd14; + __le64 rsvd15; + + /* RDMA/ROCE RX */ + __le64 rx_rdma_ucast_bytes; + __le64 rx_rdma_ucast_packets; + __le64 rx_rdma_mcast_bytes; + __le64 rx_rdma_mcast_packets; + __le64 rx_rdma_cnp_packets; + __le64 rx_rdma_ecn_packets; + __le64 rsvd16; + __le64 rsvd17; + + __le64 rsvd18; + __le64 rsvd19; + __le64 rsvd20; + __le64 rsvd21; + __le64 rsvd22; + __le64 rsvd23; + __le64 rsvd24; + __le64 rsvd25; + + __le64 rsvd26; + __le64 rsvd27; + __le64 rsvd28; + __le64 rsvd29; + __le64 rsvd30; + __le64 rsvd31; + __le64 rsvd32; + __le64 rsvd33; + + __le64 rsvd34; + __le64 rsvd35; + __le64 rsvd36; + __le64 rsvd37; + __le64 rsvd38; + __le64 rsvd39; + __le64 rsvd40; + __le64 rsvd41; + + __le64 rsvd42; + __le64 rsvd43; + __le64 rsvd44; + __le64 rsvd45; + __le64 rsvd46; + __le64 rsvd47; + __le64 rsvd48; + __le64 rsvd49; + + /* RDMA/ROCE REQ Error/Debugs (768 - 895) */ + __le64 rdma_req_rx_pkt_seq_err; + __le64 rdma_req_rx_rnr_retry_err; + __le64 rdma_req_rx_remote_access_err; + __le64 rdma_req_rx_remote_inv_req_err; + __le64 rdma_req_rx_remote_oper_err; + __le64 rdma_req_rx_implied_nak_seq_err; + __le64 rdma_req_rx_cqe_err; + __le64 rdma_req_rx_cqe_flush_err; + + __le64 rdma_req_rx_dup_responses; + __le64 rdma_req_rx_invalid_packets; + __le64 rdma_req_tx_local_access_err; + __le64 rdma_req_tx_local_oper_err; + __le64 rdma_req_tx_memory_mgmt_err; + __le64 rsvd52; + __le64 rsvd53; + __le64 rsvd54; + + /* RDMA/ROCE RESP Error/Debugs (896 - 1023) */ + __le64 rdma_resp_rx_dup_requests; + __le64 rdma_resp_rx_out_of_buffer; + __le64 rdma_resp_rx_out_of_seq_pkts; + __le64 rdma_resp_rx_cqe_err; + __le64 rdma_resp_rx_cqe_flush_err; + __le64 rdma_resp_rx_local_len_err; + __le64 rdma_resp_rx_inv_request_err; + __le64 rdma_resp_rx_local_qp_oper_err; + + __le64 rdma_resp_rx_out_of_atomic_resource; + __le64 rdma_resp_tx_pkt_seq_err; + __le64 rdma_resp_tx_remote_inv_req_err; + __le64 rdma_resp_tx_remote_access_err; + __le64 rdma_resp_tx_remote_oper_err; + __le64 rdma_resp_tx_rnr_retry_err; + __le64 rsvd57; + __le64 rsvd58; +}; + +/** + * struct ionic_lif_info - LIF info structure + * @config: LIF configuration structure + * @status: LIF status structure + * @stats: LIF statistics structure + */ +struct ionic_lif_info { + union ionic_lif_config config; + struct ionic_lif_status status; + struct ionic_lif_stats stats; +}; + +union ionic_dev_cmd { + u32 words[16]; + struct ionic_admin_cmd cmd; + struct ionic_nop_cmd nop; + + struct ionic_dev_identify_cmd identify; + struct ionic_dev_init_cmd init; + struct ionic_dev_reset_cmd reset; + struct ionic_dev_getattr_cmd getattr; + struct ionic_dev_setattr_cmd setattr; + struct ionic_dev_debug_cmd debug; + + struct ionic_port_identify_cmd port_identify; + struct ionic_port_init_cmd port_init; + struct ionic_port_reset_cmd port_reset; + struct ionic_port_getattr_cmd port_getattr; + struct ionic_port_setattr_cmd port_setattr; + + struct ionic_vf_setattr_cmd vf_setattr; + struct ionic_vf_getattr_cmd vf_getattr; + struct ionic_vf_ctrl_cmd vf_ctrl; + + struct ionic_lif_identify_cmd lif_identify; + struct ionic_lif_init_cmd lif_init; + struct ionic_lif_reset_cmd lif_reset; + + struct ionic_qos_identify_cmd qos_identify; + struct ionic_qos_init_cmd qos_init; + struct ionic_qos_reset_cmd qos_reset; + struct ionic_qos_clear_stats_cmd qos_clear_stats; + + struct ionic_q_identify_cmd q_identify; + struct ionic_q_init_cmd q_init; + struct ionic_q_control_cmd q_control; + + struct ionic_fw_download_cmd fw_download; + struct ionic_fw_control_cmd fw_control; + + struct 
ionic_hii_identify_cmd hii_identify; + struct ionic_hii_init_cmd hii_init; + struct ionic_hii_setattr_cmd hii_setattr; + struct ionic_hii_getattr_cmd hii_getattr; + struct ionic_hii_reset_cmd hii_reset; + + struct ionic_upt_cmd upt_cmd; +}; + +union ionic_dev_cmd_comp { + u32 words[4]; + u8 status; + struct ionic_admin_comp comp; + struct ionic_nop_comp nop; + + struct ionic_dev_identify_comp identify; + struct ionic_dev_init_comp init; + struct ionic_dev_reset_comp reset; + struct ionic_dev_getattr_comp getattr; + struct ionic_dev_setattr_comp setattr; + struct ionic_dev_debug_comp debug; + + struct ionic_port_identify_comp port_identify; + struct ionic_port_init_comp port_init; + struct ionic_port_reset_comp port_reset; + struct ionic_port_getattr_comp port_getattr; + struct ionic_port_setattr_comp port_setattr; + + struct ionic_vf_setattr_comp vf_setattr; + struct ionic_vf_getattr_comp vf_getattr; + struct ionic_vf_ctrl_comp vf_ctrl; + + struct ionic_lif_identify_comp lif_identify; + struct ionic_lif_init_comp lif_init; + struct ionic_lif_reset_comp lif_reset; + + struct ionic_qos_identify_comp qos_identify; + struct ionic_qos_init_comp qos_init; + struct ionic_qos_reset_comp qos_reset; + + struct ionic_q_identify_comp q_identify; + struct ionic_q_init_comp q_init; + + struct ionic_fw_download_comp fw_download; + struct ionic_fw_control_comp fw_control; + + struct ionic_hii_identify_comp hii_identify; + struct ionic_hii_init_comp hii_init; + struct ionic_hii_setattr_comp hii_setattr; + struct ionic_hii_getattr_comp hii_getattr; + struct ionic_hii_reset_comp hii_reset; + + struct ionic_upt_comp upt_comp; +}; + +/** + * struct ionic_oprom_regs - Oprom debug/enable and bmp registers + * @oprom_log_level: Indicates whether OPROM logs should be printed + * @oprom_reserved: Reserved for future use; pads the structure to 32-byte alignment. 
+ */ +#define IONIC_DEVINFO_OPROM_RESERVED 31 +struct ionic_oprom_regs { + u8 oprom_log_level; + u8 oprom_reserved[IONIC_DEVINFO_OPROM_RESERVED]; +}; +IONIC_CHECK_OPROM_LENGTH(ionic_oprom_regs); + +/** + * struct ionic_hwstamp_regs - Hardware current timestamp registers + * @tick_low: Low 32 bits of hardware timestamp + * @tick_high: High 32 bits of hardware timestamp + */ +struct ionic_hwstamp_regs { + u32 tick_low; + u32 tick_high; +}; + +/** + * union ionic_dev_info_regs - Device info register format (read-only) + * @signature: Signature value of 0x44455649 ('DEVI') + * @version: Current version of info + * @asic_type: Asic type + * @asic_rev: Asic revision + * @fw_status: Firmware status + * bit 0 - 1 = fw running + * bit 4-7 - 4 bit generation number, changes on fw restart + * @fw_heartbeat: Firmware heartbeat counter + * @serial_num: Serial number + * @fw_version: Firmware version + * @oprom_regs: oprom_regs to store oprom debug enable/disable and bmp + * @hwstamp_regs: Hardware current timestamp registers + */ +union ionic_dev_info_regs { +#define IONIC_DEVINFO_FWVERS_BUFLEN 32 +#define IONIC_DEVINFO_SERIAL_BUFLEN 32 + struct { + u32 signature; + u8 version; + u8 asic_type; + u8 asic_rev; +#define IONIC_FW_STS_F_RUNNING 0x01 +#define IONIC_FW_STS_F_GENERATION 0xF0 + u8 fw_status; + u32 fw_heartbeat; + char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN]; + char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN]; + struct ionic_oprom_regs oprom_regs; + u8 rsvd_pad1024[916]; + struct ionic_hwstamp_regs hwstamp; + }; + u32 words[512]; +}; +IONIC_CHECK_DEV_INFO_REGS_LENGTH(ionic_dev_info_regs); + +/** + * union ionic_dev_cmd_regs - Device command register format (read-write) + * @doorbell: Device Cmd Doorbell, write-only + * Write a 1 to signal device to process cmd, + * poll done for completion. 
+ * @done: Done indicator, bit 0 == 1 when command is complete + * @cmd: Opcode-specific command bytes + * @comp: Opcode-specific response bytes + * @data: Opcode-specific side-data + */ +union ionic_dev_cmd_regs { + struct { + u32 doorbell; + u32 done; + union ionic_dev_cmd cmd; + union ionic_dev_cmd_comp comp; + u8 rsvd[48]; + u32 data[478]; + } __attribute__((packed)); + u32 words[512]; +}; + +/** + * union ionic_dev_regs - Device register format for bar 0 page 0 + * @info: Device info registers + * @devcmd: Device command registers + */ +union ionic_dev_regs { + struct { + union ionic_dev_info_regs info; + union ionic_dev_cmd_regs devcmd; + } __attribute__((packed)); + __le32 words[1024]; +}; + +union ionic_adminq_cmd { + struct ionic_admin_cmd cmd; + struct ionic_nop_cmd nop; + struct ionic_q_identify_cmd q_identify; + struct ionic_q_init_cmd q_init; + struct ionic_q_control_cmd q_control; + struct ionic_lif_setattr_cmd lif_setattr; + struct ionic_lif_getattr_cmd lif_getattr; + struct ionic_lif_setphc_cmd lif_setphc; + struct ionic_rx_mode_set_cmd rx_mode_set; + struct ionic_rx_filter_add_cmd rx_filter_add; + struct ionic_rx_filter_del_cmd rx_filter_del; + struct ionic_rdma_reset_cmd rdma_reset; + struct ionic_rdma_queue_cmd rdma_queue; + struct ionic_fw_download_cmd fw_download; + struct ionic_fw_control_cmd fw_control; +}; + +union ionic_adminq_comp { + struct ionic_admin_comp comp; + struct ionic_nop_comp nop; + struct ionic_q_identify_comp q_identify; + struct ionic_q_init_comp q_init; + struct ionic_lif_setattr_comp lif_setattr; + struct ionic_lif_getattr_comp lif_getattr; + struct ionic_admin_comp lif_setphc; + struct ionic_rx_filter_add_comp rx_filter_add; + struct ionic_fw_download_comp fw_download; + struct ionic_fw_control_comp fw_control; +}; + +#define IONIC_BARS_MAX 6 +#define IONIC_PCI_BAR_DBELL 1 +#define IONIC_PCI_BAR_CMB 2 + +/* BAR0 */ +#define IONIC_BAR0_SIZE 0x8000 +#define IONIC_BAR2_SIZE 0x800000 + +#define IONIC_BAR0_DEV_INFO_REGS_OFFSET 0x0000 +#define IONIC_BAR0_DEV_CMD_REGS_OFFSET 0x0800 +#define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00 +#define IONIC_BAR0_INTR_STATUS_OFFSET 0x1000 +#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000 +#define IONIC_DEV_CMD_DONE 0x00000001 + +#define IONIC_ASIC_TYPE_CAPRI 0 + +/** + * struct ionic_doorbell - Doorbell register layout + * @p_index: Producer index + * @ring: Selects the specific ring of the queue to update + * Type-specific meaning: + * ring=0: Default producer/consumer queue + * ring=1: (CQ, EQ) Re-Arm queue. RDMA CQs + * send events to EQs when armed. EQs send + * interrupts when armed. + * @qid_lo: Queue destination for the producer index and flags (low bits) + * @qid_hi: Queue destination for the producer index and flags (high bits) + */ +struct ionic_doorbell { + __le16 p_index; + u8 ring; + u8 qid_lo; + __le16 qid_hi; + u16 rsvd2; +}; + +/** + * struct ionic_intr_ctrl - Interrupt control register + * @coalescing_init: Coalescing timer initial value, in + * device units. Use @identity->intr_coal_mult + * and @identity->intr_coal_div to convert from + * usecs to device units: + * + * coal_init = coal_usecs * coal_mult / coal_div + * + * When an interrupt is sent the interrupt + * coalescing timer current value + * (@coalescing_curr) is initialized with this + * value and begins counting down. No more + * interrupts are sent until the coalescing + * timer reaches 0. When @coalescing_init=0 + * interrupt coalescing is effectively disabled + * and every interrupt assert results in an + * interrupt. 
Reset value: 0 + * @mask: Interrupt mask. When @mask=1 the interrupt + * resource will not send an interrupt. When + * @mask=0 the interrupt resource will send an + * interrupt if an interrupt event is pending + * or on the next interrupt assertion event. + * Reset value: 1 + * @int_credits: Interrupt credits. This register indicates + * how many interrupt events the hardware has + * sent. When written by software this + * register atomically decrements @int_credits + * by the value written. When @int_credits + * becomes 0 then the "pending interrupt" bit + * in the Interrupt Status register is cleared + * by the hardware and any pending but unsent + * interrupts are cleared. + * !!!IMPORTANT!!! This is a signed register. + * @flags: Interrupt control flags + * @unmask -- When this bit is written with a 1 + * the interrupt resource will set mask=0. + * @coal_timer_reset -- When this + * bit is written with a 1 the + * @coalescing_curr will be reloaded with + * @coalescing_init to reset the coalescing + * timer. + * @mask_on_assert: Automatically mask on assertion. When + * @mask_on_assert=1 the interrupt resource + * will set @mask=1 whenever an interrupt is + * sent. When using interrupts in Legacy + * Interrupt mode the driver must select + * @mask_on_assert=0 for proper interrupt + * operation. + * @coalescing_curr: Coalescing timer current value, in + * microseconds. When this value reaches 0 + * the interrupt resource is again eligible to + * send an interrupt. If an interrupt event + * is already pending when @coalescing_curr + * reaches 0 the pending interrupt will be + * sent, otherwise an interrupt will be sent + * on the next interrupt assertion event. + */ +struct ionic_intr_ctrl { + u8 coalescing_init; + u8 rsvd[3]; + u8 mask; + u8 rsvd2[3]; + u16 int_credits; + u16 flags; +#define INTR_F_UNMASK 0x0001 +#define INTR_F_TIMER_RESET 0x0002 + u8 mask_on_assert; + u8 rsvd3[3]; + u8 coalescing_curr; + u8 rsvd4[3]; + u32 rsvd6[3]; +}; + +#define IONIC_INTR_CTRL_REGS_MAX 2048 +#define IONIC_INTR_CTRL_COAL_MAX 0x3F + +#define intr_to_coal(intr_ctrl) \ + ((void __iomem *)&(intr_ctrl)->coalescing_init) +#define intr_to_mask(intr_ctrl) \ + ((void __iomem *)&(intr_ctrl)->mask) +#define intr_to_credits(intr_ctrl) \ + ((void __iomem *)&(intr_ctrl)->int_credits) +#define intr_to_mask_on_assert(intr_ctrl)\ + ((void __iomem *)&(intr_ctrl)->mask_on_assert) + +struct ionic_intr_status { + u32 status[2]; +}; + +struct ionic_notifyq_cmd { + __le32 data; /* Not used but needed for qcq structure */ +}; + +union ionic_notifyq_comp { + struct ionic_notifyq_event event; + struct ionic_link_change_event link_change; + struct ionic_reset_event reset; + struct ionic_heartbeat_event heartbeat; + struct ionic_log_event log; +}; +union ionic_debug_msg { + struct { + char string[128]; + u8 rsvd[128]; + }; + __le32 words[64]; +}; + +/** + * struct ionic_eq_comp - Event queue completion descriptor + * + * @code: Event code, see enum ionic_eq_comp_code + * @lif_index: To which LIF the event pertains + * @qid: To which queue id the event pertains + * @gen_color: Event queue wrap counter, init 1, incr each wrap + */ +struct ionic_eq_comp { + __le16 code; + __le16 lif_index; + __le32 qid; + u8 rsvd[7]; + u8 gen_color; +}; + +enum ionic_eq_comp_code { + IONIC_EQ_COMP_CODE_NONE = 0, + IONIC_EQ_COMP_CODE_RX_COMP = 1, + IONIC_EQ_COMP_CODE_TX_COMP = 2, +}; + +/* Deprecated */ +struct ionic_identity { + union ionic_drv_identity drv; + union ionic_dev_identity dev; + union ionic_lif_identity lif; + union ionic_port_identity port; + union ionic_qos_identity qos; + union ionic_q_identity txq; +}; + +#endif /* _IONIC_IF_H_ */
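/*
 * [Editor's illustration, not part of the patch] The dev command flow
 * implied by union ionic_dev_cmd_regs above: copy the command words into
 * @cmd, ring @doorbell, poll @done, then read the completion. A minimal
 * sketch under those assumptions; the real driver adds locking, a bounded
 * timeout and error decoding (see ionic_dev_cmd_wait() in ionic.h):
 */
static inline u8 example_dev_cmd_go(union ionic_dev_cmd_regs __iomem *regs,
				    const union ionic_dev_cmd *cmd)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cmd->words); i++)
		iowrite32(cmd->words[i], &regs->cmd.words[i]);
	iowrite32(1, &regs->doorbell);

	while (!(ioread32(&regs->done) & IONIC_DEV_CMD_DONE))
		cpu_relax();	/* real code bounds this wait */

	/* the low byte of the first completion word is the status */
	return ioread32(&regs->comp.words[0]) & 0xff;
}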
diff --git a/platform/pensando/dsc-drivers/src/drivers/common/ionic_regs.h b/platform/pensando/dsc-drivers/src/drivers/common/ionic_regs.h new file mode 100644 index 0000000000..f6fb4aee8b --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/common/ionic_regs.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */ +/* Copyright (c) 2018 - 2021 Pensando Systems, Inc. All rights reserved. */ + +#ifndef IONIC_REGS_H +#define IONIC_REGS_H + +#include <linux/io.h> + +/** struct ionic_intr - interrupt control register set. + * @coal_init: coalesce timer initial value. + * @mask: interrupt mask value. + * @credits: interrupt credit count and return. + * @mask_assert: interrupt mask value on assert. + * @coal: coalesce timer time remaining. + */ +struct ionic_intr { + u32 coal_init; + u32 mask; + u32 credits; + u32 mask_assert; + u32 coal; + u32 rsvd[3]; +}; + +/** enum ionic_intr_mask_vals - valid values for mask and mask_assert. + * @IONIC_INTR_MASK_CLEAR: unmask interrupt. + * @IONIC_INTR_MASK_SET: mask interrupt. + */ +enum ionic_intr_mask_vals { + IONIC_INTR_MASK_CLEAR = 0, + IONIC_INTR_MASK_SET = 1, +}; + +/** enum ionic_intr_credits_bits - bitwise composition of credits values. + * @IONIC_INTR_CRED_COUNT: bit mask of credit count, no shift needed. + * @IONIC_INTR_CRED_COUNT_SIGNED: bit mask of credit count, including sign bit. + * @IONIC_INTR_CRED_UNMASK: unmask the interrupt. + * @IONIC_INTR_CRED_RESET_COALESCE: reset the coalesce timer. + * @IONIC_INTR_CRED_REARM: unmask the interrupt and reset the coalesce timer. + */ +enum ionic_intr_credits_bits { + IONIC_INTR_CRED_COUNT = 0x7fffu, + IONIC_INTR_CRED_COUNT_SIGNED = 0xffffu, + IONIC_INTR_CRED_UNMASK = 0x10000u, + IONIC_INTR_CRED_RESET_COALESCE = 0x20000u, + IONIC_INTR_CRED_REARM = (IONIC_INTR_CRED_UNMASK | + IONIC_INTR_CRED_RESET_COALESCE), +}; + +static inline void ionic_intr_coal_init(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 coal) +{ + iowrite32(coal, &intr_ctrl[intr_idx].coal_init); +} + +static inline void ionic_intr_mask(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 mask) +{ + iowrite32(mask, &intr_ctrl[intr_idx].mask); +} + +static inline void ionic_intr_credits(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 cred, u32 flags) +{ + if (WARN_ON_ONCE(cred > IONIC_INTR_CRED_COUNT)) { + cred = ioread32(&intr_ctrl[intr_idx].credits); + cred &= IONIC_INTR_CRED_COUNT_SIGNED; + } + + iowrite32(cred | flags, &intr_ctrl[intr_idx].credits); +} + +static inline void ionic_intr_clean_flags(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 flags) +{ + u32 cred; + + cred = ioread32(&intr_ctrl[intr_idx].credits); + cred &= IONIC_INTR_CRED_COUNT_SIGNED; + cred |= flags; + iowrite32(cred, &intr_ctrl[intr_idx].credits); +} + +static inline void ionic_intr_clean(struct ionic_intr __iomem *intr_ctrl, + int intr_idx) +{ + ionic_intr_clean_flags(intr_ctrl, intr_idx, + IONIC_INTR_CRED_RESET_COALESCE); +} + +static inline void ionic_intr_mask_assert(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 mask) +{ + iowrite32(mask, &intr_ctrl[intr_idx].mask_assert); +} + +/** enum ionic_dbell_bits - bitwise composition of dbell values. + * + * @IONIC_DBELL_QID_MASK: unshifted mask of valid queue id bits. + * @IONIC_DBELL_QID_SHIFT: queue id shift amount in dbell value. + * @IONIC_DBELL_QID: macro to build QID component of dbell value. 
+ * + * @IONIC_DBELL_RING_MASK: unshifted mask of valid ring bits. + * @IONIC_DBELL_RING_SHIFT: ring shift amount in dbell value. + * @IONIC_DBELL_RING: macro to build ring component of dbell value. + * + * @IONIC_DBELL_RING_0: ring zero dbell component value. + * @IONIC_DBELL_RING_1: ring one dbell component value. + * @IONIC_DBELL_RING_2: ring two dbell component value. + * @IONIC_DBELL_RING_3: ring three dbell component value. + * + * @IONIC_DBELL_INDEX_MASK: bit mask of valid index bits, no shift needed. + */ +enum ionic_dbell_bits { + IONIC_DBELL_QID_MASK = 0xffffff, + IONIC_DBELL_QID_SHIFT = 24, + +#define IONIC_DBELL_QID(n) \ + (((u64)(n) & IONIC_DBELL_QID_MASK) << IONIC_DBELL_QID_SHIFT) + + IONIC_DBELL_RING_MASK = 0x7, + IONIC_DBELL_RING_SHIFT = 16, + +#define IONIC_DBELL_RING(n) \ + (((u64)(n) & IONIC_DBELL_RING_MASK) << IONIC_DBELL_RING_SHIFT) + + IONIC_DBELL_RING_0 = 0, + IONIC_DBELL_RING_1 = IONIC_DBELL_RING(1), + IONIC_DBELL_RING_2 = IONIC_DBELL_RING(2), + IONIC_DBELL_RING_3 = IONIC_DBELL_RING(3), + + IONIC_DBELL_INDEX_MASK = 0xffff, +}; + +static inline void ionic_dbell_ring(u64 __iomem *db_page, int qtype, u64 val) +{ +#if defined(CONFIG_IONIC_MNIC) + wmb(); + writeq_relaxed(val, &db_page[qtype]); +#else + writeq(val, &db_page[qtype]); +#endif +} + +#endif /* IONIC_REGS_H */
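/*
 * [Editor's illustration, not part of the patch] Composing a 64-bit
 * doorbell value from the fields above and ringing it for a queue:
 */
static inline void example_txq_doorbell(u64 __iomem *db_page, int qtype,
					u32 qid, u16 prod_index)
{
	u64 val = IONIC_DBELL_QID(qid) | IONIC_DBELL_RING_0 |
		  (prod_index & IONIC_DBELL_INDEX_MASK);

	ionic_dbell_ring(db_page, qtype, val);
}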
diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/Makefile b/platform/pensando/dsc-drivers/src/drivers/linux/Makefile new file mode 100644 index 0000000000..c754088fe4 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/Makefile @@ -0,0 +1,116 @@ +ifneq ($(KERNELRELEASE),) +obj-$(CONFIG_IONIC) += eth/ionic/ +obj-$(CONFIG_IONIC_MNIC) += eth/ionic/ +obj-$(CONFIG_MDEV) += mdev/ +obj-$(CONFIG_MNET_UIO_PDRV_GENIRQ) += mnet_uio_pdrv_genirq/ +else + +IONIC_ETH_SRC = $(CURDIR)/eth/ionic + +#KOPT += V=1 # verbose build +#KOPT += W=1 # extra warnings +#KOPT += C=1 # static analysis +#KOPT += CHECK=sparse # static analysis tool +#KOPT += CHECK=scripts/coccicheck + +default: all + +# Discover kernel configuration. +# +# Override running kernel with +# `make KSRC=/path/to/your/sources` or +# `export KSRC=/path/to/your/sources` +# + +ifeq ($(ARCH),aarch64) + +# Ionic mnic and mdev for drivers ARM +KSRC ?= ${NICDIR}/buildroot/output/${ASIC}/linux-headers +KMOD_OUT_DIR ?= ${BLD_OUT_DIR}/drivers_submake +KMOD_SRC_DIR ?= ${TOPDIR}/platform/drivers/linux-ionic +ETH_KOPT += CONFIG_IONIC_MNIC=m +ETH_KOPT += CONFIG_MDEV=m +ETH_KOPT += CONFIG_MNET_UIO_PDRV_GENIRQ=m +KOPT += ARCH=arm64 +KCFLAGS += -DCONFIG_IONIC_MNIC +KCFLAGS += -DCONFIG_MDEV +KCFLAGS += -DCONFIG_MNET_UIO_PDRV_GENIRQ +ALL = mnic +ALL += mnet_uio_pdrv_genirq +ALL += mdev +export PATH := $(PATH):$(TOOLCHAIN_DIR)/bin + +KSYMS_MNIC = "KBUILD_EXTRA_SYMBOLS=${KMOD_OUT_DIR}/Module.symvers.mnic" +KSYMS = "${KSYMS_MNIC} ${KMOD_OUT_DIR}/Module.symvers.uio" + + +else + +DVER = $(shell git describe --tags 2>/dev/null) + +# Ionic driver for host +include linux_ver.mk + +KSRC ?= /lib/modules/$(shell uname -r)/build +ETH_KOPT += CONFIG_IONIC=m +ETH_KOPT += CONFIG_IONIC_MNIC=_ +ETH_KOPT += CONFIG_MDEV=_ +ETH_KOPT += CONFIG_MNET_UIO_PDRV_GENIRQ=_ +KCFLAGS += -DCONFIG_IONIC + +KCFLAGS += -Werror +KCFLAGS += $(EXTRA_CFLAGS) + +ALL = eth + +endif + +ifeq ($(DVER),) + DVER = "22.11.1-001" +endif +KCFLAGS += -Ddrv_ver=\\\"$(DVER)\\\" + +KOPT += KCFLAGS="$(KCFLAGS)" + +all: $(ALL) + +KBUILD_RULE = $(MAKE) -C $(KSRC) $(KOPT) M=$(CURDIR) + +mnic: KOPT+=$(ETH_KOPT) +mnic: + @echo "===> Building MNIC driver " + touch $(KMOD_OUT_DIR)/Makefile || true + $(MAKE) -C $(KSRC) V=1 M=$(KMOD_OUT_DIR) src=$(KMOD_SRC_DIR)/eth/ionic $(KOPT) + mv ${KMOD_OUT_DIR}/Module.symvers ${KMOD_OUT_DIR}/Module.symvers.mnic + +mnet_uio_pdrv_genirq: KOPT+=$(ETH_KOPT) +mnet_uio_pdrv_genirq: + @echo "===> Building MNET_UIO driver " + $(MAKE) -C $(KSRC) V=1 M=$(KMOD_OUT_DIR) src=$(KMOD_SRC_DIR)/mnet_uio_pdrv_genirq $(KOPT) + mv ${KMOD_OUT_DIR}/Module.symvers ${KMOD_OUT_DIR}/Module.symvers.uio + +mdev: KOPT+=$(ETH_KOPT) +mdev: + @echo "===> Building MDEV driver " + $(MAKE) -C $(KSRC) $(KSYMS) V=1 M=$(KMOD_OUT_DIR) src=$(KMOD_SRC_DIR)/mdev $(KOPT) + +eth: KOPT+=$(ETH_KOPT) +eth: + $(KBUILD_RULE) + +clean: KOPT+=$(ETH_KOPT) +clean: + $(KBUILD_RULE) clean + +install: modules_install +modules_install: KOPT+=$(ETH_KOPT) +modules_install: + $(KBUILD_RULE) modules_install + +cscope: + find $(IONIC_ETH_SRC) -name '*.[ch]' > cscope.files + cscope -bkq + +.PHONY: default all mnic mdev mnet_uio_pdrv_genirq eth clean install modules_install cscope + +endif diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/Kconfig b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/Kconfig new file mode 100644 index 0000000000..2a1045f558 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019 - 2020 Pensando Systems, Inc +# +# Pensando device configuration +# + +config NET_VENDOR_PENSANDO + bool "Pensando devices" + default y + help + If you have a Distributed Services Card (DSC) belonging to this + class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Pensando cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_PENSANDO + +config IONIC + tristate "Pensando DSC Ethernet Support" + depends on 64BIT && PCI + select NET_DEVLINK + help + This enables Ethernet support for the Pensando family of Distributed + Services Cards (DSCs). 
More specific information on this driver can + be found in + <file:Documentation/networking/device_drivers/ethernet/pensando/ionic.rst>. + + To compile this driver as a module, choose M here. The module + will be called ionic. + +endif # NET_VENDOR_PENSANDO diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/Makefile b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/Makefile new file mode 100644 index 0000000000..d5e2808fc6 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2017 - 2019 Pensando Systems, Inc + +obj-$(CONFIG_IONIC) := ionic.o +obj-$(CONFIG_IONIC_MNIC) := ionic_mnic.o + +ccflags-y := -g -I$(KMOD_SRC_DIR)/../common + +ionic-y := ionic_main.o ionic_bus_pci.o ionic_dev.o ionic_ethtool.o \ + ionic_lif.o ionic_rx_filter.o ionic_txrx.o ionic_debugfs.o \ + ionic_api.o ionic_stats.o ionic_devlink.o kcompat.o ionic_fw.o \ + dim.o net_dim.o +ionic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o + +ionic_mnic-y := ionic_main.o ionic_bus_platform.o ionic_dev.o ionic_ethtool.o \ + ionic_lif.o ionic_rx_filter.o ionic_txrx.o ionic_debugfs.o \ + ionic_api.o ionic_stats.o ionic_devlink.o kcompat.o ionic_fw.o \ + dim.o net_dim.o +ionic_mnic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o ionic_phc_weak.o diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/dim.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/dim.c new file mode 100644 index 0000000000..4e47c0f68c --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/dim.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* + * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved. + */ + +#ifndef CONFIG_DIMLIB +#include "dim.h" + +bool dim_on_top(struct dim *dim) +{ + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + return true; + case DIM_GOING_RIGHT: + return (dim->steps_left > 1) && (dim->steps_right == 1); + default: /* DIM_GOING_LEFT */ + return (dim->steps_right > 1) && (dim->steps_left == 1); + } +} +//EXPORT_SYMBOL(dim_on_top); + +void dim_turn(struct dim *dim) +{ + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + break; + case DIM_GOING_RIGHT: + dim->tune_state = DIM_GOING_LEFT; + dim->steps_left = 0; + break; + case DIM_GOING_LEFT: + dim->tune_state = DIM_GOING_RIGHT; + dim->steps_right = 0; + break; + } +} +//EXPORT_SYMBOL(dim_turn); + +void dim_park_on_top(struct dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tired = 0; + dim->tune_state = DIM_PARKING_ON_TOP; +} +//EXPORT_SYMBOL(dim_park_on_top); + +void dim_park_tired(struct dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tune_state = DIM_PARKING_TIRED; +} +//EXPORT_SYMBOL(dim_park_tired); + +void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, + struct dim_stats *curr_stats) +{ + /* u32 holds up to 71 minutes, should be enough */ + u32 delta_us = ktime_us_delta(end->time, start->time); + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); + u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr, + start->comp_ctr); + + if (!delta_us) + return; + + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC, + delta_us); + curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us); + if (curr_stats->epms != 0) + curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL( + curr_stats->cpms * 100, curr_stats->epms); + else + curr_stats->cpe_ratio = 0; + +} +//EXPORT_SYMBOL(dim_calc_stats); +#endif
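/*
 * [Editor's illustration, not part of the patch] Worked example of
 * dim_calc_stats() above: with two samples taken delta_us = 100 usec apart
 * showing 50 packets, 75000 bytes and 64 completions in between, it yields
 * ppms = 50 * 1000 / 100 = 500, bpms = 75000 * 1000 / 100 = 750000,
 * epms = DIM_NEVENTS * 1000 / 100 = 640, cpms = 64 * 1000 / 100 = 640, and
 * cpe_ratio = 640 * 100 / 640 = 100, i.e. one completion per event.
 */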
diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/dim.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/dim.h new file mode 100644 index 0000000000..d4a394e6cc --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/dim.h @@ -0,0 +1,344 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef DIM_H +#define DIM_H + +#include <linux/kernel.h> +#include <linux/ktime.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) + +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) +#endif + +/* + * Number of events between DIM iterations. + * Causes a moderation of the algorithm run. + */ +#define DIM_NEVENTS 64 + +/* + * Does the difference between two values justify taking an action? + * We consider a 10% difference significant. + */ +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100UL * abs((val) - (ref))) / (ref)) > 10) + +/* + * Calculate the gap between two values. + * Take wrap-around and variable size into consideration. + */ +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) \ + & (BIT_ULL(bits) - 1)) + +/** + * struct dim_cq_moder - Structure for CQ moderation values. + * Used for communications between DIM and its consumer. + * + * @usec: CQ timer suggestion (by DIM) + * @pkts: CQ packet counter suggestion (by DIM) + * @comps: Completion counter + * @cq_period_mode: CQ period count mode (from CQE/EQE) + */ +struct dim_cq_moder { + u16 usec; + u16 pkts; + u16 comps; + u8 cq_period_mode; +}; + +/** + * struct dim_sample - Structure for DIM sample data. + * Used for communications between DIM and its consumer. + * + * @time: Sample timestamp + * @pkt_ctr: Number of packets + * @byte_ctr: Number of bytes + * @event_ctr: Number of events + * @comp_ctr: Current completion counter + */ +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; + u32 comp_ctr; +}; + +/** + * struct dim_stats - Structure for DIM stats. + * Used for holding current measured rates. + * + * @ppms: Packets per msec + * @bpms: Bytes per msec + * @epms: Events per msec + * @cpms: Completions per msec + * @cpe_ratio: Ratio of completions to events + */ +struct dim_stats { + int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ + int epms; /* events per msec */ + int cpms; /* completions per msec */ + int cpe_ratio; /* ratio of completions to events */ +}; + +/** + * struct dim - Main structure for dynamic interrupt moderation (DIM). + * Used for holding all information about a specific DIM instance. 
+ * + * @state: Algorithm state (see below) + * @prev_stats: Measured rates from previous iteration (for comparison) + * @start_sample: Sampled data at start of current iteration + * @measuring_sample: A &dim_sample that is used to update the current events + * @work: Work to perform on action required + * @priv: A pointer to the struct that points to dim + * @profile_ix: Current moderation profile + * @mode: CQ period count mode + * @tune_state: Algorithm tuning state (see below) + * @steps_right: Number of steps taken towards higher moderation + * @steps_left: Number of steps taken towards lower moderation + * @tired: Parking depth counter + */ +struct dim { + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct dim_sample measuring_sample; + struct work_struct work; + void *priv; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +/** + * enum dim_cq_period_mode - Modes for CQ period count + * + * @DIM_CQ_PERIOD_MODE_START_FROM_EQE: Start counting from EQE + * @DIM_CQ_PERIOD_MODE_START_FROM_CQE: Start counting from CQE (implies timer reset) + * @DIM_CQ_PERIOD_NUM_MODES: Number of modes + */ +enum dim_cq_period_mode { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + DIM_CQ_PERIOD_NUM_MODES +}; + +/** + * enum dim_state - DIM algorithm states + * + * These will determine if the algorithm is in a valid state to start an iteration. + * + * @DIM_START_MEASURE: This is the first iteration (also after applying a new profile) + * @DIM_MEASURE_IN_PROGRESS: Algorithm is already in progress - check if + * need to perform an action + * @DIM_APPLY_NEW_PROFILE: DIM consumer is currently applying a profile - no need to measure + */ +enum dim_state { + DIM_START_MEASURE, + DIM_MEASURE_IN_PROGRESS, + DIM_APPLY_NEW_PROFILE, +}; + +/** + * enum dim_tune_state - DIM algorithm tune states + * + * These will determine which action the algorithm should perform. + * + * @DIM_PARKING_ON_TOP: Algorithm found a local top point - exit on significant difference + * @DIM_PARKING_TIRED: Algorithm found a deep top point - don't exit if tired > 0 + * @DIM_GOING_RIGHT: Algorithm is currently trying higher moderation levels + * @DIM_GOING_LEFT: Algorithm is currently trying lower moderation levels + */ +enum dim_tune_state { + DIM_PARKING_ON_TOP, + DIM_PARKING_TIRED, + DIM_GOING_RIGHT, + DIM_GOING_LEFT, +}; + +/** + * enum dim_stats_state - DIM algorithm statistics states + * + * These will determine the verdict of current iteration. + * + * @DIM_STATS_WORSE: Current iteration shows worse performance than before + * @DIM_STATS_SAME: Current iteration shows same performance as before + * @DIM_STATS_BETTER: Current iteration shows better performance than before + */ +enum dim_stats_state { + DIM_STATS_WORSE, + DIM_STATS_SAME, + DIM_STATS_BETTER, +}; + +/** + * enum dim_step_result - DIM algorithm step results + * + * These describe the result of a step. + * + * @DIM_STEPPED: Performed a regular step + * @DIM_TOO_TIRED: Same kind of step was done multiple times - should go to + * tired parking + * @DIM_ON_EDGE: Stepped to the most left/right profile + */ +enum dim_step_result { + DIM_STEPPED, + DIM_TOO_TIRED, + DIM_ON_EDGE, +}; + +
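/*
 * [Editor's illustration, not part of the patch] Typical consumer flow,
 * assuming a NAPI-style poll loop: fill a dim_sample with the running
 * counters and hand it to the algorithm. dim_update_sample() and net_dim()
 * are declared below in this header; net_dim() may schedule dim->work to
 * apply a new profile.
 */
static inline void example_dim_poll_done(struct dim *dim, u16 nevents,
					 u64 pkts, u64 bytes)
{
	struct dim_sample sample;

	dim_update_sample(nevents, pkts, bytes, &sample);
	net_dim(dim, sample);
}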
/** + * dim_on_top - check if current state is a good place to stop (top location) + * @dim: DIM context + * + * Check if current profile is a good place to park at. + * This will result in reducing the DIM checks frequency, as we assume we + * probably shouldn't change profiles unless the traffic pattern changes. + */ +bool dim_on_top(struct dim *dim); + +/** + * dim_turn - change profile altering direction + * @dim: DIM context + * + * Go left if we were going right and vice-versa. + * Do nothing if currently parking. + */ +void dim_turn(struct dim *dim); + +/** + * dim_park_on_top - enter a parking state on a top location + * @dim: DIM context + * + * Enter parking state. + * Clear all movement history. + */ +void dim_park_on_top(struct dim *dim); + +/** + * dim_park_tired - enter a tired parking state + * @dim: DIM context + * + * Enter parking state. + * Clear all movement history and cause DIM checks frequency to reduce. + */ +void dim_park_tired(struct dim *dim); + +/** + * dim_calc_stats - calculate the difference between two samples + * @start: start sample + * @end: end sample + * @curr_stats: delta between samples + * + * Calculate the delta between two samples (in data rates). + * Takes into consideration counter wrap-around. + */ +void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, + struct dim_stats *curr_stats); + +/** + * dim_update_sample - set a sample's fields with given values + * @event_ctr: number of events to set + * @packets: number of packets to set + * @bytes: number of bytes to set + * @s: DIM sample + */ +static inline void +dim_update_sample(u16 event_ctr, u64 packets, u64 bytes, struct dim_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = packets; + s->byte_ctr = bytes; + s->event_ctr = event_ctr; +} + +/** + * dim_update_sample_with_comps - set a sample's fields with given + * values including the completion parameter + * @event_ctr: number of events to set + * @packets: number of packets to set + * @bytes: number of bytes to set + * @comps: number of completions to set + * @s: DIM sample + */ +static inline void +dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps, + struct dim_sample *s) +{ + dim_update_sample(event_ctr, packets, bytes, s); + s->comp_ctr = comps; +} + +/* Net DIM */ + +/** + * net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile + * @cq_period_mode: CQ period mode + * @ix: Profile index + */ +struct dim_cq_moder net_dim_get_rx_moderation(u8 cq_period_mode, int ix); + +/** + * net_dim_get_def_rx_moderation - provide the default RX moderation + * @cq_period_mode: CQ period mode + */ +struct dim_cq_moder net_dim_get_def_rx_moderation(u8 cq_period_mode); + +/** + * net_dim_get_tx_moderation - provide a CQ moderation object for the given TX profile + * @cq_period_mode: CQ period mode + * @ix: Profile index + */ +struct dim_cq_moder net_dim_get_tx_moderation(u8 cq_period_mode, int ix); + +/** + * net_dim_get_def_tx_moderation - provide the default TX moderation + * @cq_period_mode: CQ period mode + */ +struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode); + +/** + * net_dim - main DIM algorithm entry point + * @dim: DIM instance information + * @end_sample: Current data measurement + * + * Called by the consumer. + * This is the main logic of the algorithm, where data is processed in order + * to decide on next required action. + */ +void net_dim(struct dim *dim, struct dim_sample end_sample); + +/* RDMA DIM */ + +/* + * RDMA DIM profile: + * the profile array size must be RDMA_DIM_PARAMS_NUM_PROFILES. 
+ */ +#define RDMA_DIM_PARAMS_NUM_PROFILES 9 +#define RDMA_DIM_START_PROFILE 0 + +/** + * rdma_dim - Runs the adaptive moderation. + * @dim: The moderation struct. + * @completions: The number of completions collected in this round. + * + * Each call to rdma_dim takes the latest amount of completions that + * have been collected and counts them as a new event. + * Once enough events have been collected the algorithm decides a new + * moderation level. + */ +void rdma_dim(struct dim *dim, u64 completions); + +#endif /* DIM_H */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic.h new file mode 100644 index 0000000000..d9e9e4d3ef --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#ifndef _IONIC_H_ +#define _IONIC_H_ + +struct ionic_lif; + +#include "kcompat.h" + +#include "ionic_if.h" +#include "ionic_dev.h" +#include "ionic_devlink.h" + +#define IONIC_DRV_NAME "ionic" +#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver" +#define IONIC_DRV_VERSION drv_ver + +#define PCI_VENDOR_ID_PENSANDO 0x1dd8 + +#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002 +#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 +#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT 0x1004 + +#define DEVCMD_TIMEOUT 5 +#define SHORT_TIMEOUT 1 +#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100) +#define MAX_ETH_EQS 64 + +#define IONIC_PHC_UPDATE_NS 10000000000L /* 10s in nanoseconds */ +#define NORMAL_PPB 1000000000 /* one billion parts per billion */ +#define SCALED_PPM (1000000ull << 16) /* 2^16 million parts per 2^16 million */ + +extern bool port_init_up; +extern unsigned int rx_copybreak; +extern unsigned int rx_fill_threshold; +extern unsigned int tx_budget; +extern unsigned int devcmd_timeout; +extern unsigned long affinity_mask_override; + +struct ionic_vf { + u16 index; + u8 macaddr[6]; + __le32 maxrate; + __le16 vlanid; + u8 spoofchk; + u8 trusted; + u8 linkstate; + dma_addr_t stats_pa; + struct ionic_lif_stats stats; +}; + +struct ionic { + struct pci_dev *pdev; + struct platform_device *pfdev; + struct device *dev; + struct ionic_dev idev; + struct mutex dev_cmd_lock; /* lock for dev_cmd operations */ + struct dentry *dentry; + struct ionic_dev_bar bars[IONIC_BARS_MAX]; + unsigned int num_bars; + struct ionic_identity ident; + bool is_mgmt_nic; + struct ionic_lif *lif; + struct ionic_eq **eqs; + unsigned int nnqs_per_lif; + unsigned int nrdma_eqs_per_lif; + unsigned int ntxqs_per_lif; + unsigned int nrxqs_per_lif; + unsigned int nlifs; + unsigned int neth_eqs; + DECLARE_BITMAP(lifbits, IONIC_LIFS_MAX); + DECLARE_BITMAP(ethbits, IONIC_LIFS_MAX); + unsigned int nintrs; + DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX); +#ifndef HAVE_PCI_IRQ_API + struct msix_entry *msix; +#endif + struct work_struct nb_work; + struct notifier_block nb; +#ifdef IONIC_DEVLINK + struct devlink_port dl_port; +#endif + struct rw_semaphore vf_op_lock; /* lock for VF operations */ + struct ionic_vf *vfs; + int num_vfs; + struct timer_list watchdog_timer; + int watchdog_period; +}; + +int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, + const int err, const bool do_msg); +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +int ionic_adminq_post_wait_nomsg(struct 
ionic_lif *lif, struct ionic_admin_ctx *ctx); +void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode, + u8 status, int err); + +int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait); +int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait); +void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status, + int err); +int ionic_set_dma_mask(struct ionic *ionic); +int ionic_setup(struct ionic *ionic); + +int ionic_identify(struct ionic *ionic); +int ionic_init(struct ionic *ionic); +int ionic_reset(struct ionic *ionic); + +int ionic_port_identify(struct ionic *ionic); +int ionic_port_init(struct ionic *ionic); +int ionic_port_reset(struct ionic *ionic); + +const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr); + +#endif /* _IONIC_H_ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_api.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_api.c new file mode 100644 index 0000000000..d2f53d29e8 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_api.c @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2017 - 2022 Pensando Systems, Inc. All rights reserved. */ + +#include <linux/module.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_dev.h" +#include "ionic_lif.h" +#include "ionic_txrx.h" + +void *ionic_get_handle_from_netdev(struct net_device *netdev, + const char *api_version, + enum ionic_api_prsn prsn) +{ + struct ionic_lif *lif; + + if (strcmp(api_version, IONIC_API_VERSION)) + return ERR_PTR(-EINVAL); + + lif = ionic_netdev_lif(netdev); + if (!lif || !lif->nrdma_eqs) + return ERR_PTR(-ENXIO); + + /* TODO: Rework if supporting more than one child */ + if (lif->child_lif_cfg.prsn != IONIC_PRSN_NONE && + lif->child_lif_cfg.prsn != prsn) + return ERR_PTR(-EBUSY); + + return lif; +} +EXPORT_SYMBOL_GPL(ionic_get_handle_from_netdev); + +bool ionic_api_stay_registered(void *handle) +{ + /* TODO: Implement when eth driver reset is implemented */ + return false; +} +EXPORT_SYMBOL_GPL(ionic_api_stay_registered); + +void ionic_api_request_reset(void *handle) +{ + struct ionic_lif *lif = handle; + struct ionic *ionic; + int err; + + union ionic_dev_cmd cmd = { + .cmd.opcode = IONIC_CMD_RDMA_RESET_LIF, + .cmd.lif_index = cpu_to_le16(lif->child_lif_cfg.index), + }; + + ionic = lif->ionic; + + mutex_lock(&ionic->dev_cmd_lock); + + ionic_dev_cmd_go(&ionic->idev, &cmd); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + + mutex_unlock(&ionic->dev_cmd_lock); + + if (err) { + netdev_warn(lif->netdev, "request_reset: error %d\n", err); + return; + } + + if (lif->child_lif_cfg.priv && + lif->child_lif_cfg.reset_cb) + (*lif->child_lif_cfg.reset_cb)(lif->child_lif_cfg.priv); +} +EXPORT_SYMBOL_GPL(ionic_api_request_reset); + +void *ionic_api_get_private(void *handle, enum ionic_api_prsn prsn) +{ + struct ionic_lif *lif = handle; + + if (lif->child_lif_cfg.prsn != prsn) + return NULL; + + return lif->child_lif_cfg.priv; +} +EXPORT_SYMBOL_GPL(ionic_api_get_private); + +int ionic_api_set_private(void *handle, void *priv, + void (*reset_cb)(void *priv), + enum ionic_api_prsn prsn) +{ + struct ionic_lif *lif = handle; + struct ionic_lif_cfg *cfg = &lif->child_lif_cfg; + + if (priv && cfg->priv) + return -EBUSY; + + cfg->priv = priv; + cfg->prsn = prsn; + cfg->reset_cb = reset_cb; + + return 0; +} +EXPORT_SYMBOL_GPL(ionic_api_set_private); + +struct device *ionic_api_get_device(void *handle) +{ + struct ionic_lif *lif = handle; + + return lif->netdev->dev.parent; +} +EXPORT_SYMBOL_GPL(ionic_api_get_device);
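/*
 * [Editor's illustration, not part of the patch] How a child driver (e.g.
 * an RDMA driver) might attach to a lif through this API; the
 * ionic_rdma_dev type and the caller's reset handler are hypothetical.
 */
struct ionic_rdma_dev;	/* hypothetical child-driver private type */

static int example_rdma_attach(struct net_device *netdev,
			       struct ionic_rdma_dev *rdev,
			       void (*reset_cb)(void *priv))
{
	void *handle;

	handle = ionic_get_handle_from_netdev(netdev, IONIC_API_VERSION,
					      IONIC_PRSN_RDMA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* fails with -EBUSY if another owner already set private data */
	return ionic_api_set_private(handle, rdev, reset_cb, IONIC_PRSN_RDMA);
}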
+ +const struct ionic_devinfo *ionic_api_get_devinfo(void *handle) +{ + struct ionic_lif *lif = handle; + + return &lif->ionic->idev.dev_info; +} +EXPORT_SYMBOL_GPL(ionic_api_get_devinfo); + +struct dentry *ionic_api_get_debug_ctx(void *handle) +{ + struct ionic_lif *lif = handle; + + return lif->dentry; +} +EXPORT_SYMBOL_GPL(ionic_api_get_debug_ctx); + +const union ionic_lif_identity *ionic_api_get_identity(void *handle, + int *lif_index) +{ + struct ionic_lif *lif = handle; + + if (lif_index) + *lif_index = lif->child_lif_cfg.index; + + /* TODO: Do all LIFs have the same ident? */ + return &lif->ionic->ident.lif; +} +EXPORT_SYMBOL_GPL(ionic_api_get_identity); + +int ionic_api_get_intr(void *handle, int *irq) +{ + struct ionic_lif *lif = handle; + struct ionic_intr_info *intr_obj; + int err; + + if (!lif->nrdma_eqs_avail) + return -ENOSPC; + + intr_obj = kzalloc(sizeof(*intr_obj), GFP_KERNEL); + if (!intr_obj) + return -ENOSPC; + + err = ionic_intr_alloc(lif->ionic, intr_obj); + if (err) + goto done; + + err = ionic_bus_get_irq(lif->ionic, intr_obj->index); + if (err < 0) { + ionic_intr_free(lif->ionic, intr_obj->index); + goto done; + } + + lif->nrdma_eqs_avail--; + + *irq = err; + err = intr_obj->index; +done: + kfree(intr_obj); + return err; +} +EXPORT_SYMBOL_GPL(ionic_api_get_intr); + +void ionic_api_put_intr(void *handle, int intr) +{ + struct ionic_lif *lif = handle; + + ionic_intr_free(lif->ionic, intr); + + lif->nrdma_eqs_avail++; +} +EXPORT_SYMBOL_GPL(ionic_api_put_intr); + +int ionic_api_get_cmb(void *handle, u32 *pgid, phys_addr_t *pgaddr, int order) +{ + struct ionic_lif *lif = handle; + + return ionic_get_cmb(lif, pgid, pgaddr, order); +} +EXPORT_SYMBOL_GPL(ionic_api_get_cmb); + +void ionic_api_put_cmb(void *handle, u32 pgid, int order) +{ + struct ionic_lif *lif = handle; + + ionic_put_cmb(lif, pgid, order); +} +EXPORT_SYMBOL_GPL(ionic_api_put_cmb); + +void ionic_api_kernel_dbpage(void *handle, + struct ionic_intr __iomem **intr_ctrl, + u32 *dbid, u64 __iomem **dbpage) +{ + struct ionic_lif *lif = handle; + + *intr_ctrl = lif->ionic->idev.intr_ctrl; + + *dbid = lif->kern_pid; + *dbpage = lif->kern_dbpage; +} +EXPORT_SYMBOL_GPL(ionic_api_kernel_dbpage); + +int ionic_api_get_dbid(void *handle, u32 *dbid, phys_addr_t *addr) +{ + struct ionic_lif *lif = handle; + int id, dbpage_num; + + + if (ionic_bus_dbpage_per_pid(lif->ionic)) { + mutex_lock(&lif->dbid_inuse_lock); + + if (!lif->dbid_inuse) { + mutex_unlock(&lif->dbid_inuse_lock); + return -EINVAL; + } + + id = find_first_zero_bit(lif->dbid_inuse, lif->dbid_count); + if (id == lif->dbid_count) { + mutex_unlock(&lif->dbid_inuse_lock); + return -ENOMEM; + } + + set_bit(id, lif->dbid_inuse); + + mutex_unlock(&lif->dbid_inuse_lock); + + dbpage_num = ionic_db_page_num(lif, id); + } else { + id = 0; + dbpage_num = 0; + } + + *dbid = id; + *addr = ionic_bus_phys_dbpage(lif->ionic, dbpage_num); + + return 0; +} +EXPORT_SYMBOL_GPL(ionic_api_get_dbid); + +void ionic_api_put_dbid(void *handle, int dbid) +{ + struct ionic_lif *lif = handle; + + if (ionic_bus_dbpage_per_pid(lif->ionic)) { + mutex_lock(&lif->dbid_inuse_lock); + if (lif->dbid_inuse) + clear_bit(dbid, lif->dbid_inuse); + mutex_unlock(&lif->dbid_inuse_lock); + } +} +EXPORT_SYMBOL_GPL(ionic_api_put_dbid); + +int ionic_api_adminq_post(void *handle, struct ionic_admin_ctx *ctx) +{ + struct ionic_lif *lif = handle; + + return ionic_adminq_post(lif, ctx); +} +EXPORT_SYMBOL_GPL(ionic_api_adminq_post); diff --git 
a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_api.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_api.h new file mode 100644 index 0000000000..9894c01bbd --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_api.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2017 - 2022 Pensando Systems, Inc. All rights reserved. */ + +#ifndef IONIC_API_H +#define IONIC_API_H + +#include +#include +#include + +#include "ionic_if.h" +#include "ionic_regs.h" + +/** + * IONIC_API_VERSION - Version number of this interface + * + * Any interface changes to this interface must also change the version. + * + * If netdev and other (eg, rdma) drivers are compiled from different sources, + * they are compatible only if IONIC_API_VERSION is statically the same in both + * sources. Drivers must have matching values of IONIC_API_VERSION at compile + * time, to be considered compatible at run time. + */ +#define IONIC_API_VERSION "8" + +struct dentry; + +/** + * struct ionic_devinfo - device information + * @asic_type: Device ASIC type code + * @asic_rev: Device ASIC revision code + * @fw_version: Device firmware version, as a string + * @serial_num: Device serial number, as a string + */ +struct ionic_devinfo { + u8 asic_type; + u8 asic_rev; + char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN + 1]; + char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN + 1]; +}; + +/** + * enum ionic_api_prsn - personalities that can be applied to a lif + * @IONIC_PRSN_NONE: No personality assigned + * @IONIC_PRSN_ETH: Ethernet NIC personality assigned + * @IONIC_PRSN_RDMA: RDMA HCA personality assigned + */ +enum ionic_api_prsn { + IONIC_PRSN_NONE = 0, + IONIC_PRSN_ETH, + IONIC_PRSN_RDMA, +}; + +/** + * ionic_get_handle_from_netdev() - Get a handle if the netdev is ionic + * @netdev: Net device to check + * @api_version: IONIC_API_VERSION + * @prsn: Personality to apply + * + * This returns an opaque handle if and only if the netdev was created + * by the ionic driver and the api version matches as described + * above for IONIC_API_VERSION. + * + * Return: Handle, if netdev is a compatible ionic device, or ERR_PTR(error) + */ +void *ionic_get_handle_from_netdev(struct net_device *netdev, + const char *api_version, + enum ionic_api_prsn prsn); + +/** + * ionic_api_stay_registered() - stay registered through net interface changes + * @handle: Handle to lif + * + * Return: true if the child device should ignore net deregistration events + */ +bool ionic_api_stay_registered(void *handle); + +/** + * ionic_api_request_reset() - request reset or disable the device or lif + * @handle: Handle to lif + * + * The reset will be carried out asynchronously. If it succeeds, then the + * callback specified in ionic_api_set_private() will be called. + */ +void ionic_api_request_reset(void *handle); + +/** + * ionic_api_get_private() - Get private data associated with the lif + * @handle: Handle to lif + * @prsn: Personality to which the private data applies + * + * Get the private data of some kind. The private data may be, for example, an + * instance of an rdma device for this lif. 
+ * + * Return: private data or NULL + */ +void *ionic_api_get_private(void *handle, enum ionic_api_prsn prsn); + +/** + * ionic_api_set_private() - Set private data associated with the lif + * @handle: Handle to lif + * @priv: Private data or NULL + * @reset_cb: Callback if device has been disabled or reset + * @prsn: Personality to which the private data applies + * + * Set the private data of some kind. The private data may be, for example, an + * instance of an rdma device for this lif. + * + * This will fail if private data is already set for that personality. + * + * Return: zero or negative error status + */ +int ionic_api_set_private(void *handle, void *priv, + void (*reset_cb)(void *priv), + enum ionic_api_prsn prsn); + +/** + * ionic_api_clear_private() - Clear private data associated with the lif + * @handle: Handle to lif + */ +static inline void ionic_api_clear_private(void *handle) +{ + (void)ionic_api_set_private(handle, NULL, NULL, IONIC_PRSN_NONE); +} + +/** + * ionic_api_get_device() - Get the underlying device + * @handle: Handle to lif + * + * Return: pointer to underlying OS struct device associated with the lif + */ +struct device *ionic_api_get_device(void *handle); + +/** + * ionic_api_get_devinfo() - Get device information + * @handle: Handle to lif + * + * Return: pointer to device information + */ +const struct ionic_devinfo *ionic_api_get_devinfo(void *handle); + +/** + * ionic_api_get_debug_ctx() - Get the debug context (if any) for the lif + * @handle: Handle to lif + * + * This is the directory entry of the LIF in debugfs. + * + * Return: debug context for the lif or NULL + */ +struct dentry *ionic_api_get_debug_ctx(void *handle); + +/** + * ionic_api_get_identity() - Get result of device identification + * @handle: Handle to lif + * @lif_index: This lif index + * + * Return: pointer to result of identification + */ +const union ionic_lif_identity *ionic_api_get_identity(void *handle, + int *lif_index); + +/** + * ionic_api_get_intr() - Reserve a device interrupt index + * @handle: Handle to lif + * @irq: OS interrupt number returned + * + * Reserve an interrupt index, and indicate the irq number for that index. + * + * Return: interrupt index or negative error status + */ +int ionic_api_get_intr(void *handle, int *irq); + +/** + * ionic_api_put_intr() - Release a device interrupt index + * @handle: Handle to lif + * @intr: Interrupt index + * + * Mark the interrupt index unused so that it can be reserved again. + */ +void ionic_api_put_intr(void *handle, int intr); + +/** + * ionic_api_get_cmb() - Reserve cmb pages + * @handle: Handle to lif + * @pgid: First page index + * @pgaddr: First page bus addr (contiguous) + * @order: Log base two number of pages (PAGE_SIZE) + * + * Return: zero or negative error status + */ +int ionic_api_get_cmb(void *handle, u32 *pgid, phys_addr_t *pgaddr, int order); + +/** + * ionic_api_put_cmb() - Release cmb pages + * @handle: Handle to lif + * @pgid: First page index + * @order: Log base two number of pages (PAGE_SIZE) + */ +void ionic_api_put_cmb(void *handle, u32 pgid, int order); + +/** + * ionic_api_kernel_dbpage() - Get mapped doorbell page for use in kernel space + * @handle: Handle to lif + * @intr_ctrl: Interrupt control registers + * @dbid: Doorbell id for use in kernel space + * @dbpage: One ioremapped doorbell page for use in kernel space + * + * This also provides mapped interrupt control registers. + * + * The id and page returned here refer to the doorbell page reserved for use in + * kernel space for this lif. 
For user space, use ionic_api_get_dbid to + * allocate a doorbell id for exclusive use by a process. + */ +void ionic_api_kernel_dbpage(void *handle, + struct ionic_intr __iomem **intr_ctrl, + u32 *dbid, u64 __iomem **dbpage); + +/** + * ionic_api_get_dbid() - Reserve a doorbell id + * @handle: Handle to lif + * @dbid: Doorbell id + * @addr: Phys address of doorbell page + * + * Reserve a doorbell id. This corresponds with exactly one doorbell page at + * an offset from the doorbell page base address, that can be mapped into a + * user space process. + * + * Return: zero on success or negative error status + */ +int ionic_api_get_dbid(void *handle, u32 *dbid, phys_addr_t *addr); + +/** + * ionic_api_put_dbid() - Release a doorbell id + * @handle: Handle to lif + * @dbid: Doorbell id + * + * Mark the doorbell id unused, so that it can be reserved again. + */ +void ionic_api_put_dbid(void *handle, int dbid); + +/** + * struct ionic_admin_ctx - Admin command context + * @work: Work completion wait queue element + * @cmd: Admin command (64B) to be copied to the queue + * @comp: Admin completion (16B) copied from the queue + */ +struct ionic_admin_ctx { + struct completion work; + union ionic_adminq_cmd cmd; + union ionic_adminq_comp comp; +}; + +/** + * ionic_api_adminq_post() - Post an admin command + * @handle: Handle to lif + * @ctx: API admin command context + * + * Post the command to an admin queue in the ethernet driver. If this command + * succeeds, then the command has been posted, but that does not indicate a + * completion. If this command returns success, then the completion callback + * will eventually be called. + * + * Return: zero or negative error status + */ +int ionic_api_adminq_post(void *handle, struct ionic_admin_ctx *ctx); + +/** + * ionic_error_to_errno() - Transform ionic_if errors to os errno + * @code: Ionic error number + * + * Return: Negative OS error number or zero + */ +int ionic_error_to_errno(enum ionic_status_code code); + +#endif /* IONIC_API_H */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_bus.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_bus.h new file mode 100644 index 0000000000..6cbe6f17ad --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_bus.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#ifndef _IONIC_BUS_H_ +#define _IONIC_BUS_H_ + +int ionic_bus_get_irq(struct ionic *ionic, unsigned int num); +const char *ionic_bus_info(struct ionic *ionic); +int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs); +void ionic_bus_free_irq_vectors(struct ionic *ionic); +int ionic_bus_register_driver(void); +void ionic_bus_unregister_driver(void); +struct net_device *ionic_alloc_netdev(struct ionic *ionic); +void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num); +void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page); +phys_addr_t ionic_bus_phys_dbpage(struct ionic *ionic, int page_num); + +static inline bool ionic_bus_dbpage_per_pid(struct ionic *ionic) +{ + return ionic->pdev; +} + +#endif /* _IONIC_BUS_H_ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_bus_pci.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_bus_pci.c new file mode 100644 index 0000000000..b44e414e6e --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_bus_pci.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include +#include +#include + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_debugfs.h" + +/* Supported devices */ +static const struct pci_device_id ionic_id_table[] = { + { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF) }, + { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF) }, + { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT) }, + { 0, } /* end of table */ +}; +MODULE_DEVICE_TABLE(pci, ionic_id_table); + +int ionic_bus_get_irq(struct ionic *ionic, unsigned int num) +{ +#ifdef HAVE_PCI_IRQ_API + return pci_irq_vector(ionic->pdev, num); +#else + return ionic->msix[num].vector; +#endif +} + +const char *ionic_bus_info(struct ionic *ionic) +{ + return pci_name(ionic->pdev); +} + +int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs) +{ +#ifdef HAVE_PCI_IRQ_API + return pci_alloc_irq_vectors(ionic->pdev, nintrs, nintrs, + PCI_IRQ_MSIX); +#else + int err; + int i; + + if (ionic->msix) + return -EBUSY; + + ionic->msix = devm_kzalloc(ionic->dev, + sizeof(*ionic->msix) * nintrs, GFP_KERNEL); + if (!ionic->msix) + return -ENOMEM; + for (i = 0; i < nintrs; i++) + ionic->msix[i].entry = i; + err = pci_enable_msix_exact(ionic->pdev, ionic->msix, nintrs); + if (err < 0) { + devm_kfree(ionic->dev, ionic->msix); + ionic->msix = NULL; + return err; + } + return nintrs; +#endif +} + +void ionic_bus_free_irq_vectors(struct ionic *ionic) +{ + if (!ionic->nintrs) + return; + +#ifdef HAVE_PCI_IRQ_API + pci_free_irq_vectors(ionic->pdev); +#else + pci_disable_msix(ionic->pdev); + devm_kfree(ionic->dev, ionic->msix); + ionic->msix = NULL; +#endif +} + +struct net_device *ionic_alloc_netdev(struct ionic *ionic) +{ + dev_dbg(ionic->dev, "nxqs=%d nlifs=%d nintrs=%d\n", + ionic->ntxqs_per_lif, ionic->nlifs, ionic->nintrs); + + return alloc_etherdev_mqs(sizeof(struct ionic_lif), + ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); +} + +static int ionic_map_bars(struct ionic *ionic) +{ + struct pci_dev *pdev = ionic->pdev; + struct device *dev = ionic->dev; + struct ionic_dev_bar *bars; + unsigned int i, j; + + bars = ionic->bars; + ionic->num_bars = 0; + + for (i = 0, j = 0; i < IONIC_BARS_MAX; i++) { + if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) + continue; + bars[j].len = pci_resource_len(pdev, i); + + /* only map the whole bar 0 */ + if (j > 0) { + bars[j].vaddr = NULL; + } else { + bars[j].vaddr = pci_iomap(pdev, i, bars[j].len); + if (!bars[j].vaddr) { + dev_err(dev, + "Cannot memory-map BAR %d, aborting\n", + i); + return -ENODEV; + } + } + + bars[j].bus_addr = pci_resource_start(pdev, i); + bars[j].res_index = i; + ionic->num_bars++; + j++; + } + + ionic_debugfs_add_bars(ionic); + + return 0; +} + +static void ionic_unmap_bars(struct ionic *ionic) +{ + struct ionic_dev_bar *bars = ionic->bars; + unsigned int i; + + for (i = 0; i < IONIC_BARS_MAX; i++) { + if (bars[i].vaddr) { + iounmap(bars[i].vaddr); + bars[i].bus_addr = 0; + bars[i].vaddr = NULL; + bars[i].len = 0; + } + } +} + +void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num) +{ +#ifdef HAVE_PCI_IOMAP_RANGE + return pci_iomap_range(ionic->pdev, + ionic->bars[IONIC_PCI_BAR_DBELL].res_index, + (u64)page_num << PAGE_SHIFT, PAGE_SIZE); +#else + int bar = ionic->bars[IONIC_PCI_BAR_DBELL].res_index; + phys_addr_t start = pci_resource_start(ionic->pdev, bar); + phys_addr_t offset = start + ((phys_addr_t)page_num << PAGE_SHIFT); + + return ioremap(offset, PAGE_SIZE); 
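+ /* Like the pci_iomap_range() path above, this maps a single PAGE_SIZE window at byte offset page_num << PAGE_SHIFT within the doorbell BAR; the physical address is computed by hand only because older kernels lack pci_iomap_range(). */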
+#endif /* HAVE_PCI_IOMAP_RANGE */ +} + +void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page) +{ + iounmap(page); +} + +phys_addr_t ionic_bus_phys_dbpage(struct ionic *ionic, int page_num) +{ + return ionic->bars[IONIC_PCI_BAR_DBELL].bus_addr + + ((phys_addr_t)page_num << PAGE_SHIFT); +} + +static void ionic_vf_dealloc_locked(struct ionic *ionic) +{ + struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_STATSADDR }; + struct ionic_vf *v; + int i; + + if (!ionic->vfs) + return; + + for (i = ionic->num_vfs - 1; i >= 0; i--) { + v = &ionic->vfs[i]; + + if (v->stats_pa) { + vfc.stats_pa = 0; + (void)ionic_set_vf_config(ionic, i, &vfc); + dma_unmap_single(ionic->dev, v->stats_pa, + sizeof(v->stats), DMA_FROM_DEVICE); + v->stats_pa = 0; + } + } + + kfree(ionic->vfs); + ionic->vfs = NULL; + ionic->num_vfs = 0; +} + +static void ionic_vf_dealloc(struct ionic *ionic) +{ + down_write(&ionic->vf_op_lock); + ionic_vf_dealloc_locked(ionic); + up_write(&ionic->vf_op_lock); +} + +static int ionic_vf_alloc(struct ionic *ionic, int num_vfs) +{ + struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_STATSADDR }; + struct ionic_vf *v; + int err = 0; + int i; + + down_write(&ionic->vf_op_lock); + + ionic->vfs = kcalloc(num_vfs, sizeof(struct ionic_vf), GFP_KERNEL); + if (!ionic->vfs) { + err = -ENOMEM; + goto out; + } + + for (i = 0; i < num_vfs; i++) { + v = &ionic->vfs[i]; + v->stats_pa = dma_map_single(ionic->dev, &v->stats, + sizeof(v->stats), DMA_FROM_DEVICE); + if (dma_mapping_error(ionic->dev, v->stats_pa)) { + dev_err(ionic->dev, "DMA mapping failed for vf[%d] stats\n", i); + v->stats_pa = 0; + err = -ENODEV; + goto out; + } + + ionic->num_vfs++; + + /* ignore failures from older FW, we just won't get stats */ + vfc.stats_pa = cpu_to_le64(v->stats_pa); + (void)ionic_set_vf_config(ionic, i, &vfc); + } + +out: + if (err) + ionic_vf_dealloc_locked(ionic); + up_write(&ionic->vf_op_lock); + return err; +} + +static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct ionic *ionic = pci_get_drvdata(pdev); + struct device *dev = ionic->dev; + int ret = 0; + + if (ionic->lif && + test_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state)) + return -EBUSY; + + if (num_vfs > 0) { + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) { + dev_err(dev, "Cannot enable SRIOV: %d\n", ret); + goto out; + } + + ret = ionic_vf_alloc(ionic, num_vfs); + if (ret) { + dev_err(dev, "Cannot alloc VFs: %d\n", ret); + pci_disable_sriov(pdev); + goto out; + } + + ret = num_vfs; + } else { + pci_disable_sriov(pdev); + ionic_vf_dealloc(ionic); + } + +out: + return ret; +} + +static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct ionic *ionic; + int num_vfs; + int err; + + ionic = ionic_devlink_alloc(dev); + if (!ionic) + return -ENOMEM; + + ionic->pdev = pdev; + ionic->dev = dev; + pci_set_drvdata(pdev, ionic); + mutex_init(&ionic->dev_cmd_lock); + + ionic->is_mgmt_nic = + ent->device == PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT; + ionic->pfdev = NULL; + + err = ionic_set_dma_mask(ionic); + if (err) { + dev_err(dev, "Cannot set DMA mask: %d, aborting\n", err); + goto err_out_clear_drvdata; + } + + ionic_debugfs_add_dev(ionic); + + /* Setup PCI device */ + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(dev, "Cannot enable PCI device: %d, aborting\n", err); + goto err_out_debugfs_del_dev; + } + + err = pci_request_regions(pdev, IONIC_DRV_NAME); + if (err) { + dev_err(dev, "Cannot request PCI regions: %d, aborting\n", err); 
+ goto err_out_pci_disable_device; + } + pcie_print_link_status(pdev); + + err = ionic_map_bars(ionic); + if (err) + goto err_out_pci_release_regions; + + /* Configure the device */ + err = ionic_setup(ionic); + if (err) { + dev_err(dev, "Cannot setup device: %d, aborting\n", err); + goto err_out_unmap_bars; + } + pci_set_master(pdev); + + err = ionic_identify(ionic); + if (err) { + dev_err(dev, "Cannot identify device: %d, aborting\n", err); + goto err_out_teardown; + } + ionic_debugfs_add_ident(ionic); + + err = ionic_init(ionic); + if (err) { + dev_err(dev, "Cannot init device: %d, aborting\n", err); + goto err_out_teardown; + } + + /* Configure the ports */ + err = ionic_port_identify(ionic); + if (err) { + dev_err(dev, "Cannot identify port: %d, aborting\n", err); + goto err_out_reset; + } + + err = ionic_port_init(ionic); + if (err) { + dev_err(dev, "Cannot init port: %d, aborting\n", err); + goto err_out_reset; + } + + /* Allocate and init the LIF */ + err = ionic_lif_size(ionic); + if (err) { + dev_err(dev, "Cannot size LIF: %d, aborting\n", err); + goto err_out_port_reset; + } + + err = ionic_lif_alloc(ionic); + if (err) { + dev_err(dev, "Cannot allocate LIF: %d, aborting\n", err); + goto err_out_free_irqs; + } + + err = ionic_lif_init(ionic->lif); + if (err) { + dev_err(dev, "Cannot init LIF: %d, aborting\n", err); + goto err_out_free_lifs; + } + + init_rwsem(&ionic->vf_op_lock); + num_vfs = pci_num_vf(pdev); + if (num_vfs) { + dev_info(dev, "%d VFs found already enabled\n", num_vfs); + err = ionic_vf_alloc(ionic, num_vfs); + if (err) + dev_err(dev, "Cannot enable existing VFs: %d\n", err); + } + + err = ionic_devlink_register(ionic); + if (err) { + dev_err(dev, "Cannot register devlink: %d\n", err); + goto err_out_deinit_lifs; + } + + err = ionic_lif_register(ionic->lif); + if (err) { + dev_err(dev, "Cannot register LIF: %d, aborting\n", err); + goto err_out_deregister_devlink; + } + + mod_timer(&ionic->watchdog_timer, + round_jiffies(jiffies + ionic->watchdog_period)); + + return 0; + +err_out_deregister_devlink: + ionic_devlink_unregister(ionic); +err_out_deinit_lifs: + ionic_vf_dealloc(ionic); + ionic_lif_deinit(ionic->lif); +err_out_free_lifs: + ionic_lif_free(ionic->lif); + ionic->lif = NULL; +err_out_free_irqs: + ionic_bus_free_irq_vectors(ionic); +err_out_port_reset: + ionic_port_reset(ionic); +err_out_reset: + ionic_reset(ionic); +err_out_teardown: + ionic_dev_teardown(ionic); + pci_clear_master(pdev); + /* Don't fail the probe for these errors, keep + * the hw interface around for inspection + */ + return 0; + +err_out_unmap_bars: + ionic_unmap_bars(ionic); +err_out_pci_release_regions: + pci_release_regions(pdev); +err_out_pci_disable_device: + pci_disable_device(pdev); +err_out_debugfs_del_dev: + ionic_debugfs_del_dev(ionic); +err_out_clear_drvdata: + mutex_destroy(&ionic->dev_cmd_lock); + ionic_devlink_free(ionic); + pci_set_drvdata(pdev, NULL); + + return err; +} + +static void ionic_remove(struct pci_dev *pdev) +{ + struct ionic *ionic = pci_get_drvdata(pdev); + + if (!ionic) + return; + + del_timer_sync(&ionic->watchdog_timer); + + if (ionic->lif) { + ionic_lif_unregister(ionic->lif); + ionic_devlink_unregister(ionic); + ionic_lif_deinit(ionic->lif); + ionic_lif_free(ionic->lif); + ionic->lif = NULL; + ionic_bus_free_irq_vectors(ionic); + } + + ionic_port_reset(ionic); + ionic_reset(ionic); + ionic_dev_teardown(ionic); + pci_clear_master(pdev); + ionic_unmap_bars(ionic); + pci_release_regions(pdev); + pci_disable_device(pdev); + ionic_debugfs_del_dev(ionic); + 
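/* teardown mirrors the tail of ionic_probe(): with the lif and PCI resources released, the devcmd lock and devlink wrapper are destroyed last */ +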
mutex_destroy(&ionic->dev_cmd_lock); + ionic_devlink_free(ionic); +} + +static struct pci_driver ionic_driver = { + .name = IONIC_DRV_NAME, + .id_table = ionic_id_table, + .probe = ionic_probe, + .remove = ionic_remove, + .sriov_configure = ionic_sriov_configure, +}; + +int ionic_bus_register_driver(void) +{ + return pci_register_driver(&ionic_driver); +} + +void ionic_bus_unregister_driver(void) +{ + pci_unregister_driver(&ionic_driver); +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_bus_platform.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_bus_platform.c new file mode 100644 index 0000000000..dae106b986 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_bus_platform.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_debugfs.h" + +#define IONIC_DEV_BAR 0 +#define IONIC_INTR_CTRL_BAR 1 +#define IONIC_MSIX_CFG_BAR 2 +#define IONIC_DOORBELL_BAR 3 +#define IONIC_TSTAMP_BAR 4 + +#define IONIC_REQUIRED_BARS 4 +#define IONIC_NUM_OF_BAR 5 + +#define IONIC_INTR_MSIXCFG_STRIDE 0x10 + +struct ionic_intr_msixcfg { + __le64 msgaddr; + __le32 msgdata; + __le32 vector_ctrl; +}; + +static void *ionic_intr_msixcfg_addr(struct device *mnic_dev, const int intr) +{ + struct ionic_dev *idev = (struct ionic_dev *) mnic_dev->platform_data; + + dev_info(mnic_dev, "msix_cfg_base: %p\n", idev->msix_cfg_base); + return (idev->msix_cfg_base + (intr * IONIC_INTR_MSIXCFG_STRIDE)); +} + +static void ionic_intr_msixcfg(struct device *mnic_dev, + const int intr, const u64 msgaddr, + const u32 msgdata, const int vctrl) +{ + void *pa = ionic_intr_msixcfg_addr(mnic_dev, intr); + + writeq(msgaddr, (pa + offsetof(struct ionic_intr_msixcfg, msgaddr))); + writel(msgdata, (pa + offsetof(struct ionic_intr_msixcfg, msgdata))); + writel(vctrl, (pa + offsetof(struct ionic_intr_msixcfg, vector_ctrl))); +} + +/* Resources can only be mapped once at a time. A second mapping will fail. + * For resources that are shared by multiple devices, we avoid using devm, + * because the mapping will not be used exclusively by one device, and if + * devices are unregistered in any order, the mapping must not be destroyed + * when the first device is unregistered, when other devices may still be using + * it. ionic_shared_resource just maintains a refcount for mapping a shared + * resource for use by multiple ionic devices. 
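+ * The first caller requests and ioremaps the region and takes the first reference; later callers just take another reference and reuse the mapping; the last release iounmaps and frees the region.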
+ */ +struct ionic_shared_resource { + struct mutex lock; + void __iomem *base; + int refs; +}; + +#define IONIC_SHARED_RESOURCE_INITIALIZER(shres) { .lock = __MUTEX_INITIALIZER(shres.lock) } + +static void __iomem *ionic_ioremap_shared_resource(struct ionic_shared_resource *shres, + struct resource *res) +{ + void __iomem *base; + + mutex_lock(&shres->lock); + + if (shres->refs) { + base = shres->base; + ++shres->refs; + } else { + if (!request_mem_region(res->start, resource_size(res), res->name ?: KBUILD_MODNAME)) { + base = IOMEM_ERR_PTR(-EBUSY); + } else { + base = ioremap(res->start, resource_size(res)); + if (!IS_ERR_OR_NULL(base)) { + shres->base = base; + ++shres->refs; + } + } + } + + mutex_unlock(&shres->lock); + + return base; +} + +static void ionic_iounmap_shared_resource(struct ionic_shared_resource *shres, + void __iomem *vaddr, + resource_size_t start, + resource_size_t n) +{ + mutex_lock(&shres->lock); + + if (WARN_ON(!shres->refs)) { + mutex_unlock(&shres->lock); + return; + } + + --shres->refs; + + if (!shres->refs) { + iounmap(vaddr); + release_mem_region(start, n); + } + + mutex_unlock(&shres->lock); +} + +static struct ionic_shared_resource tstamp_res = + IONIC_SHARED_RESOURCE_INITIALIZER(tstamp_res); + +int ionic_bus_get_irq(struct ionic *ionic, unsigned int num) +{ + struct msi_desc *desc; + int i = 0; + + msi_for_each_desc(desc, ionic->dev, MSI_DESC_ALL) { + if (i == num) { + pr_info("[i = %d] msi_entry: %d.%d\n", + i, desc->msi_index, + desc->irq); + + return desc->irq; + } + i++; + } + + return -1; //return error if user is asking more irqs than allocated +} + +const char *ionic_bus_info(struct ionic *ionic) +{ + return ionic->pfdev->name; +} + +static void ionic_mnic_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg) +{ + dev_dbg(desc->dev, "msi_index: [%d] (msi_addr hi_lo): %x_%x msi_data: %x\n", + desc->msi_index, msg->address_hi, + msg->address_lo, msg->data); + + ionic_intr_msixcfg(desc->dev, desc->msi_index, + (((u64)msg->address_hi << 32) | msg->address_lo), + msg->data, 0/*vctrl*/); +} + +int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs) +{ + int err = 0; + + err = platform_msi_domain_alloc_irqs(ionic->dev, nintrs, + ionic_mnic_set_msi_msg); + if (err) + return err; + + return nintrs; +} + +void ionic_bus_free_irq_vectors(struct ionic *ionic) +{ + platform_msi_domain_free_irqs(ionic->dev); +} + +struct net_device *ionic_alloc_netdev(struct ionic *ionic) +{ + struct net_device *netdev = NULL; + struct ionic_lif *lif; + + netdev = alloc_netdev_mqs(sizeof(struct ionic_lif), ionic->pfdev->name, + NET_NAME_USER, ether_setup, + ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); + if (!netdev) + return netdev; + + lif = netdev_priv(netdev); + + /* lif name is used for naming the interrupt handler so better + * to name them differently for mnic + */ + snprintf(lif->name, sizeof(lif->name), "%s-", ionic->pfdev->name); + + return netdev; +} + +static int ionic_mnic_dev_setup(struct ionic *ionic) +{ + unsigned int num_bars = ionic->num_bars; + struct ionic_dev *idev = &ionic->idev; + u32 sig; + + if (num_bars < IONIC_REQUIRED_BARS) + return -EFAULT; + + idev->dev_info_regs = ionic->bars[IONIC_DEV_BAR].vaddr; + idev->dev_cmd_regs = ionic->bars[IONIC_DEV_BAR].vaddr + + offsetof(union ionic_dev_regs, devcmd); + idev->intr_ctrl = ionic->bars[IONIC_INTR_CTRL_BAR].vaddr; + idev->msix_cfg_base = ionic->bars[IONIC_MSIX_CFG_BAR].vaddr; + if (num_bars > IONIC_TSTAMP_BAR) + idev->hwstamp_regs = ionic->bars[IONIC_TSTAMP_BAR].vaddr; + else + 
idev->hwstamp_regs = NULL; + + /* save the idev into dev->platform_data so we can use it later */ + ionic->dev->platform_data = idev; + + sig = ioread32(&idev->dev_info_regs->signature); + if (sig != IONIC_DEV_INFO_SIGNATURE) + return -EFAULT; + + ionic_init_devinfo(ionic); + ionic_watchdog_init(ionic); + + idev->db_pages = ionic->bars[IONIC_DOORBELL_BAR].vaddr; + idev->phy_db_pages = ionic->bars[IONIC_DOORBELL_BAR].bus_addr; + + ionic_debugfs_add_dev_cmd(ionic); + + return 0; +} + +static int ionic_map_bars(struct ionic *ionic) +{ + struct platform_device *pfdev = ionic->pfdev; + struct ionic_dev_bar *bars = ionic->bars; + struct device *dev = ionic->dev; + struct resource *res; + unsigned int i, j; + void *base; + + ionic->num_bars = 0; + for (i = 0, j = 0; i < IONIC_BARS_MAX; i++) { + res = platform_get_resource(pfdev, IORESOURCE_MEM, i); + if (!res) + continue; + if (i == IONIC_TSTAMP_BAR) + base = ionic_ioremap_shared_resource(&tstamp_res, res); + else + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) { + dev_err(dev, "Cannot memory-map BAR %d, aborting\n", j); + return -ENODEV; + } + bars[j].len = res->end - res->start + 1; + bars[j].vaddr = base; + bars[j].bus_addr = res->start; + ionic->num_bars++; + j++; + } + + ionic_debugfs_add_bars(ionic); + + return 0; +} + +static void ionic_unmap_bars(struct ionic *ionic) +{ + struct ionic_dev_bar *bars = ionic->bars; + struct device *dev = ionic->dev; + unsigned int i; + + for (i = 0; i < IONIC_BARS_MAX; i++) + if (bars[i].vaddr) { + dev_info(dev, "Unmapping BAR %d @%p, bus_addr: %llx\n", + i, bars[i].vaddr, bars[i].bus_addr); + if (i == IONIC_TSTAMP_BAR) { + ionic_iounmap_shared_resource(&tstamp_res, bars[i].vaddr, bars[i].bus_addr, bars[i].len); + } else { + devm_iounmap(dev, bars[i].vaddr); + devm_release_mem_region(dev, bars[i].bus_addr, bars[i].len); + } + } +} + +void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num) +{ + return ionic->idev.db_pages; +} + +void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page) +{ +} + +phys_addr_t ionic_bus_phys_dbpage(struct ionic *ionic, int page_num) +{ + return ionic->idev.phy_db_pages; +} + +int ionic_probe(struct platform_device *pfdev) +{ + struct device *dev = &pfdev->dev; + struct device_node *np; + struct ionic *ionic; + int err; + + ionic = devm_kzalloc(dev, sizeof(*ionic), GFP_KERNEL); + if (!ionic) + return -ENOMEM; + + ionic->pfdev = pfdev; + platform_set_drvdata(pfdev, ionic); + ionic->dev = dev; + mutex_init(&ionic->dev_cmd_lock); + + np = dev->of_node; + if (!np) { + dev_err(dev, "No device tree node\n"); + return -EINVAL; + } + + err = of_reserved_mem_device_init_by_idx(dev, np, 0); + if (err != 0 && err != -ENODEV) { + dev_err(dev, "Failed to init reserved memory region\n"); + return err; + } + + err = ionic_set_dma_mask(ionic); + if (err) { + dev_err(dev, "Cannot set DMA mask, aborting\n"); + return err; + } + + ionic_debugfs_add_dev(ionic); + + /* Setup platform device */ + err = ionic_map_bars(ionic); + if (err) + goto err_out_unmap_bars; + + /* Discover ionic dev resources */ + err = ionic_mnic_dev_setup(ionic); + if (err) { + dev_err(dev, "Cannot setup device, aborting\n"); + goto err_out_unmap_bars; + } + + err = ionic_identify(ionic); + if (err) { + dev_err(dev, "Cannot identify device, aborting\n"); + goto err_out_unmap_bars; + } + ionic_debugfs_add_ident(ionic); + + err = ionic_init(ionic); + if (err) { + dev_err(dev, "Cannot init device, aborting\n"); + goto err_out_unmap_bars; + } + + /* Configure the ports */ + err = 
ionic_port_identify(ionic); + if (err) { + dev_err(dev, "Cannot identify port: %d, aborting\n", err); + goto err_out_unmap_bars; + } + + if (ionic->ident.port.type == IONIC_ETH_HOST_MGMT || + ionic->ident.port.type == IONIC_ETH_MNIC_INTERNAL_MGMT) + ionic->is_mgmt_nic = true; + + err = ionic_port_init(ionic); + if (err) { + dev_err(dev, "Cannot init port: %d, aborting\n", err); + goto err_out_unmap_bars; + } + + /* Allocate and init the LIF */ + err = ionic_lif_size(ionic); + if (err) { + dev_err(dev, "Cannot size LIF: %d, aborting\n", err); + goto err_out_unmap_bars; + } + + err = ionic_lif_alloc(ionic); + if (err) { + dev_err(dev, "Cannot allocate LIF: %d, aborting\n", err); + goto err_out_free_irqs; + } + + err = ionic_lif_init(ionic->lif); + if (err) { + dev_err(dev, "Cannot init LIF: %d, aborting\n", err); + goto err_out_free_lifs; + } + + err = ionic_lif_register(ionic->lif); + if (err) { + dev_err(dev, "Cannot register LIF: %d, aborting\n", err); + goto err_out_deinit_lifs; + } + + mod_timer(&ionic->watchdog_timer, + round_jiffies(jiffies + ionic->watchdog_period)); + + return 0; + +err_out_deinit_lifs: + ionic_lif_deinit(ionic->lif); +err_out_free_lifs: + ionic_lif_free(ionic->lif); + ionic->lif = NULL; +err_out_free_irqs: + ionic_bus_free_irq_vectors(ionic); +err_out_unmap_bars: + ionic_unmap_bars(ionic); + ionic_debugfs_del_dev(ionic); + mutex_destroy(&ionic->dev_cmd_lock); + platform_set_drvdata(pfdev, NULL); + + return err; +} +EXPORT_SYMBOL_GPL(ionic_probe); + +int ionic_remove(struct platform_device *pfdev) +{ + struct ionic *ionic = platform_get_drvdata(pfdev); + + if (ionic) { + del_timer_sync(&ionic->watchdog_timer); + ionic_lif_unregister(ionic->lif); + ionic_lif_deinit(ionic->lif); + ionic_lif_free(ionic->lif); + ionic->lif = NULL; + ionic_port_reset(ionic); + ionic_reset(ionic); + ionic_bus_free_irq_vectors(ionic); + ionic_unmap_bars(ionic); + ionic_debugfs_del_dev(ionic); + + mutex_destroy(&ionic->dev_cmd_lock); + + dev_info(ionic->dev, "removed\n"); + } + + return 0; +} +EXPORT_SYMBOL_GPL(ionic_remove); + +static const struct of_device_id mnic_of_match[] = { + {.compatible = "pensando,ionic-mnic"}, + {/* end of table */} +}; + +static struct platform_driver ionic_driver = { + .probe = ionic_probe, + .remove = ionic_remove, + .driver = { + .name = "ionic-mnic", + .owner = THIS_MODULE, + .of_match_table = mnic_of_match, + }, +}; + +int ionic_bus_register_driver(void) +{ + return platform_driver_register(&ionic_driver); +} + +void ionic_bus_unregister_driver(void) +{ + platform_driver_unregister(&ionic_driver); +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_debugfs.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_debugfs.c new file mode 100644 index 0000000000..9ddb8f2fb6 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_debugfs.c @@ -0,0 +1,584 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_ethtool.h" +#include "ionic_debugfs.h" +#include "kcompat.h" + +#ifdef CONFIG_DEBUG_FS + +static struct dentry *ionic_dir; + +void ionic_debugfs_create(void) +{ + ionic_dir = debugfs_create_dir(IONIC_DRV_NAME, NULL); +} + +void ionic_debugfs_destroy(void) +{ + debugfs_remove_recursive(ionic_dir); +} + +void ionic_debugfs_add_dev(struct ionic *ionic) +{ + ionic->dentry = debugfs_create_dir(ionic_bus_info(ionic), ionic_dir); +} + +void 
ionic_debugfs_del_dev(struct ionic *ionic) +{ + debugfs_remove_recursive(ionic->dentry); + ionic->dentry = NULL; +} + +static int bars_show(struct seq_file *seq, void *v) +{ + struct ionic *ionic = seq->private; + struct ionic_dev_bar *bars = ionic->bars; + unsigned int i; + + for (i = 0; i < IONIC_BARS_MAX; i++) + if (bars[i].len) + seq_printf(seq, "BAR%d: res %d len 0x%08lx vaddr %pK bus_addr 0x%016llx\n", + i, bars[i].res_index, bars[i].len, + bars[i].vaddr, bars[i].bus_addr); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(bars); + +void ionic_debugfs_add_bars(struct ionic *ionic) +{ + debugfs_create_file("bars", 0400, ionic->dentry, ionic, &bars_fops); +} + +static const struct debugfs_reg32 dev_cmd_regs[] = { + { .name = "db", .offset = 0, }, + { .name = "done", .offset = 4, }, + { .name = "cmd.word[0]", .offset = 8, }, + { .name = "cmd.word[1]", .offset = 12, }, + { .name = "cmd.word[2]", .offset = 16, }, + { .name = "cmd.word[3]", .offset = 20, }, + { .name = "cmd.word[4]", .offset = 24, }, + { .name = "cmd.word[5]", .offset = 28, }, + { .name = "cmd.word[6]", .offset = 32, }, + { .name = "cmd.word[7]", .offset = 36, }, + { .name = "cmd.word[8]", .offset = 40, }, + { .name = "cmd.word[9]", .offset = 44, }, + { .name = "cmd.word[10]", .offset = 48, }, + { .name = "cmd.word[11]", .offset = 52, }, + { .name = "cmd.word[12]", .offset = 56, }, + { .name = "cmd.word[13]", .offset = 60, }, + { .name = "cmd.word[14]", .offset = 64, }, + { .name = "cmd.word[15]", .offset = 68, }, + { .name = "comp.word[0]", .offset = 72, }, + { .name = "comp.word[1]", .offset = 76, }, + { .name = "comp.word[2]", .offset = 80, }, + { .name = "comp.word[3]", .offset = 84, }, +}; + +void ionic_debugfs_add_dev_cmd(struct ionic *ionic) +{ + struct debugfs_regset32 *dev_cmd_regset; + struct device *dev = ionic->dev; + + dev_cmd_regset = devm_kzalloc(dev, sizeof(*dev_cmd_regset), GFP_KERNEL); + if (!dev_cmd_regset) + return; + dev_cmd_regset->regs = dev_cmd_regs; + dev_cmd_regset->nregs = ARRAY_SIZE(dev_cmd_regs); + dev_cmd_regset->base = ionic->idev.dev_cmd_regs; + + debugfs_create_regset32("dev_cmd", 0400, ionic->dentry, dev_cmd_regset); +} + +static void identity_show_qtype(struct seq_file *seq, const char *name, + struct ionic_lif_logical_qtype *qtype) +{ + seq_printf(seq, "%s_qtype:\t%d\n", name, qtype->qtype); + seq_printf(seq, "%s_count:\t%d\n", name, qtype->qid_count); + seq_printf(seq, "%s_base:\t%d\n", name, qtype->qid_base); +} + +static int identity_show(struct seq_file *seq, void *v) +{ + struct ionic *ionic = seq->private; + struct ionic_identity *ident; + struct ionic_dev *idev; + + ident = &ionic->ident; + idev = &ionic->idev; + + seq_printf(seq, "asic_type: 0x%x\n", idev->dev_info.asic_type); + seq_printf(seq, "asic_rev: 0x%x\n", idev->dev_info.asic_rev); + seq_printf(seq, "serial_num: %s\n", idev->dev_info.serial_num); + seq_printf(seq, "fw_version: %s\n", idev->dev_info.fw_version); + seq_printf(seq, "fw_status: 0x%x\n", + ioread8(&idev->dev_info_regs->fw_status)); + seq_printf(seq, "fw_heartbeat: 0x%x\n", + ioread32(&idev->dev_info_regs->fw_heartbeat)); + seq_printf(seq, "cmb_pages: 0x%x\n", ionic_cmb_pages_in_use(ionic->lif)); + + seq_printf(seq, "nlifs: %d\n", ident->dev.nlifs); + seq_printf(seq, "nintrs: %d\n", ident->dev.nintrs); + seq_printf(seq, "eth_eq_count: %d\n", ident->dev.eq_count); + seq_printf(seq, "ndbpgs_per_lif: %d\n", ident->dev.ndbpgs_per_lif); + seq_printf(seq, "intr_coal_mult: %d\n", ident->dev.intr_coal_mult); + seq_printf(seq, "intr_coal_div: %d\n", ident->dev.intr_coal_div); + + 
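/* the eth and rdma limits below come from the lif identity rather than the dev identity */ +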
seq_printf(seq, "max_ucast_filters: %d\n", ident->lif.eth.max_ucast_filters); + seq_printf(seq, "max_mcast_filters: %d\n", ident->lif.eth.max_mcast_filters); + + seq_printf(seq, "rdma_qp_opcodes: %d\n", ident->lif.rdma.qp_opcodes); + seq_printf(seq, "rdma_admin_opcodes: %d\n", ident->lif.rdma.admin_opcodes); + seq_printf(seq, "rdma_max_stride: %d\n", ident->lif.rdma.max_stride); + seq_printf(seq, "rdma_cl_stride: %d\n", ident->lif.rdma.cl_stride); + seq_printf(seq, "rdma_pte_stride: %d\n", ident->lif.rdma.pte_stride); + seq_printf(seq, "rdma_rrq_stride: %d\n", ident->lif.rdma.rrq_stride); + seq_printf(seq, "rdma_rsq_stride: %d\n", ident->lif.rdma.rsq_stride); + + identity_show_qtype(seq, "rdma_aq", &ident->lif.rdma.aq_qtype); + identity_show_qtype(seq, "rdma_sq", &ident->lif.rdma.sq_qtype); + identity_show_qtype(seq, "rdma_rq", &ident->lif.rdma.rq_qtype); + identity_show_qtype(seq, "rdma_cq", &ident->lif.rdma.cq_qtype); + identity_show_qtype(seq, "rdma_eq", &ident->lif.rdma.eq_qtype); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(identity); + +void ionic_debugfs_add_ident(struct ionic *ionic) +{ + debugfs_create_file("identity", 0400, ionic->dentry, + ionic, &identity_fops); +} + +void ionic_debugfs_add_sizes(struct ionic *ionic) +{ + debugfs_create_u32("nlifs", 0400, ionic->dentry, + (u32 *)&ionic->ident.dev.nlifs); + debugfs_create_u32("nintrs", 0400, ionic->dentry, &ionic->nintrs); + + debugfs_create_u32("ntxqs_per_lif", 0400, ionic->dentry, + (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_TXQ]); + debugfs_create_u32("nrxqs_per_lif", 0400, ionic->dentry, + (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_RXQ]); +} + +static int q_tail_show(struct seq_file *seq, void *v) +{ + struct ionic_queue *q = seq->private; + + seq_printf(seq, "%d\n", q->tail_idx); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(q_tail); + +static int q_head_show(struct seq_file *seq, void *v) +{ + struct ionic_queue *q = seq->private; + + seq_printf(seq, "%d\n", q->head_idx); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(q_head); + +static int cq_tail_show(struct seq_file *seq, void *v) +{ + struct ionic_cq *cq = seq->private; + + seq_printf(seq, "%d\n", cq->tail_idx); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(cq_tail); + +static const struct debugfs_reg32 intr_ctrl_regs[] = { + { .name = "coal_init", .offset = 0, }, + { .name = "mask", .offset = 4, }, + { .name = "credits", .offset = 8, }, + { .name = "mask_on_assert", .offset = 12, }, + { .name = "coal_timer", .offset = 16, }, +}; + +void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct dentry *qcq_dentry, *q_dentry, *cq_dentry; + struct dentry *intr_dentry, *stats_dentry; + struct ionic_dev *idev = &lif->ionic->idev; + struct debugfs_regset32 *intr_ctrl_regset; + struct ionic_intr_info *intr = &qcq->intr; + struct debugfs_blob_wrapper *desc_blob; + struct device *dev = lif->ionic->dev; + struct ionic_tx_stats *txqstats; + struct ionic_rx_stats *rxqstats; + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + + qcq_dentry = debugfs_create_dir(q->name, lif->dentry); + if (IS_ERR_OR_NULL(qcq_dentry)) + return; + qcq->dentry = qcq_dentry; + + debugfs_create_x64("q_base_pa", 0400, qcq_dentry, &qcq->q_base_pa); + debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size); + debugfs_create_x64("cq_base_pa", 0400, qcq_dentry, &qcq->cq_base_pa); + debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size); + debugfs_create_x64("sg_base_pa", 0400, qcq_dentry, &qcq->sg_base_pa); + debugfs_create_x32("sg_size", 0400, 
qcq_dentry, &qcq->sg_size); + debugfs_create_x32("cmb_order", 0400, qcq_dentry, &qcq->cmb_order); + debugfs_create_x32("cmb_pgid", 0400, qcq_dentry, &qcq->cmb_pgid); + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 0) < RHEL_RELEASE_CODE)) + debugfs_create_u8("armed", 0400, qcq_dentry, (u8 *)&qcq->armed); +#else + debugfs_create_bool("armed", 0400, qcq_dentry, &qcq->armed); +#endif + + q_dentry = debugfs_create_dir("q", qcq->dentry); + if (IS_ERR_OR_NULL(q_dentry)) + return; + + debugfs_create_u32("index", 0400, q_dentry, &q->index); + debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs); + debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size); + debugfs_create_u32("pid", 0400, q_dentry, &q->pid); + debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index); + debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type); + debugfs_create_u64("drop", 0400, q_dentry, &q->drop); + debugfs_create_u64("stop", 0400, q_dentry, &q->stop); + debugfs_create_u64("wake", 0400, q_dentry, &q->wake); + + debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops); + debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops); + + desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL); + if (!desc_blob) + return; + desc_blob->data = q->base; + desc_blob->size = (unsigned long)q->num_descs * q->desc_size; + debugfs_create_blob("desc_blob", 0400, q_dentry, desc_blob); + + if (qcq->flags & IONIC_QCQ_F_SG) { + desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL); + if (!desc_blob) + return; + desc_blob->data = q->sg_base; + desc_blob->size = (unsigned long)q->num_descs * q->sg_desc_size; + debugfs_create_blob("sg_desc_blob", 0400, q_dentry, + desc_blob); + } + + if (qcq->flags & IONIC_QCQ_F_TX_STATS) { + stats_dentry = debugfs_create_dir("tx_stats", q_dentry); + if (IS_ERR_OR_NULL(stats_dentry)) + return; + txqstats = &lif->txqstats[q->index]; + + debugfs_create_u64("dma_map_err", 0400, stats_dentry, + &txqstats[q->index].dma_map_err); + debugfs_create_u64("pkts", 0400, stats_dentry, + &txqstats[q->index].pkts); + debugfs_create_u64("bytes", 0400, stats_dentry, + &txqstats[q->index].bytes); + debugfs_create_u64("clean", 0400, stats_dentry, + &txqstats[q->index].clean); + debugfs_create_u64("linearize", 0400, stats_dentry, + &txqstats[q->index].linearize); + debugfs_create_u64("csum_none", 0400, stats_dentry, + &txqstats[q->index].csum_none); + debugfs_create_u64("csum", 0400, stats_dentry, + &txqstats[q->index].csum); + debugfs_create_u64("crc32_csum", 0400, stats_dentry, + &txqstats[q->index].crc32_csum); + debugfs_create_u64("tso", 0400, stats_dentry, + &txqstats[q->index].tso); + debugfs_create_u64("frags", 0400, stats_dentry, + &txqstats[q->index].frags); + } + + if (qcq->flags & IONIC_QCQ_F_RX_STATS) { + stats_dentry = debugfs_create_dir("rx_stats", q_dentry); + if (IS_ERR_OR_NULL(stats_dentry)) + return; + rxqstats = &lif->rxqstats[q->index]; + + debugfs_create_u64("dma_map_err", 0400, stats_dentry, + &rxqstats[q->index].dma_map_err); + debugfs_create_u64("alloc_err", 0400, stats_dentry, + &rxqstats[q->index].alloc_err); + debugfs_create_u64("pkts", 0400, stats_dentry, + &rxqstats[q->index].pkts); + debugfs_create_u64("bytes", 0400, stats_dentry, + &rxqstats[q->index].bytes); + debugfs_create_u64("csum_none", 0400, stats_dentry, + &rxqstats[q->index].csum_none); + debugfs_create_u64("csum_complete", 0400, stats_dentry, + &rxqstats[q->index].csum_complete); + debugfs_create_u64("csum_error", 0400, stats_dentry, + &rxqstats[q->index].csum_error); + } + + cq_dentry = 
debugfs_create_dir("cq", qcq->dentry); + if (IS_ERR_OR_NULL(cq_dentry)) + return; + + debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa); + debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs); + debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size); + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 0) < RHEL_RELEASE_CODE)) + debugfs_create_u8("done_color", 0400, cq_dentry, (u8 *)&cq->done_color); +#else + debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color); +#endif + + debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops); + + desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL); + if (!desc_blob) + return; + desc_blob->data = cq->base; + desc_blob->size = (unsigned long)cq->num_descs * cq->desc_size; + debugfs_create_blob("desc_blob", 0400, cq_dentry, desc_blob); + + if (qcq->flags & IONIC_QCQ_F_INTR) { + intr_dentry = debugfs_create_dir("intr", qcq->dentry); + if (IS_ERR_OR_NULL(intr_dentry)) + return; + + debugfs_create_u32("index", 0400, intr_dentry, + &intr->index); + debugfs_create_u32("vector", 0400, intr_dentry, + &intr->vector); + debugfs_create_u32("dim_coal_hw", 0400, intr_dentry, + &intr->dim_coal_hw); + + intr_ctrl_regset = devm_kzalloc(dev, sizeof(*intr_ctrl_regset), + GFP_KERNEL); + if (!intr_ctrl_regset) + return; + intr_ctrl_regset->regs = intr_ctrl_regs; + intr_ctrl_regset->nregs = ARRAY_SIZE(intr_ctrl_regs); + intr_ctrl_regset->base = &idev->intr_ctrl[intr->index]; + + debugfs_create_regset32("intr_ctrl", 0400, intr_dentry, + intr_ctrl_regset); + } + + if (qcq->flags & IONIC_QCQ_F_NOTIFYQ) { + stats_dentry = debugfs_create_dir("notifyblock", qcq->dentry); + if (IS_ERR_OR_NULL(stats_dentry)) + return; + + debugfs_create_u64("eid", 0400, stats_dentry, + (u64 *)&lif->info->status.eid); + debugfs_create_u16("link_status", 0400, stats_dentry, + (u16 *)&lif->info->status.link_status); + debugfs_create_u32("link_speed", 0400, stats_dentry, + (u32 *)&lif->info->status.link_speed); + debugfs_create_u16("link_down_count", 0400, stats_dentry, + (u16 *)&lif->info->status.link_down_count); + } +} + +static int netdev_show(struct seq_file *seq, void *v) +{ + struct net_device *netdev = seq->private; + + seq_printf(seq, "%s\n", netdev->name); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(netdev); + +static int lif_identity_show(struct seq_file *seq, void *v) +{ + union ionic_lif_identity *lid = seq->private; + + seq_printf(seq, "capabilities: 0x%llx\n", lid->capabilities); + seq_printf(seq, "eth-version: 0x%x\n", lid->eth.version); + seq_printf(seq, "max_ucast_filters: %d\n", lid->eth.max_ucast_filters); + seq_printf(seq, "max_mcast_filters: %d\n", lid->eth.max_mcast_filters); + seq_printf(seq, "rss_ind_tbl_sz: %d\n", lid->eth.rss_ind_tbl_sz); + seq_printf(seq, "min_frame_size: %d\n", lid->eth.min_frame_size); + seq_printf(seq, "max_frame_size: %d\n", lid->eth.max_frame_size); + + seq_printf(seq, "state: %d\n", lid->eth.config.state); + seq_printf(seq, "name: \"%s\"\n", lid->eth.config.name); + seq_printf(seq, "mtu: %d\n", lid->eth.config.mtu); + seq_printf(seq, "mac: %pM\n", lid->eth.config.mac); + seq_printf(seq, "features: 0x%08llx\n", + lid->eth.config.features); + seq_printf(seq, "adminq-count: %d\n", + lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]); + seq_printf(seq, "notifyq-count: %d\n", + lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]); + seq_printf(seq, "rxq-count: %d\n", + lid->eth.config.queue_count[IONIC_QTYPE_RXQ]); + seq_printf(seq, "txq-count: %d\n", + lid->eth.config.queue_count[IONIC_QTYPE_TXQ]); + 
seq_printf(seq, "eq-count: %d\n", + lid->eth.config.queue_count[IONIC_QTYPE_EQ]); + + seq_puts(seq, "\n"); + + seq_printf(seq, "rdma_version: 0x%x\n", lid->rdma.version); + seq_printf(seq, "rdma_qp_opcodes: %d\n", lid->rdma.qp_opcodes); + seq_printf(seq, "rdma_admin_opcodes: %d\n", lid->rdma.admin_opcodes); + seq_printf(seq, "rdma_npts_per_lif: %d\n", lid->rdma.npts_per_lif); + seq_printf(seq, "rdma_nmrs_per_lif: %d\n", lid->rdma.nmrs_per_lif); + seq_printf(seq, "rdma_nahs_per_lif: %d\n", lid->rdma.nahs_per_lif); + seq_printf(seq, "rdma_max_stride: %d\n", lid->rdma.max_stride); + seq_printf(seq, "rdma_cl_stride: %d\n", lid->rdma.cl_stride); + seq_printf(seq, "rdma_pte_stride: %d\n", lid->rdma.pte_stride); + seq_printf(seq, "rdma_rrq_stride: %d\n", lid->rdma.rrq_stride); + seq_printf(seq, "rdma_rsq_stride: %d\n", lid->rdma.rsq_stride); + seq_printf(seq, "rdma_dcqcn_profiles: %d\n", lid->rdma.dcqcn_profiles); + + identity_show_qtype(seq, "rdma_aq", &lid->rdma.aq_qtype); + identity_show_qtype(seq, "rdma_sq", &lid->rdma.sq_qtype); + identity_show_qtype(seq, "rdma_rq", &lid->rdma.rq_qtype); + identity_show_qtype(seq, "rdma_cq", &lid->rdma.cq_qtype); + identity_show_qtype(seq, "rdma_eq", &lid->rdma.eq_qtype); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(lif_identity); + +static int lif_state_show(struct seq_file *seq, void *v) +{ + struct ionic_lif *lif = seq->private; + + seq_printf(seq, "0x%08lx\n", lif->state[0]); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(lif_state); + +static int lif_filters_show(struct seq_file *seq, void *v) +{ + struct ionic_lif *lif = seq->private; + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + unsigned int i; + + seq_puts(seq, "id flow state type filter\n"); + spin_lock_bh(&lif->rx_filters.lock); + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) { + switch (le16_to_cpu(f->cmd.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + seq_printf(seq, "0x%04x 0x%08x 0x%02x vlan 0x%04x\n", + f->filter_id, f->flow_id, f->state, + le16_to_cpu(f->cmd.vlan.vlan)); + break; + case IONIC_RX_FILTER_MATCH_MAC: + seq_printf(seq, "0x%04x 0x%08x 0x%02x mac %pM\n", + f->filter_id, f->flow_id, f->state, + f->cmd.mac.addr); + break; + case IONIC_RX_FILTER_MATCH_MAC_VLAN: + seq_printf(seq, "0x%04x 0x%08x 0x%02x macvl 0x%04x %pM\n", + f->filter_id, f->flow_id, f->state, + le16_to_cpu(f->cmd.vlan.vlan), + f->cmd.mac.addr); + break; + case IONIC_RX_FILTER_STEER_PKTCLASS: + seq_printf(seq, "0x%04x 0x%08x 0x%02x rxstr 0x%llx\n", + f->filter_id, f->flow_id, f->state, + le64_to_cpu(f->cmd.pkt_class)); + break; + } + } + } + spin_unlock_bh(&lif->rx_filters.lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(lif_filters); + +static int lif_n_txrx_alloc_show(struct seq_file *seq, void *v) +{ + struct ionic_lif *lif = seq->private; + + seq_printf(seq, "%llu\n", lif->n_txrx_alloc); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(lif_n_txrx_alloc); + +void ionic_debugfs_add_lif(struct ionic_lif *lif) +{ + struct dentry *lif_dentry; + + lif_dentry = debugfs_create_dir(lif->name, lif->ionic->dentry); + if (IS_ERR_OR_NULL(lif_dentry)) + return; + lif->dentry = lif_dentry; + + debugfs_create_file("netdev", 0400, lif->dentry, + lif->netdev, &netdev_fops); + debugfs_create_file("identity", 0400, lif->dentry, + lif->identity, &lif_identity_fops); + debugfs_create_file("state", 0400, lif->dentry, + lif, &lif_state_fops); + debugfs_create_file("filters", 0400, lif->dentry, + lif, &lif_filters_fops); + 
debugfs_create_file("txrx_alloc", 0400, lif->dentry, + lif, &lif_n_txrx_alloc_fops); +} + +void ionic_debugfs_del_lif(struct ionic_lif *lif) +{ + debugfs_remove_recursive(lif->dentry); + lif->dentry = NULL; +} + +void ionic_debugfs_add_eq(struct ionic_eq *eq) +{ + const int ring_bytes = sizeof(struct ionic_eq_comp) * IONIC_EQ_DEPTH; + struct device *dev = eq->ionic->dev; + struct debugfs_blob_wrapper *blob; + struct debugfs_regset32 *regset; + struct dentry *ent; + char name[40]; + + snprintf(name, sizeof(name), "eq%02u", eq->index); + + ent = debugfs_create_dir(name, eq->ionic->dentry); + if (IS_ERR_OR_NULL(ent)) + return; + + blob = devm_kzalloc(dev, sizeof(*blob), GFP_KERNEL); + blob->data = eq->ring[0].base; + blob->size = ring_bytes; + debugfs_create_blob("ring0", 0400, ent, blob); + + blob = devm_kzalloc(dev, sizeof(*blob), GFP_KERNEL); + blob->data = eq->ring[1].base; + blob->size = ring_bytes; + debugfs_create_blob("ring1", 0400, ent, blob); + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + regset->regs = intr_ctrl_regs; + regset->nregs = ARRAY_SIZE(intr_ctrl_regs); + regset->base = &eq->ionic->idev.intr_ctrl[eq->intr.index]; + debugfs_create_regset32("intr_ctrl", 0400, ent, regset); +} + +void ionic_debugfs_del_qcq(struct ionic_qcq *qcq) +{ + debugfs_remove_recursive(qcq->dentry); + qcq->dentry = NULL; +} + +#endif diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_debugfs.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_debugfs.h new file mode 100644 index 0000000000..5849ccb4d3 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_debugfs.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#ifndef _IONIC_DEBUGFS_H_ +#define _IONIC_DEBUGFS_H_ + +#include + +struct ionic; +struct ionic_qcq; + +#ifdef CONFIG_DEBUG_FS + +void ionic_debugfs_create(void); +void ionic_debugfs_destroy(void); +void ionic_debugfs_add_dev(struct ionic *ionic); +void ionic_debugfs_del_dev(struct ionic *ionic); +void ionic_debugfs_add_bars(struct ionic *ionic); +void ionic_debugfs_add_dev_cmd(struct ionic *ionic); +void ionic_debugfs_add_ident(struct ionic *ionic); +void ionic_debugfs_add_sizes(struct ionic *ionic); +void ionic_debugfs_add_eq(struct ionic_eq *eq); +void ionic_debugfs_add_lif(struct ionic_lif *lif); +void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq); +void ionic_debugfs_del_lif(struct ionic_lif *lif); +void ionic_debugfs_del_qcq(struct ionic_qcq *qcq); +#else +static inline void ionic_debugfs_create(void) { } +static inline void ionic_debugfs_destroy(void) { } +static inline void ionic_debugfs_add_dev(struct ionic *ionic) { } +static inline void ionic_debugfs_del_dev(struct ionic *ionic) { } +static inline void ionic_debugfs_add_bars(struct ionic *ionic) { } +static inline void ionic_debugfs_add_dev_cmd(struct ionic *ionic) { } +static inline void ionic_debugfs_add_ident(struct ionic *ionic) { } +static inline void ionic_debugfs_add_sizes(struct ionic *ionic) { } +static inline void ionic_debugfs_add_eq(struct ionic_eq *eq) { } +static inline void ionic_debugfs_add_lif(struct ionic_lif *lif) { } +static inline void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) { } +static inline void ionic_debugfs_del_lif(struct ionic_lif *lif) { } +static inline void ionic_debugfs_del_qcq(struct ionic_qcq *qcq) { } +#endif + +#endif /* _IONIC_DEBUGFS_H_ */ diff --git 
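Note the header's standard kernel idiom: each debugfs hook is paired with an empty static inline stub under the #else branch, so call sites need no CONFIG_DEBUG_FS guards of their own. An illustration only, not taken from this patch:

	/* same call site in both configurations: creates the debugfs
	 * directory when CONFIG_DEBUG_FS is set, otherwise the static
	 * inline stub compiles to nothing
	 */
	ionic_debugfs_add_dev(ionic);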
a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_dev.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_dev.c new file mode 100644 index 0000000000..42ba5ddf90 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_dev.c @@ -0,0 +1,1208 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include +#include +#include +#include +#include +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_dev.h" +#include "ionic_debugfs.h" +#include "ionic_lif.h" + +void ionic_watchdog_cb(struct timer_list *t) +{ + struct ionic *ionic = from_timer(ionic, t, watchdog_timer); + struct ionic_lif *lif = ionic->lif; + struct ionic_deferred_work *work; + int hb; + + mod_timer(&ionic->watchdog_timer, + round_jiffies(jiffies + ionic->watchdog_period)); + + if (!lif) + return; + + hb = ionic_heartbeat_check(ionic); + dev_dbg(ionic->dev, "%s: hb %d running %d UP %d\n", + __func__, hb, netif_running(lif->netdev), + test_bit(IONIC_LIF_F_UP, lif->state)); + + if (hb >= 0 && + !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + ionic_link_status_check_request(lif, CAN_NOT_SLEEP); + + if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state) && + !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "rxmode change dropped\n"); + return; + } + + work->type = IONIC_DW_TYPE_RX_MODE; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); + } +} + +void ionic_watchdog_init(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + + timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0); + if (ionic->pdev) + ionic->watchdog_period = IONIC_WATCHDOG_PCI_SECS * HZ; + else + ionic->watchdog_period = IONIC_WATCHDOG_PLAT_MSECS * HZ / 1000; + + /* set times to ensure the first check will proceed */ + atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ); + idev->last_hb_time = jiffies - 2 * ionic->watchdog_period; + /* init as ready, so no transition if the first check succeeds */ + idev->last_fw_hb = 0; + idev->fw_hb_ready = true; + idev->fw_status_ready = true; + idev->fw_generation = IONIC_FW_STS_F_GENERATION & + ioread8(&idev->dev_info_regs->fw_status); +} + +void ionic_init_devinfo(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + + idev->dev_info.asic_type = ioread8(&idev->dev_info_regs->asic_type); + idev->dev_info.asic_rev = ioread8(&idev->dev_info_regs->asic_rev); + + memcpy_fromio(idev->dev_info.fw_version, + idev->dev_info_regs->fw_version, + IONIC_DEVINFO_FWVERS_BUFLEN); + + memcpy_fromio(idev->dev_info.serial_num, + idev->dev_info_regs->serial_num, + IONIC_DEVINFO_SERIAL_BUFLEN); + + idev->dev_info.fw_version[IONIC_DEVINFO_FWVERS_BUFLEN] = 0; + idev->dev_info.serial_num[IONIC_DEVINFO_SERIAL_BUFLEN] = 0; + + dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version); +} + +int ionic_dev_setup(struct ionic *ionic) +{ + struct ionic_dev_bar *bar = ionic->bars; + unsigned int num_bars = ionic->num_bars; + struct ionic_dev *idev = &ionic->idev; + struct device *dev = ionic->dev; + int size; + u32 sig; + + /* BAR0: dev_cmd and interrupts */ + if (num_bars < 1) { + dev_err(dev, "No bars found, aborting\n"); + return -EFAULT; + } + + if (bar->len < IONIC_BAR0_SIZE) { + dev_err(dev, "Resource bar size %lu too small, aborting\n", + bar->len); + return -EFAULT; + } + + idev->dev_info_regs = bar->vaddr + IONIC_BAR0_DEV_INFO_REGS_OFFSET; + 
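/* BAR0 is carved into fixed windows at the IONIC_BAR0_*_OFFSET constants: device info regs, devcmd regs, interrupt status, then interrupt control */ +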
idev->dev_cmd_regs = bar->vaddr + IONIC_BAR0_DEV_CMD_REGS_OFFSET; + idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET; + idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET; + + idev->hwstamp_regs = &idev->dev_info_regs->hwstamp; + + sig = ioread32(&idev->dev_info_regs->signature); + if (sig != IONIC_DEV_INFO_SIGNATURE) { + dev_err(dev, "Incompatible firmware signature %x", sig); + return -EFAULT; + } + + ionic_init_devinfo(ionic); + + /* BAR1: doorbells */ + bar++; + if (num_bars < 2) { + dev_err(dev, "Doorbell bar missing, aborting\n"); + return -EFAULT; + } + + ionic_watchdog_init(ionic); + + idev->db_pages = bar->vaddr; + idev->phy_db_pages = bar->bus_addr; + + /* BAR2: optional controller memory mapping */ + bar++; + mutex_init(&idev->cmb_inuse_lock); + if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) { + idev->cmb_inuse = NULL; + idev->phy_cmb_pages = 0; + idev->cmb_npages = 0; + return 0; + } + + idev->phy_cmb_pages = bar->bus_addr; + idev->cmb_npages = bar->len / PAGE_SIZE; + size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long); + idev->cmb_inuse = kzalloc(size, GFP_KERNEL); + if (!idev->cmb_inuse) { + idev->phy_cmb_pages = 0; + idev->cmb_npages = 0; + } + + return 0; +} + +void ionic_dev_teardown(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + + kfree(idev->cmb_inuse); + idev->cmb_inuse = NULL; + idev->phy_cmb_pages = 0; + idev->cmb_npages = 0; + + mutex_destroy(&idev->cmb_inuse_lock); +} + +/* Devcmd Interface */ +bool ionic_is_fw_running(struct ionic_dev *idev) +{ + u8 fw_status = ioread8(&idev->dev_info_regs->fw_status); + + /* firmware is useful only if the running bit is set and + * fw_status != 0xff (bad PCI read) + */ + return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING); +} + +int ionic_heartbeat_check(struct ionic *ionic) +{ + unsigned long check_time, last_check_time; + struct ionic_dev *idev = &ionic->idev; + struct ionic_lif *lif = ionic->lif; + bool fw_status_ready = true; + bool fw_hb_ready; + u8 fw_generation; + u8 fw_status; + u32 fw_hb; + u32 wt; + + check_time = jiffies; + last_check_time = atomic_long_read(&idev->last_check_time); +do_check_time: + /* on the host device wait at least one second before testing again */ + if (ionic->pdev && time_before(check_time, last_check_time + HZ)) + return 0; + if (!atomic_long_try_cmpxchg_relaxed(&idev->last_check_time, + &last_check_time, check_time)) { + /* if called concurrently, only the first should proceed. */ + dev_dbg(ionic->dev, "%s: do_check_time again\n", __func__); + goto do_check_time; + } + + fw_status = ioread8(&idev->dev_info_regs->fw_status); + + /* If fw_status is not ready don't bother with the generation */ + if (!ionic_is_fw_running(idev)) { + fw_status_ready = false; + } else { + fw_generation = fw_status & IONIC_FW_STS_F_GENERATION; + if (idev->fw_generation != fw_generation) { + dev_info(ionic->dev, "FW generation 0x%02x -> 0x%02x\n", + idev->fw_generation, fw_generation); + + idev->fw_generation = fw_generation; + + /* If the generation changed, the fw status is not + * ready so we need to trigger a fw-down cycle. After + * the down, the next watchdog will see the fw is up + * and the generation value stable, so will trigger + * the fw-up activity. + * + * If we had already moved to FW_RESET from a RESET event, + * it is possible that we never saw the fw_status go to 0, + * so we fake it a bit here to get FW up again. 
+ */ + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + idev->fw_status_ready = false; /* go to running */ + else + fw_status_ready = false; /* go to down */ + } + } + + dev_dbg(ionic->dev, "fw_status 0x%02x ready %d idev->ready %d last_hb 0x%x state 0x%02lx\n", + fw_status, fw_status_ready, idev->fw_status_ready, + idev->last_fw_hb, lif->state[0]); + + /* is this a transition? */ + if (fw_status_ready != idev->fw_status_ready && + !test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { + bool trigger = false; + + idev->fw_status_ready = fw_status_ready; + + if (!fw_status_ready && lif && + !test_bit(IONIC_LIF_F_FW_RESET, lif->state) && + !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { + + dev_info(ionic->dev, "FW stopped 0x%02x\n", fw_status); + trigger = true; + + } else if (fw_status_ready && lif && + test_bit(IONIC_LIF_F_FW_RESET, lif->state) && + !test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { + + dev_info(ionic->dev, "FW running 0x%02x\n", fw_status); + trigger = true; + } + + if (trigger) { + struct ionic_deferred_work *work; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (work) { + work->type = IONIC_DW_TYPE_LIF_RESET; + work->fw_status = fw_status_ready; + ionic_lif_deferred_enqueue(&lif->deferred, work); + } + } + } + + if (!idev->fw_status_ready) + return -ENXIO; + + /* Because of some variability in the actual FW heartbeat, we + * wait longer than the current devcmd_timeout before checking + * again, but never less than 5 seconds. + */ + last_check_time = idev->last_hb_time; + wt = max_t(int, (devcmd_timeout * 2), DEVCMD_TIMEOUT); + if (time_before(check_time, last_check_time + wt * HZ)) + return 0; + + fw_hb = ioread32(&idev->dev_info_regs->fw_heartbeat); + fw_hb_ready = fw_hb != idev->last_fw_hb; + + /* early FW version had no heartbeat, so fake it */ + if (!fw_hb_ready && !fw_hb) + fw_hb_ready = true; + + dev_dbg(ionic->dev, "%s: fw_hb %u last_fw_hb %u ready %u\n", + __func__, fw_hb, idev->last_fw_hb, fw_hb_ready); + + idev->last_fw_hb = fw_hb; + + /* log a transition */ + if (fw_hb_ready != idev->fw_hb_ready) { + idev->fw_hb_ready = fw_hb_ready; + if (!fw_hb_ready) + dev_info(ionic->dev, "FW heartbeat stalled at %d\n", fw_hb); + else + dev_info(ionic->dev, "FW heartbeat restored at %d\n", fw_hb); + } + + if (!fw_hb_ready) + return -ENXIO; + + idev->last_hb_time = check_time; + + return 0; +} + +u8 ionic_dev_cmd_status(struct ionic_dev *idev) +{ + return ioread8(&idev->dev_cmd_regs->comp.comp.status); +} + +bool ionic_dev_cmd_done(struct ionic_dev *idev) +{ + return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE; +} + +void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp) +{ + memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp)); +} + +void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd) +{ + memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd)); + iowrite32(0, &idev->dev_cmd_regs->done); + iowrite32(1, &idev->dev_cmd_regs->doorbell); +} + +/* Device commands */ +void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver) +{ + union ionic_dev_cmd cmd = { + .identify.opcode = IONIC_CMD_IDENTIFY, + .identify.ver = ver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_init(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .init.opcode = IONIC_CMD_INIT, + .init.type = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_reset(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .reset.opcode = IONIC_CMD_RESET, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +/* 
Port commands */ +void ionic_dev_cmd_port_identify(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_init.opcode = IONIC_CMD_PORT_IDENTIFY, + .port_init.index = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_init(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_init.opcode = IONIC_CMD_PORT_INIT, + .port_init.index = 0, + .port_init.info_pa = cpu_to_le64(idev->port_info_pa), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_reset(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_reset.opcode = IONIC_CMD_PORT_RESET, + .port_reset.index = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_STATE, + .port_setattr.state = state, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_SPEED, + .port_setattr.speed = cpu_to_le32(speed), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_AUTONEG, + .port_setattr.an_enable = an_enable, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_FEC, + .port_setattr.fec_type = fec_type, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_PAUSE, + .port_setattr.pause_type = pause_type, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +/* VF commands */ +int ionic_set_vf_config(struct ionic *ionic, int vf, + struct ionic_vf_setattr_cmd *vfc) +{ + union ionic_dev_cmd cmd = { + .vf_setattr.opcode = IONIC_CMD_VF_SETATTR, + .vf_setattr.attr = vfc->attr, + .vf_setattr.vf_index = cpu_to_le16(vf), + }; + int err; + + if (vf >= ionic->num_vfs) + return -EINVAL; + + memcpy(cmd.vf_setattr.pad, vfc->pad, sizeof(vfc->pad)); + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_go(&ionic->idev, &cmd); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr, + struct ionic_vf_getattr_comp *comp) +{ + union ionic_dev_cmd cmd = { + .vf_getattr.opcode = IONIC_CMD_VF_GETATTR, + .vf_getattr.attr = attr, + .vf_getattr.vf_index = cpu_to_le16(vf), + }; + int err; + + if (vf >= ionic->num_vfs) + return -EINVAL; + + switch (attr) { + case IONIC_VF_ATTR_SPOOFCHK: + case IONIC_VF_ATTR_TRUST: + case IONIC_VF_ATTR_LINKSTATE: + case IONIC_VF_ATTR_MAC: + case IONIC_VF_ATTR_VLAN: + case IONIC_VF_ATTR_RATE: + break; + case IONIC_VF_ATTR_STATSADDR: + default: + return -EINVAL; + } + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_go(&ionic->idev, &cmd); + err = ionic_dev_cmd_wait_nomsg(ionic, devcmd_timeout); + memcpy_fromio(comp, 
&ionic->idev.dev_cmd_regs->comp.vf_getattr, + sizeof(*comp)); + mutex_unlock(&ionic->dev_cmd_lock); + + if (err && comp->status != IONIC_RC_ENOSUPP) + ionic_dev_cmd_dev_err_print(ionic, cmd.vf_getattr.opcode, + comp->status, err); + + return err; +} + +void ionic_vf_start(struct ionic *ionic, int vf) +{ +#ifdef IONIC_DEV_IDENTITY_VERSION_2 + union ionic_dev_cmd cmd = { + .vf_ctrl.opcode = IONIC_CMD_VF_CTRL, + }; + + if (!(ionic->ident.dev.capabilities & IONIC_DEV_CAP_VF_CTRL)) + return; + + if (vf == -1) { + cmd.vf_ctrl.ctrl_opcode = IONIC_VF_CTRL_START_ALL; + } else { + cmd.vf_ctrl.ctrl_opcode = IONIC_VF_CTRL_START; + cmd.vf_ctrl.vf_index = cpu_to_le16(vf); + } + + ionic_dev_cmd_go(&ionic->idev, &cmd); + (void)ionic_dev_cmd_wait(ionic, devcmd_timeout); +#endif +} + +/* LIF commands */ +void ionic_dev_cmd_queue_identify(struct ionic_dev *idev, + u16 lif_type, u8 qtype, u8 qver) +{ + union ionic_dev_cmd cmd = { + .q_identify.opcode = IONIC_CMD_Q_IDENTIFY, + .q_identify.lif_type = cpu_to_le16(lif_type), + .q_identify.type = qtype, + .q_identify.ver = qver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver) +{ + union ionic_dev_cmd cmd = { + .lif_identify.opcode = IONIC_CMD_LIF_IDENTIFY, + .lif_identify.type = type, + .lif_identify.ver = ver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index, + dma_addr_t info_pa) +{ + union ionic_dev_cmd cmd = { + .lif_init.opcode = IONIC_CMD_LIF_INIT, + .lif_init.index = cpu_to_le16(lif_index), + .lif_init.info_pa = cpu_to_le64(info_pa), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index) +{ + union ionic_dev_cmd cmd = { + .lif_init.opcode = IONIC_CMD_LIF_RESET, + .lif_init.index = cpu_to_le16(lif_index), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq, + u16 lif_index, u16 intr_index) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + + union ionic_dev_cmd cmd = { + .q_init.opcode = IONIC_CMD_Q_INIT, + .q_init.lif_index = cpu_to_le16(lif_index), + .q_init.type = q->type, + .q_init.ver = qcq->q.lif->qtype_info[q->type].version, + .q_init.index = cpu_to_le32(q->index), + .q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_ENA), + .q_init.pid = cpu_to_le16(q->pid), + .q_init.intr_index = cpu_to_le16(intr_index), + .q_init.ring_size = ilog2(q->num_descs), + .q_init.ring_base = cpu_to_le64(q->base_pa), + .q_init.cq_ring_base = cpu_to_le64(cq->base_pa), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +int ionic_db_page_num(struct ionic_lif *lif, int pid) +{ + return (lif->hw_index * lif->dbid_count) + pid; +} + +int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order) +{ + struct ionic_dev *idev = &lif->ionic->idev; + int ret; + + mutex_lock(&idev->cmb_inuse_lock); + ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order); + mutex_unlock(&idev->cmb_inuse_lock); + + if (ret < 0) + return ret; + + *pgid = (u32)ret; + *pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE; + + return 0; +} + +void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order) +{ + struct ionic_dev *idev = &lif->ionic->idev; + + mutex_lock(&idev->cmb_inuse_lock); + bitmap_release_region(idev->cmb_inuse, pgid, order); + mutex_unlock(&idev->cmb_inuse_lock); +} + +static void ionic_txrx_notify(struct ionic *ionic, + int lif_index, int qcq_id, bool is_tx) +{ + struct 
ionic_lif *lif = ionic->lif; + + if (!lif) + return; + + if (is_tx) + lif->txqcqs[qcq_id]->armed = false; + else + lif->rxqcqs[qcq_id]->armed = false; + + /* We schedule rx napi, it handles both tx and rx */ + napi_schedule_irqoff(&lif->rxqcqs[qcq_id]->napi); +} + +static bool ionic_next_eq_comp(struct ionic_eq *eq, int ring_index, + struct ionic_eq_comp *comp) +{ + struct ionic_eq_ring *ring = &eq->ring[ring_index]; + struct ionic_eq_comp *qcomp; + u8 gen_color; + + qcomp = &ring->base[ring->index]; + gen_color = qcomp->gen_color; + + if (gen_color == (u8)(ring->gen_color - 1)) + return false; + + /* Make sure ring descriptor is up-to-date before reading */ + smp_rmb(); + *comp = *qcomp; + gen_color = comp->gen_color; + + if (gen_color != ring->gen_color) { + dev_err(eq->ionic->dev, + "eq %u ring %u missed %u events\n", + eq->index, ring_index, + eq->depth * (gen_color - ring->gen_color)); + + ring->gen_color = gen_color; + } + + ring->index = (ring->index + 1) & (eq->depth - 1); + ring->gen_color += ring->index == 0; + + return true; +} + +static int ionic_poll_eq_ring(struct ionic_eq *eq, int ring_index) +{ + struct ionic_eq_comp comp; + int budget = eq->depth; + int credits = 0; + int code; + + while (credits < budget && ionic_next_eq_comp(eq, ring_index, &comp)) { + code = le16_to_cpu(comp.code); + + switch (code) { + case IONIC_EQ_COMP_CODE_NONE: + break; + case IONIC_EQ_COMP_CODE_RX_COMP: + case IONIC_EQ_COMP_CODE_TX_COMP: + ionic_txrx_notify(eq->ionic, + le16_to_cpu(comp.lif_index), + le32_to_cpu(comp.qid), + code == IONIC_EQ_COMP_CODE_TX_COMP); + break; + default: + dev_warn(eq->ionic->dev, + "eq %u ring %u unrecognized event %u\n", + eq->index, ring_index, code); + break; + } + + credits++; + } + + return credits; +} + +static irqreturn_t ionic_eq_isr(int irq, void *data) +{ + struct ionic_eq *eq = data; + int credits; + + credits = ionic_poll_eq_ring(eq, 0) + ionic_poll_eq_ring(eq, 1); + ionic_intr_credits(eq->ionic->idev.intr_ctrl, eq->intr.index, + credits, IONIC_INTR_CRED_UNMASK); + + return IRQ_HANDLED; +} + +static int ionic_request_eq_irq(struct ionic *ionic, struct ionic_eq *eq) +{ + struct device *dev = ionic->dev; + struct ionic_intr_info *intr = &eq->intr; + const char *name = dev_name(dev); + + snprintf(intr->name, sizeof(intr->name), + "%s-%s-eq%d", IONIC_DRV_NAME, name, eq->index); + + return devm_request_irq(dev, intr->vector, ionic_eq_isr, + 0, intr->name, eq); +} + +static int ionic_eq_alloc(struct ionic *ionic, int index) +{ + const int ring_bytes = sizeof(struct ionic_eq_comp) * IONIC_EQ_DEPTH; + struct ionic_eq *eq; + int err; + + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + eq->ionic = ionic; + eq->index = index; + eq->depth = IONIC_EQ_DEPTH; + + err = ionic_intr_alloc(ionic, &eq->intr); + if (err) { + dev_warn(ionic->dev, "no intr for eq %u: %d\n", index, err); + goto err_out; + } + + err = ionic_bus_get_irq(ionic, eq->intr.index); + if (err < 0) { + dev_warn(ionic->dev, "no vector for eq %u: %d\n", index, err); + goto err_out_free_intr; + } + eq->intr.vector = err; + + ionic_intr_mask_assert(ionic->idev.intr_ctrl, eq->intr.index, + IONIC_INTR_MASK_SET); + + /* try to get the irq on the local numa node first */ + eq->intr.cpu = cpumask_local_spread(eq->intr.index, + dev_to_node(ionic->dev)); + if (eq->intr.cpu != -1) + cpumask_set_cpu(eq->intr.cpu, &eq->intr.affinity_mask); + + eq->ring[0].gen_color = 1; + eq->ring[0].base = dma_alloc_coherent(ionic->dev, ring_bytes, + &eq->ring[0].base_pa, + GFP_KERNEL); + + eq->ring[1].gen_color = 1; + eq->ring[1].base = 
dma_alloc_coherent(ionic->dev, ring_bytes, + &eq->ring[1].base_pa, + GFP_KERNEL); + + ionic->eqs[index] = eq; + + ionic_debugfs_add_eq(eq); + + return 0; + +err_out_free_intr: + ionic_intr_free(ionic, eq->intr.index); +err_out: + kfree(eq); + return err; +} + +int ionic_eqs_alloc(struct ionic *ionic) +{ + size_t eq_size; + int i, err; + + eq_size = sizeof(*ionic->eqs) * ionic->neth_eqs; + ionic->eqs = kzalloc(eq_size, GFP_KERNEL); + if (!ionic->eqs) + return -ENOMEM; + + for (i = 0; i < ionic->neth_eqs; i++) { + err = ionic_eq_alloc(ionic, i); + if (err) + return err; + } + + return 0; +} + +static void ionic_eq_free(struct ionic_eq *eq) +{ + const int ring_bytes = sizeof(struct ionic_eq_comp) * IONIC_EQ_DEPTH; + struct ionic *ionic = eq->ionic; + + eq->ionic->eqs[eq->index] = NULL; + + dma_free_coherent(ionic->dev, ring_bytes, + eq->ring[0].base, + eq->ring[0].base_pa); + dma_free_coherent(ionic->dev, ring_bytes, + eq->ring[1].base, + eq->ring[1].base_pa); + ionic_intr_free(ionic, eq->intr.index); + kfree(eq); +} + +void ionic_eqs_free(struct ionic *ionic) +{ + int i; + + if (!ionic->eqs) + return; + + for (i = 0; i < ionic->neth_eqs; i++) { + if (ionic->eqs[i]) + ionic_eq_free(ionic->eqs[i]); + } + + kfree(ionic->eqs); + ionic->eqs = NULL; + ionic->neth_eqs = 0; +} + +static void ionic_eq_deinit(struct ionic_eq *eq) +{ + struct ionic *ionic = eq->ionic; + union ionic_dev_cmd cmd = { + .q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .type = IONIC_QTYPE_EQ, + .index = cpu_to_le32(eq->index), + .oper = IONIC_Q_DISABLE, + }, + }; + + if (!eq->is_init) + return; + eq->is_init = false; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_go(&ionic->idev, &cmd); + ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + + ionic_intr_mask(ionic->idev.intr_ctrl, eq->intr.index, + IONIC_INTR_MASK_SET); + synchronize_irq(eq->intr.vector); + + irq_set_affinity_hint(eq->intr.vector, NULL); + devm_free_irq(ionic->dev, eq->intr.vector, eq); +} + +void ionic_eqs_deinit(struct ionic *ionic) +{ + int i; + + if (!ionic->eqs) + return; + + for (i = 0; i < ionic->neth_eqs; i++) { + if (ionic->eqs[i]) + ionic_eq_deinit(ionic->eqs[i]); + } +} + +static int ionic_eq_init(struct ionic_eq *eq) +{ + struct ionic *ionic = eq->ionic; + union ionic_q_identity __iomem *q_ident; + union ionic_dev_cmd cmd = { + .q_init = { + .opcode = IONIC_CMD_Q_INIT, + .type = IONIC_QTYPE_EQ, + .ver = 0, + .index = cpu_to_le32(eq->index), + .intr_index = cpu_to_le16(eq->intr.index), + .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_ENA), + .ring_size = ilog2(eq->depth), + .ring_base = cpu_to_le64(eq->ring[0].base_pa), + .cq_ring_base = cpu_to_le64(eq->ring[1].base_pa), + }, + }; + int err; + + q_ident = (union ionic_q_identity __iomem *)&ionic->idev.dev_cmd_regs->data; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_queue_identify(&ionic->idev, IONIC_LIF_TYPE_CLASSIC, + IONIC_QTYPE_EQ, 0); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + cmd.q_init.ver = ioread8(&q_ident->version); + mutex_unlock(&ionic->dev_cmd_lock); + + if (err == -EINVAL) { + dev_err(ionic->dev, "eq init failed, not supported\n"); + return err; + } else if (err == -EIO) { + dev_err(ionic->dev, "q_ident eq failed, not supported on older FW\n"); + return err; + } else if (err) { + dev_warn(ionic->dev, "eq version type request failed %d, defaulting to %d\n", + err, cmd.q_init.ver); + } + + ionic_intr_mask(ionic->idev.intr_ctrl, eq->intr.index, + IONIC_INTR_MASK_SET); + ionic_intr_clean(ionic->idev.intr_ctrl, 
eq->intr.index); + + err = ionic_request_eq_irq(ionic, eq); + if (err) { + dev_warn(ionic->dev, "eq %d irq request failed %d\n", + eq->index, err); + return err; + } + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_go(&ionic->idev, &cmd); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + + if (err) { + dev_err(ionic->dev, "eq %d init failed %d\n", + eq->index, err); + return err; + } + + ionic_intr_mask(ionic->idev.intr_ctrl, eq->intr.index, + IONIC_INTR_MASK_CLEAR); + + eq->is_init = true; + + return 0; +} + +int ionic_eqs_init(struct ionic *ionic) +{ + int i, err; + + for (i = 0; i < ionic->neth_eqs; i++) { + if (ionic->eqs[i]) { + err = ionic_eq_init(ionic->eqs[i]); + if (err) + return err; + } + } + + return 0; +} + +int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, + struct ionic_intr_info *intr, + unsigned int num_descs, size_t desc_size) +{ + unsigned int ring_size; + + if (desc_size == 0 || !is_power_of_2(num_descs)) + return -EINVAL; + + ring_size = ilog2(num_descs); + if (ring_size < 2 || ring_size > 16) + return -EINVAL; + + cq->lif = lif; + cq->bound_intr = intr; + cq->num_descs = num_descs; + cq->desc_size = desc_size; + cq->tail_idx = 0; + cq->done_color = 1; + + return 0; +} + +void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa) +{ + struct ionic_cq_info *cur; + unsigned int i; + + cq->base = base; + cq->base_pa = base_pa; + + for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++) + cur->cq_desc = base + (i * cq->desc_size); +} + +void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q) +{ + cq->bound_q = q; +} + +unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, + ionic_cq_cb cb, ionic_cq_done_cb done_cb, + void *done_arg) +{ + struct ionic_cq_info *cq_info; + unsigned int work_done = 0; + + if (work_to_do == 0) + return 0; + + cq_info = &cq->info[cq->tail_idx]; + while (cb(cq, cq_info)) { + if (cq->tail_idx == cq->num_descs - 1) + cq->done_color = !cq->done_color; + cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); + cq_info = &cq->info[cq->tail_idx]; + DEBUG_STATS_CQE_CNT(cq); + + if (++work_done >= work_to_do) + break; + } + + if (work_done && done_cb) + done_cb(done_arg); + + return work_done; +} + +int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, + struct ionic_queue *q, unsigned int index, const char *name, + unsigned int num_descs, size_t desc_size, + size_t sg_desc_size, unsigned int pid) +{ + unsigned int ring_size; + + if (desc_size == 0 || !is_power_of_2(num_descs)) + return -EINVAL; + + ring_size = ilog2(num_descs); + if (ring_size < 2 || ring_size > 16) + return -EINVAL; + + q->lif = lif; + q->idev = idev; + q->index = index; + q->num_descs = num_descs; + q->desc_size = desc_size; + q->sg_desc_size = sg_desc_size; + q->tail_idx = 0; + q->head_idx = 0; + q->pid = pid; + + snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index); + + return 0; +} + +void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) +{ + struct ionic_desc_info *cur; + unsigned int i; + + q->base = base; + q->base_pa = base_pa; + + for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) + cur->desc = base + (i * q->desc_size); +} + +void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa) +{ + struct ionic_desc_info *cur; + unsigned int i; + + q->cmb_base = base; + q->cmb_base_pa = base_pa; + + for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) + cur->cmb_desc = base + (i * q->desc_size); +} + 
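/*
 * Illustration (editorial, not part of the dsc-drivers patch): the queue and
 * completion rings above require a power-of-two num_descs so that index
 * wrap-around and free-space math reduce to a single mask, as in
 * ionic_q_post(), ionic_q_service() and ionic_q_space_avail().  A minimal
 * standalone sketch of that arithmetic; the "ring" type and all names here
 * are invented for the example, not driver API:
 */
#include <assert.h>
#include <stdio.h>

struct ring {
	unsigned int head;   /* producer (post) index */
	unsigned int tail;   /* consumer (service) index */
	unsigned int ndesc;  /* ring size, must be a power of 2 */
};

/* advance an index with wrap, as the driver does with head_idx/tail_idx */
static unsigned int ring_next(const struct ring *r, unsigned int idx)
{
	return (idx + 1) & (r->ndesc - 1);
}

/*
 * free slots, keeping one descriptor unused so head == tail always means
 * "empty"; this gives the same result as the branchy ionic_q_space_avail()
 */
static unsigned int ring_space(const struct ring *r)
{
	return (r->tail - r->head - 1) & (r->ndesc - 1);
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0, .ndesc = 8 };

	assert(ring_space(&r) == 7);     /* empty: ndesc - 1 usable slots */
	r.head = ring_next(&r, r.head);  /* post one descriptor */
	assert(ring_space(&r) == 6);
	r.tail = ring_next(&r, r.tail);  /* complete it */
	assert(ring_space(&r) == 7);
	printf("ring index math checks out\n");
	return 0;
}
/* end of illustration */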
+void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) +{ + struct ionic_desc_info *cur; + unsigned int i; + + q->sg_base = base; + q->sg_base_pa = base_pa; + + for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) + cur->sg_desc = base + (i * q->sg_desc_size); +} + +void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, + void *cb_arg) +{ + struct ionic_desc_info *desc_info; + struct ionic_lif *lif = q->lif; + struct device *dev = q->dev; + + desc_info = &q->info[q->head_idx]; + desc_info->cb = cb; + desc_info->cb_arg = cb_arg; + + q->head_idx = (q->head_idx + 1) & (q->num_descs - 1); + +#ifdef IONIC_DEBUG_STATS + q->depth = q->num_descs - ionic_q_space_avail(q); + q->depth_max = max_t(u64, q->depth, q->depth_max); +#endif + + dev_dbg(dev, "%s: lif=%d qname=%s hw_type=%d hw_index=%d p_index=%d ringdb=%d\n", + __func__, q->lif->index, q->name, q->hw_type, q->hw_index, + q->head_idx, ring_doorbell); + + if (ring_doorbell) { + ionic_dbell_ring(lif->kern_dbpage, q->hw_type, + q->dbval | q->head_idx); + + q->dbell_jiffies = jiffies; + + if (q_to_qcq(q)->napi_qcq) + mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline, + jiffies + IONIC_NAPI_DEADLINE); + } +} + +static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos) +{ + unsigned int mask, tail, head; + + mask = q->num_descs - 1; + tail = q->tail_idx; + head = q->head_idx; + + return ((pos - tail) & mask) < ((head - tail) & mask); +} + +void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, + unsigned int stop_index) +{ + struct ionic_desc_info *desc_info; + ionic_desc_cb cb; + void *cb_arg; + u16 index; + + /* check for empty queue */ + if (q->tail_idx == q->head_idx) + return; + + /* stop index must be for a descriptor that is not yet completed */ + if (unlikely(!ionic_q_is_posted(q, stop_index))) + dev_err(q->dev, + "ionic stop is not posted %s stop %u tail %u head %u\n", + q->name, stop_index, q->tail_idx, q->head_idx); + + do { + desc_info = &q->info[q->tail_idx]; + index = q->tail_idx; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + + cb = desc_info->cb; + cb_arg = desc_info->cb_arg; + + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + + if (cb) + cb(q, desc_info, cq_info, cb_arg); + } while (index != stop_index); +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_dev.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_dev.h new file mode 100644 index 0000000000..884038d4bd --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_dev.h @@ -0,0 +1,437 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#ifndef _IONIC_DEV_H_ +#define _IONIC_DEV_H_ + +#include +#include +#include + +#include "ionic_if.h" +#include "ionic_api.h" +#include "ionic_regs.h" + +#define IONIC_MAX_TX_DESC 8192 +#define IONIC_MAX_RX_DESC 16384 +#define IONIC_MIN_TXRX_DESC 64 +#define IONIC_DEF_TXRX_DESC 4096 +#define IONIC_RX_FILL_THRESHOLD 64 +#define IONIC_RX_FILL_DIV 8 +#define IONIC_LIFS_MAX 1024 +#define IONIC_WATCHDOG_PCI_SECS 5 +#define IONIC_WATCHDOG_PLAT_MSECS 100 +#define IONIC_HEARTBEAT_SECS 1 +#define IONIC_ITR_COAL_USEC_DEFAULT 8 + +#define IONIC_DEV_CMD_REG_VERSION 1 +#define IONIC_DEV_INFO_REG_COUNT 32 +#define IONIC_DEV_CMD_REG_COUNT 32 + +#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */ +#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */ +#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */ +#define IONIC_RX_MIN_DOORBELL_DEADLINE 
(HZ / 100) /* 10ms */ +#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */ + +struct ionic_dev_bar { + void __iomem *vaddr; + phys_addr_t bus_addr; + unsigned long len; + int res_index; +}; + +#ifndef __CHECKER__ +/* Registers */ +static_assert(sizeof(struct ionic_intr) == 32); + +static_assert(sizeof(struct ionic_doorbell) == 8); +static_assert(sizeof(struct ionic_intr_ctrl) == 32); +static_assert(sizeof(struct ionic_intr_status) == 8); +static_assert(sizeof(union ionic_dev_regs) == 4096); +static_assert(sizeof(union ionic_dev_info_regs) == 2048); +static_assert(sizeof(union ionic_dev_cmd_regs) == 2048); +static_assert(sizeof(struct ionic_lif_stats) == 1024); + +static_assert(sizeof(struct ionic_admin_cmd) == 64); +static_assert(sizeof(struct ionic_admin_comp) == 16); +static_assert(sizeof(struct ionic_nop_cmd) == 64); +static_assert(sizeof(struct ionic_nop_comp) == 16); + +/* Device commands */ +static_assert(sizeof(struct ionic_dev_identify_cmd) == 64); +static_assert(sizeof(struct ionic_dev_identify_comp) == 16); +static_assert(sizeof(struct ionic_dev_init_cmd) == 64); +static_assert(sizeof(struct ionic_dev_init_comp) == 16); +static_assert(sizeof(struct ionic_dev_reset_cmd) == 64); +static_assert(sizeof(struct ionic_dev_reset_comp) == 16); +static_assert(sizeof(struct ionic_dev_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_dev_getattr_comp) == 16); +static_assert(sizeof(struct ionic_dev_setattr_cmd) == 64); +static_assert(sizeof(struct ionic_dev_setattr_comp) == 16); +static_assert(sizeof(struct ionic_hii_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_hii_getattr_comp) == 16); + +/* Port commands */ +static_assert(sizeof(struct ionic_port_identify_cmd) == 64); +static_assert(sizeof(struct ionic_port_identify_comp) == 16); +static_assert(sizeof(struct ionic_port_init_cmd) == 64); +static_assert(sizeof(struct ionic_port_init_comp) == 16); +static_assert(sizeof(struct ionic_port_reset_cmd) == 64); +static_assert(sizeof(struct ionic_port_reset_comp) == 16); +static_assert(sizeof(struct ionic_port_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_port_getattr_comp) == 16); +static_assert(sizeof(struct ionic_port_setattr_cmd) == 64); +static_assert(sizeof(struct ionic_port_setattr_comp) == 16); + +/* LIF commands */ +static_assert(sizeof(struct ionic_lif_init_cmd) == 64); +static_assert(sizeof(struct ionic_lif_init_comp) == 16); +static_assert(sizeof(struct ionic_lif_reset_cmd) == 64); +static_assert(sizeof(ionic_lif_reset_comp) == 16); +static_assert(sizeof(struct ionic_lif_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_lif_getattr_comp) == 16); +static_assert(sizeof(struct ionic_lif_setattr_cmd) == 64); +static_assert(sizeof(struct ionic_lif_setattr_comp) == 16); +static_assert(sizeof(struct ionic_lif_setphc_cmd) == 64); + +static_assert(sizeof(struct ionic_q_init_cmd) == 64); +static_assert(sizeof(struct ionic_q_init_comp) == 16); +static_assert(sizeof(struct ionic_q_control_cmd) == 64); +static_assert(sizeof(ionic_q_control_comp) == 16); +static_assert(sizeof(struct ionic_q_identify_cmd) == 64); +static_assert(sizeof(struct ionic_q_identify_comp) == 16); + +static_assert(sizeof(struct ionic_rx_mode_set_cmd) == 64); +static_assert(sizeof(ionic_rx_mode_set_comp) == 16); +static_assert(sizeof(struct ionic_rx_filter_add_cmd) == 64); +static_assert(sizeof(struct ionic_rx_filter_add_comp) == 16); +static_assert(sizeof(struct ionic_rx_filter_del_cmd) == 64); +static_assert(sizeof(ionic_rx_filter_del_comp) == 16); + +/* RDMA commands */ 
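/* These compile-time checks pin the device ABI in place: admin and device
 * commands are exactly 64 bytes, their completions 16 bytes, and notifyq
 * events fill a 64-byte completion, so driver and firmware can exchange
 * them through the dev_cmd register window and the queues without any
 * size negotiation.
 */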
+static_assert(sizeof(struct ionic_rdma_reset_cmd) == 64); +static_assert(sizeof(struct ionic_rdma_queue_cmd) == 64); + +/* Events */ +static_assert(sizeof(struct ionic_notifyq_cmd) == 4); +static_assert(sizeof(union ionic_notifyq_comp) == 64); +static_assert(sizeof(struct ionic_notifyq_event) == 64); +static_assert(sizeof(struct ionic_link_change_event) == 64); +static_assert(sizeof(struct ionic_reset_event) == 64); +static_assert(sizeof(struct ionic_heartbeat_event) == 64); +static_assert(sizeof(struct ionic_log_event) == 64); + +/* I/O */ +static_assert(sizeof(struct ionic_txq_desc) == 16); +static_assert(sizeof(struct ionic_txq_sg_desc) == 128); +static_assert(sizeof(struct ionic_txq_comp) == 16); + +static_assert(sizeof(struct ionic_rxq_desc) == 16); +static_assert(sizeof(struct ionic_rxq_sg_desc) == 128); +static_assert(sizeof(struct ionic_rxq_comp) == 16); + +/* SR/IOV */ +static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64); +static_assert(sizeof(struct ionic_vf_setattr_comp) == 16); +static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_vf_getattr_comp) == 16); +#endif /* __CHECKER__ */ + +struct ionic_dev { + union ionic_dev_info_regs __iomem *dev_info_regs; + union ionic_dev_cmd_regs __iomem *dev_cmd_regs; + struct ionic_hwstamp_regs __iomem *hwstamp_regs; + + atomic_long_t last_check_time; + unsigned long last_hb_time; + u32 last_fw_hb; + bool fw_hb_ready; + bool fw_status_ready; + u8 fw_generation; + + u64 __iomem *db_pages; + dma_addr_t phy_db_pages; + + struct ionic_intr __iomem *intr_ctrl; + u64 __iomem *intr_status; + u8 *msix_cfg_base; + + struct mutex cmb_inuse_lock; /* for cmb_inuse */ + unsigned long *cmb_inuse; + dma_addr_t phy_cmb_pages; + u32 cmb_npages; + + u32 port_info_sz; + struct ionic_port_info *port_info; + dma_addr_t port_info_pa; + + struct ionic_devinfo dev_info; +}; + +struct ionic_cq_info { + union { + void *cq_desc; + struct ionic_admin_comp *admincq; + struct ionic_notifyq_event *notifyq; + }; +}; + +struct ionic_queue; +struct ionic_qcq; +struct ionic_desc_info; + +typedef void (*ionic_desc_cb)(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg); + + +#define IONIC_PAGE_ORDER 0 +#define IONIC_PAGE_SIZE (PAGE_SIZE << IONIC_PAGE_ORDER) +#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 4) +#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\ + __GFP_COMP | __GFP_MEMALLOC) + +struct ionic_buf_info { + struct page *page; + dma_addr_t dma_addr; + u32 page_offset; + u32 len; +}; + +#define IONIC_PAGE_CACHE_SIZE 2048 + +struct ionic_page_cache { + u32 head; + u32 tail; + struct ionic_buf_info ring[IONIC_PAGE_CACHE_SIZE]; +} ____cacheline_aligned_in_smp; + +#define IONIC_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1) + +struct ionic_desc_info { + union { + void *desc; + struct ionic_txq_desc *txq_desc; + struct ionic_rxq_desc *rxq_desc; + struct ionic_admin_cmd *adminq_desc; + }; + void __iomem *cmb_desc; + union { + void *sg_desc; + struct ionic_txq_sg_desc *txq_sg_desc; + struct ionic_rxq_sg_desc *rxq_sgl_desc; + }; + unsigned int bytes; + unsigned int nbufs; + struct ionic_buf_info bufs[IONIC_MAX_FRAGS]; + ionic_desc_cb cb; + void *cb_arg; +}; + +#define IONIC_QUEUE_NAME_MAX_SZ 32 + +struct ionic_queue { + struct device *dev; + struct ionic_lif *lif; + struct ionic_desc_info *info; + u64 dbval; + unsigned long dbell_deadline; + unsigned long dbell_jiffies; + u16 head_idx; + u16 tail_idx; + unsigned int index; + unsigned int num_descs; + unsigned int max_sg_elems; + u64 
dbell_count; + u64 stop; + u64 wake; + u64 drop; +#ifdef IONIC_DEBUG_STATS + u64 depth; + u64 depth_max; +#endif + u64 features; + struct ionic_dev *idev; + unsigned int type; + unsigned int hw_index; + unsigned int hw_type; + union { + void *base; + struct ionic_txq_desc *txq; + struct ionic_rxq_desc *rxq; + struct ionic_admin_cmd *adminq; + }; + void __iomem *cmb_base; + union { + void *sg_base; + struct ionic_txq_sg_desc *txq_sgl; + struct ionic_rxq_sg_desc *rxq_sgl; + }; + dma_addr_t base_pa; /* must be page aligned */ + dma_addr_t cmb_base_pa; + dma_addr_t sg_base_pa; /* must be page aligned */ + unsigned int desc_size; + unsigned int sg_desc_size; + unsigned int pid; + struct ionic_page_cache page_cache; + char name[IONIC_QUEUE_NAME_MAX_SZ]; +} ____cacheline_aligned_in_smp; + +#define IONIC_INTR_INDEX_NOT_ASSIGNED -1 +#define IONIC_INTR_NAME_MAX_SZ 32 + +struct ionic_intr_info { + char name[IONIC_INTR_NAME_MAX_SZ]; + unsigned int index; + unsigned int vector; + u64 rearm_count; + unsigned int cpu; + cpumask_t affinity_mask; + u32 dim_coal_hw; +}; + +struct ionic_cq { + struct ionic_lif *lif; + struct ionic_cq_info *info; + struct ionic_queue *bound_q; + struct ionic_intr_info *bound_intr; + u16 tail_idx; + bool done_color; + unsigned int num_descs; + unsigned int desc_size; +#ifdef IONIC_DEBUG_STATS + u64 compl_count; +#endif + void *base; + dma_addr_t base_pa; /* must be page aligned */ +} ____cacheline_aligned_in_smp; + +struct ionic_eq_ring { + struct ionic_eq_comp *base; + dma_addr_t base_pa; + + int index; + u8 gen_color; +}; + +struct ionic_eq { + struct ionic *ionic; + struct ionic_eq_ring ring[2]; + struct ionic_intr_info intr; + + int index; + int depth; + + bool is_init; +}; + +#define IONIC_EQ_DEPTH 0x1000 + +struct ionic; + +static inline void ionic_intr_init(struct ionic_dev *idev, + struct ionic_intr_info *intr, + unsigned long index) +{ + ionic_intr_clean(idev->intr_ctrl, index); + intr->index = index; +} + +static inline unsigned int ionic_q_space_avail(struct ionic_queue *q) +{ + unsigned int avail = q->tail_idx; + + if (q->head_idx >= avail) + avail += q->num_descs - q->head_idx - 1; + else + avail -= q->head_idx + 1; + + return avail; +} + +static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want) +{ + return ionic_q_space_avail(q) >= want; +} + +void ionic_init_devinfo(struct ionic *ionic); +int ionic_dev_setup(struct ionic *ionic); +void ionic_dev_teardown(struct ionic *ionic); + +void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd); +u8 ionic_dev_cmd_status(struct ionic_dev *idev); +bool ionic_dev_cmd_done(struct ionic_dev *idev); +void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp); + +void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver); +void ionic_dev_cmd_init(struct ionic_dev *idev); +void ionic_dev_cmd_reset(struct ionic_dev *idev); + +void ionic_dev_cmd_port_identify(struct ionic_dev *idev); +void ionic_dev_cmd_port_init(struct ionic_dev *idev); +void ionic_dev_cmd_port_reset(struct ionic_dev *idev); +void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state); +void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed); +void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable); +void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type); +void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type); + +int ionic_set_vf_config(struct ionic *ionic, int vf, + struct ionic_vf_setattr_cmd *vfc); +int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int 
vf, u8 attr, + struct ionic_vf_getattr_comp *comp); +void ionic_vf_start(struct ionic *ionic, int vf); + +void ionic_dev_cmd_queue_identify(struct ionic_dev *idev, + u16 lif_type, u8 qtype, u8 qver); +void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver); +void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index, + dma_addr_t addr); +void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index); +void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq, + u16 lif_index, u16 intr_index); + +int ionic_db_page_num(struct ionic_lif *lif, int pid); + +int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order); +void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order); + +int ionic_eqs_alloc(struct ionic *ionic); +void ionic_eqs_free(struct ionic *ionic); +void ionic_eqs_deinit(struct ionic *ionic); +int ionic_eqs_init(struct ionic *ionic); + +int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, + struct ionic_intr_info *intr, + unsigned int num_descs, size_t desc_size); +void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa); +void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q); +typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info); +typedef void (*ionic_cq_done_cb)(void *done_arg); +unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, + ionic_cq_cb cb, ionic_cq_done_cb done_cb, + void *done_arg); + +int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, + struct ionic_queue *q, unsigned int index, const char *name, + unsigned int num_descs, size_t desc_size, + size_t sg_desc_size, unsigned int pid); +void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa); +void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa); +void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa); +void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, + void *cb_arg); +void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start); +void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, + unsigned int stop_index); +int ionic_heartbeat_check(struct ionic *ionic); +bool ionic_is_fw_running(struct ionic_dev *idev); +void ionic_watchdog_cb(struct timer_list *t); +void ionic_watchdog_init(struct ionic *ionic); + +bool ionic_adminq_poke_doorbell(struct ionic_queue *q); +bool ionic_txq_poke_doorbell(struct ionic_queue *q); +bool ionic_rxq_poke_doorbell(struct ionic_queue *q); + +#endif /* _IONIC_DEV_H_ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_devlink.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_devlink.c new file mode 100644 index 0000000000..ac11d68465 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_devlink.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_devlink.h" + +#ifdef IONIC_DEVLINK +#ifdef HAVE_DEVLINK_UPDATE_PARAMS +static int ionic_dl_flash_update(struct devlink *dl, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct ionic *ionic = devlink_priv(dl); + +#ifdef HAVE_DEVLINK_PREFETCH_FW + return ionic_firmware_update(ionic->lif, params->fw); +#else + return ionic_firmware_fetch_and_update(ionic->lif, params->file_name); 
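/* Both flash_update variants serve the same userspace request, e.g.
 * "devlink dev flash pci/0000:03:00.0 file ionic_fw.bin" (device address
 * and file name illustrative): with HAVE_DEVLINK_PREFETCH_FW the devlink
 * core has already loaded the firmware object into params->fw, while older
 * kernels pass only a name and the driver fetches the file itself.
 */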
+#endif +} +#else +static int ionic_dl_flash_update(struct devlink *dl, + const char *fwname, + const char *component, + struct netlink_ext_ack *extack) +{ + struct ionic *ionic = devlink_priv(dl); + + if (component) + return -EOPNOTSUPP; + + return ionic_firmware_fetch_and_update(ionic->lif, fwname); +} +#endif /* HAVE_DEVLINK_UPDATE_PARAMS */ + +static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct ionic *ionic = devlink_priv(dl); + struct ionic_dev *idev = &ionic->idev; + char buf[16]; + int err = 0; + u32 val; + + err = devlink_info_driver_name_put(req, IONIC_DRV_NAME); + if (err) + return err; + + err = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + idev->dev_info.fw_version); + if (err) + return err; + + val = ioread32(&idev->dev_info_regs->fw_heartbeat); + snprintf(buf, sizeof(buf), "0x%x", val); + err = devlink_info_version_running_put(req, "fw.heartbeat", buf); + if (err) + return err; + + val = ioread8(&idev->dev_info_regs->fw_status); + snprintf(buf, sizeof(buf), "0x%x", val); + err = devlink_info_version_running_put(req, "fw.status", buf); + if (err) + return err; + + snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_type); + err = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, + buf); + if (err) + return err; + + snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_rev); + err = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, + buf); + if (err) + return err; + + err = devlink_info_serial_number_put(req, idev->dev_info.serial_num); + + return err; +} + +static const struct devlink_ops ionic_dl_ops = { + .info_get = ionic_dl_info_get, + .flash_update = ionic_dl_flash_update, +}; + +struct ionic *ionic_devlink_alloc(struct device *dev) +{ + struct devlink *dl; + + dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic), dev); + + return devlink_priv(dl); +} + +void ionic_devlink_free(struct ionic *ionic) +{ + struct devlink *dl = priv_to_devlink(ionic); + + devlink_free(dl); +} + +int ionic_devlink_register(struct ionic *ionic) +{ + struct devlink *dl = priv_to_devlink(ionic); + int err; + +#ifdef HAVE_VOID_DEVLINK_REGISTER + err = devlink_port_register(dl, &ionic->dl_port, 0); + if (err) { + dev_err(ionic->dev, "devlink_port_register failed: %d\n", err); + devlink_unregister(dl); + return err; + } + + devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev); + devlink_register(dl); +#else + err = devlink_register(dl, ionic->dev); + if (err) { + dev_warn(ionic->dev, "devlink_register failed: %d\n", err); + return err; + } + + err = devlink_port_register(dl, &ionic->dl_port, 0); + if (err) { + dev_err(ionic->dev, "devlink_port_register failed: %d\n", err); + devlink_unregister(dl); + return err; + } + + devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev); +#endif + return 0; +} + +void ionic_devlink_unregister(struct ionic *ionic) +{ + struct devlink *dl = priv_to_devlink(ionic); + + devlink_port_unregister(&ionic->dl_port); + devlink_unregister(dl); +} +#endif /* IONIC_DEVLINK */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_devlink.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_devlink.h new file mode 100644 index 0000000000..9fb8de8c4d --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_devlink.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#ifndef 
_IONIC_DEVLINK_H_ +#define _IONIC_DEVLINK_H_ + +#include + +#if IS_ENABLED(CONFIG_NET_DEVLINK) +#include <net/devlink.h> +#endif + +int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw); +int ionic_firmware_fetch_and_update(struct ionic_lif *lif, const char *fw_name); + +/* make sure we've got a new-enough devlink support to use dev info */ +#ifdef DEVLINK_INFO_VERSION_GENERIC_BOARD_ID + +#define IONIC_DEVLINK + +struct ionic *ionic_devlink_alloc(struct device *dev); +void ionic_devlink_free(struct ionic *ionic); +int ionic_devlink_register(struct ionic *ionic); +void ionic_devlink_unregister(struct ionic *ionic); +#else +#define ionic_devlink_alloc(dev) devm_kzalloc(dev, sizeof(struct ionic), GFP_KERNEL) +#define ionic_devlink_free(i) devm_kfree(i->dev, i) + +#define ionic_devlink_register(x) 0 +#define ionic_devlink_unregister(x) +#endif + +#if !IS_ENABLED(CONFIG_NET_DEVLINK) +#define priv_to_devlink(i) 0 +#define devlink_flash_update_begin_notify(d) +#define devlink_flash_update_end_notify(d) +#define devlink_flash_update_status_notify(d, s, c, n, t) +#endif /* CONFIG_NET_DEVLINK */ + +#endif /* _IONIC_DEVLINK_H_ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_ethtool.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_ethtool.c new file mode 100644 index 0000000000..67d004ed77 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_ethtool.c @@ -0,0 +1,1355 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include + +/* Normally we would #include <linux/sfp.h> here, but some of the + * older distros don't have that file, and some that do have an + * older version that doesn't include these definitions. + */ +enum { + SFF8024_ID_UNK = 0x00, + SFF8024_ID_SFF_8472 = 0x02, + SFF8024_ID_SFP = 0x03, + SFF8024_ID_DWDM_SFP = 0x0b, + SFF8024_ID_QSFP_8438 = 0x0c, + SFF8024_ID_QSFP_8436_8636 = 0x0d, + SFF8024_ID_QSFP28_8636 = 0x11, +}; + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_ethtool.h" +#include "ionic_stats.h" + +static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IONIC_PRIV_F_RDMA_SNIFFER BIT(0) + "rdma-sniffer", +#define IONIC_PRIV_F_DEVICE_RESET BIT(1) + "device-reset", +#define IONIC_PRIV_F_CMB_RINGS BIT(2) + "cmb-rings", + +#define IONIC_PRIV_F_SW_DBG_STATS BIT(3) +#ifdef IONIC_DEBUG_STATS + "sw-dbg-stats", +#endif +}; + +#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings) + +static int ionic_validate_cmb_config(struct ionic_lif *lif, + struct ionic_queue_params *qparam) +{ + int pages_have, pages_required = 0; + unsigned long sz; + + if (!qparam->cmb_enabled) + return 0; + + sz = sizeof(struct ionic_txq_desc) * qparam->ntxq_descs * qparam->nxqs; + pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE; + + sz = sizeof(struct ionic_rxq_desc) * qparam->nrxq_descs * qparam->nxqs; + pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE; + + pages_have = lif->ionic->bars[IONIC_PCI_BAR_CMB].len / PAGE_SIZE; + if (pages_required > pages_have) { + netdev_info(lif->netdev, "Not enough CMB pages for number of queues and size of descriptor rings, need %d have %d", + pages_required, pages_have); + return -ENOMEM; + } + + return pages_required; +} + +static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf) +{ + u32 i; + + for (i = 0; i < ionic_num_stats_grps; i++) + ionic_stats_groups[i].get_strings(lif, &buf); +} + +static void ionic_get_stats(struct net_device *netdev, + struct
ethtool_stats *stats, u64 *buf) +{ + struct ionic_lif *lif = netdev_priv(netdev); + u32 i; + + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return; + + memset(buf, 0, stats->n_stats * sizeof(*buf)); + for (i = 0; i < ionic_num_stats_grps; i++) + ionic_stats_groups[i].get_values(lif, &buf); +} + +static int ionic_get_stats_count(struct ionic_lif *lif) +{ + int i, num_stats = 0; + + for (i = 0; i < ionic_num_stats_grps; i++) + num_stats += ionic_stats_groups[i].get_count(lif); + + return num_stats; +} + +static int ionic_get_sset_count(struct net_device *netdev, int sset) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int count = 0; + + switch (sset) { + case ETH_SS_STATS: + count = ionic_get_stats_count(lif); + break; + case ETH_SS_PRIV_FLAGS: + count = IONIC_PRIV_FLAGS_COUNT; + break; + } + return count; +} + +static void ionic_get_strings(struct net_device *netdev, + u32 sset, u8 *buf) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_STATS: + ionic_get_stats_strings(lif, buf); + break; + case ETH_SS_PRIV_FLAGS: + memcpy(buf, ionic_priv_flags_strings, + IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN); + break; + } +} + +static void ionic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + + strscpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, IONIC_DRV_VERSION, sizeof(drvinfo->version)); + strscpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version, + sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, ionic_bus_info(ionic), + sizeof(drvinfo->bus_info)); +} + +static int ionic_get_regs_len(struct net_device *netdev) +{ + return (IONIC_DEV_INFO_REG_COUNT + IONIC_DEV_CMD_REG_COUNT) * sizeof(u32); +} + +static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct ionic_lif *lif = netdev_priv(netdev); + unsigned int offset; + unsigned int size; + + regs->version = IONIC_DEV_CMD_REG_VERSION; + + offset = 0; + size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); + memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size); + + offset += size; + size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32); + memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size); +} + +static int ionic_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + int copper_seen = 0; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + + if (!idev->port_info) { + netdev_err(netdev, "port_info not initialized\n"); + return -EOPNOTSUPP; + } + + /* The port_info data is found in a DMA space that the NIC keeps + * up-to-date, so there's no need to request the data from the + * NIC, we already have it in our memory space. 
+ */ + + switch (le16_to_cpu(idev->port_info->status.xcvr.pid)) { + /* Copper */ +#ifdef HAVE_ETHTOOL_100G_BITS + case IONIC_XCVR_PID_QSFP_100G_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR4_Full); + copper_seen++; + break; +#endif + case IONIC_XCVR_PID_QSFP_40GBASE_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + copper_seen++; + break; +#ifdef HAVE_ETHTOOL_25G_BITS + case IONIC_XCVR_PID_SFP_25GBASE_CR_S: + case IONIC_XCVR_PID_SFP_25GBASE_CR_L: + case IONIC_XCVR_PID_SFP_25GBASE_CR_N: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + copper_seen++; + break; +#endif + case IONIC_XCVR_PID_SFP_10GBASE_AOC: + case IONIC_XCVR_PID_SFP_10GBASE_CU: +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseCR_Full); +#else + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); +#endif + copper_seen++; + break; + + /* Fibre */ +#ifdef HAVE_ETHTOOL_100G_BITS + case IONIC_XCVR_PID_QSFP_100G_SR4: + case IONIC_XCVR_PID_QSFP_100G_AOC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseSR4_Full); + break; + case IONIC_XCVR_PID_QSFP_100G_CWDM4: + case IONIC_XCVR_PID_QSFP_100G_PSM4: + case IONIC_XCVR_PID_QSFP_100G_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseLR4_ER4_Full); + break; + case IONIC_XCVR_PID_QSFP_100G_ER4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseLR4_ER4_Full); + break; +#endif + case IONIC_XCVR_PID_QSFP_40GBASE_SR4: + case IONIC_XCVR_PID_QSFP_40GBASE_AOC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + break; + case IONIC_XCVR_PID_QSFP_40GBASE_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + break; +#ifdef HAVE_ETHTOOL_25G_BITS + case IONIC_XCVR_PID_SFP_25GBASE_SR: + case IONIC_XCVR_PID_SFP_25GBASE_AOC: + case IONIC_XCVR_PID_SFP_25GBASE_ACC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + break; +#endif +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + case IONIC_XCVR_PID_SFP_10GBASE_SR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_LR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_LRM: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLRM_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_ER: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + break; +#else + case IONIC_XCVR_PID_SFP_10GBASE_SR: + case IONIC_XCVR_PID_SFP_10GBASE_LR: + case IONIC_XCVR_PID_SFP_10GBASE_LRM: + case IONIC_XCVR_PID_SFP_10GBASE_ER: + case IONIC_XCVR_PID_SFP_10GBASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + break; +#endif + case IONIC_XCVR_PID_SFP_1000BASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + break; + case IONIC_XCVR_PID_QSFP_100G_ACC: + case IONIC_XCVR_PID_QSFP_40GBASE_ER4: + case IONIC_XCVR_PID_SFP_25GBASE_LR: + case IONIC_XCVR_PID_SFP_25GBASE_ER: + dev_info(lif->ionic->dev, "no decode bits for xcvr type pid=%d / 0x%x\n", + idev->port_info->status.xcvr.pid, + idev->port_info->status.xcvr.pid); + break; + case IONIC_XCVR_PID_UNKNOWN: + /* This means there's no module plugged in */ + if (lif->ionic->is_mgmt_nic) + ethtool_link_ksettings_add_link_mode(ks, 
supported, + 1000baseT_Full); + break; + default: + dev_dbg(lif->ionic->dev, "unknown xcvr type pid=%d / 0x%x\n", + idev->port_info->status.xcvr.pid, + idev->port_info->status.xcvr.pid); + break; + } + + bitmap_copy(ks->link_modes.advertising, ks->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + +#ifdef ETHTOOL_FEC_NONE + if (idev->port_info->status.fec_type == IONIC_PORT_FEC_TYPE_FC) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); + else if (idev->port_info->status.fec_type == IONIC_PORT_FEC_TYPE_RS) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); +#endif + + if (lif->ionic->is_mgmt_nic) + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); + else + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + + if (idev->port_info->status.xcvr.phy == IONIC_PHY_TYPE_COPPER || + copper_seen) + ks->base.port = PORT_DA; + else if (idev->port_info->status.xcvr.phy == IONIC_PHY_TYPE_FIBER) + ks->base.port = PORT_FIBRE; + else if (lif->ionic->is_mgmt_nic) + ks->base.port = PORT_OTHER; + else + ks->base.port = PORT_NONE; + + if (ks->base.port != PORT_NONE) { + ks->base.speed = le32_to_cpu(lif->info->status.link_speed); + + if (le16_to_cpu(lif->info->status.link_status)) + ks->base.duplex = DUPLEX_FULL; + else + ks->base.duplex = DUPLEX_UNKNOWN; + + if (ionic_is_pf(lif->ionic) && !lif->ionic->is_mgmt_nic) { + ethtool_link_ksettings_add_link_mode(ks, supported, + Autoneg); + + if (idev->port_info->config.an_enable) { + ethtool_link_ksettings_add_link_mode(ks, + advertising, + Autoneg); + ks->base.autoneg = AUTONEG_ENABLE; + } + } + } + + return 0; +} + +static int ionic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + struct ionic *ionic = lif->ionic; + int err = 0; + + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + + /* set autoneg */ + if (ks->base.autoneg != idev->port_info->config.an_enable) { + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_autoneg(idev, ks->base.autoneg); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + } + + /* set speed */ + if (ks->base.speed != le32_to_cpu(idev->port_info->config.speed)) { + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_speed(idev, ks->base.speed); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + } + + return 0; +} + +static void ionic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ionic_lif *lif = netdev_priv(netdev); + u8 pause_type; + + pause->autoneg = 0; + + pause_type = lif->ionic->idev.port_info->config.pause_type; + if (pause_type) { + pause->rx_pause = (pause_type & IONIC_PAUSE_F_RX) ? 1 : 0; + pause->tx_pause = (pause_type & IONIC_PAUSE_F_TX) ? 
1 : 0; + } +} + +static int ionic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + u32 requested_pause; + int err; + + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + + if (pause->autoneg) + return -EOPNOTSUPP; + + /* change both at the same time */ + requested_pause = IONIC_PORT_PAUSE_TYPE_LINK; + if (pause->rx_pause) + requested_pause |= IONIC_PAUSE_F_RX; + if (pause->tx_pause) + requested_pause |= IONIC_PAUSE_F_TX; + + if (requested_pause == lif->ionic->idev.port_info->config.pause_type) + return 0; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_pause(&lif->ionic->idev, requested_pause); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + + return 0; +} + +#ifdef ETHTOOL_FEC_NONE +static int ionic_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + switch (lif->ionic->idev.port_info->status.fec_type) { + case IONIC_PORT_FEC_TYPE_NONE: + fec->active_fec = ETHTOOL_FEC_OFF; + break; + case IONIC_PORT_FEC_TYPE_RS: + fec->active_fec = ETHTOOL_FEC_RS; + break; + case IONIC_PORT_FEC_TYPE_FC: + fec->active_fec = ETHTOOL_FEC_BASER; + break; + default: + fec->active_fec = ETHTOOL_FEC_NONE; + break; + } + + switch (lif->ionic->idev.port_info->config.fec_type) { + case IONIC_PORT_FEC_TYPE_NONE: + fec->fec = ETHTOOL_FEC_OFF; + break; + case IONIC_PORT_FEC_TYPE_RS: + fec->fec = ETHTOOL_FEC_RS; + break; + case IONIC_PORT_FEC_TYPE_FC: + fec->fec = ETHTOOL_FEC_BASER; + break; + default: + fec->fec = ETHTOOL_FEC_NONE; + break; + } + + return 0; +} + +static int ionic_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct ionic_lif *lif = netdev_priv(netdev); + u8 fec_type; + int ret = 0; + + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + + if (lif->ionic->idev.port_info->config.an_enable) { + netdev_err(netdev, "FEC request not allowed while autoneg is enabled\n"); + return -EINVAL; + } + + switch (fec->fec) { + case ETHTOOL_FEC_NONE: + fec_type = IONIC_PORT_FEC_TYPE_NONE; + break; + case ETHTOOL_FEC_OFF: + fec_type = IONIC_PORT_FEC_TYPE_NONE; + break; + case ETHTOOL_FEC_RS: + fec_type = IONIC_PORT_FEC_TYPE_RS; + break; + case ETHTOOL_FEC_BASER: + fec_type = IONIC_PORT_FEC_TYPE_FC; + break; + case ETHTOOL_FEC_AUTO: + default: + netdev_err(netdev, "FEC request 0x%04x not supported\n", + fec->fec); + return -EINVAL; + } + + if (fec_type != lif->ionic->idev.port_info->config.fec_type) { + mutex_lock(&lif->ionic->dev_cmd_lock); + ionic_dev_cmd_port_fec(&lif->ionic->idev, fec_type); + ret = ionic_dev_cmd_wait(lif->ionic, devcmd_timeout); + mutex_unlock(&lif->ionic->dev_cmd_lock); + } + + return ret; +} + +#endif /* ETHTOOL_FEC_NONE */ +#ifdef HAVE_COALESCE_EXTACK +static int ionic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else +static int ionic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce) +#endif +{ + struct ionic_lif *lif = netdev_priv(netdev); + + coalesce->tx_coalesce_usecs = lif->tx_coalesce_usecs; + coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs; + + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + coalesce->use_adaptive_tx_coalesce = test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); + else + 
coalesce->use_adaptive_tx_coalesce = 0; + + coalesce->use_adaptive_rx_coalesce = test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); + + return 0; +} + +#ifdef HAVE_COALESCE_EXTACK +static int ionic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else +static int ionic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce) +#endif +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_identity *ident; + u32 rx_coal, rx_dim; + u32 tx_coal, tx_dim; + unsigned int i; + + if (coalesce->rx_max_coalesced_frames || + coalesce->rx_coalesce_usecs_irq || + coalesce->rx_max_coalesced_frames_irq || + coalesce->tx_max_coalesced_frames || + coalesce->tx_coalesce_usecs_irq || + coalesce->tx_max_coalesced_frames_irq || + coalesce->stats_block_coalesce_usecs || + coalesce->pkt_rate_low || + coalesce->rx_coalesce_usecs_low || + coalesce->rx_max_coalesced_frames_low || + coalesce->tx_coalesce_usecs_low || + coalesce->tx_max_coalesced_frames_low || + coalesce->pkt_rate_high || + coalesce->rx_coalesce_usecs_high || + coalesce->rx_max_coalesced_frames_high || + coalesce->tx_coalesce_usecs_high || + coalesce->tx_max_coalesced_frames_high || + coalesce->rate_sample_interval) + return -EINVAL; + + if (lif->ionic->neth_eqs && + (coalesce->use_adaptive_rx_coalesce || + coalesce->use_adaptive_tx_coalesce)) { + return -EINVAL; + } + + ident = &lif->ionic->ident; + if (ident->dev.intr_coal_div == 0) { + netdev_warn(netdev, "bad HW value in dev.intr_coal_div = %d\n", + ident->dev.intr_coal_div); + return -EIO; + } + + /* Tx normally shares Rx interrupt, so only change Rx if not split */ + if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) && + (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs || + coalesce->use_adaptive_tx_coalesce)) { + netdev_warn(netdev, "only rx parameters can be changed\n"); + return -EINVAL; + } + + /* Convert the usec request to a HW usable value. 
If they asked + * for non-zero and it resolved to zero, bump it up + */ + rx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs); + if (!rx_coal && coalesce->rx_coalesce_usecs) + rx_coal = 1; + tx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->tx_coalesce_usecs); + if (!tx_coal && coalesce->tx_coalesce_usecs) + tx_coal = 1; + + if (rx_coal > IONIC_INTR_CTRL_COAL_MAX || + tx_coal > IONIC_INTR_CTRL_COAL_MAX) + return -ERANGE; + + /* Save the new values */ + lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs; + lif->rx_coalesce_hw = rx_coal; + + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + lif->tx_coalesce_usecs = coalesce->tx_coalesce_usecs; + else + lif->tx_coalesce_usecs = coalesce->rx_coalesce_usecs; + lif->tx_coalesce_hw = tx_coal; + + if (coalesce->use_adaptive_rx_coalesce) { + set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); + rx_dim = rx_coal; + } else { + clear_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); + rx_dim = 0; + } + + if (coalesce->use_adaptive_tx_coalesce) { + set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); + tx_dim = tx_coal; + } else { + clear_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); + tx_dim = 0; + } + + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + for (i = 0; i < lif->nxqs; i++) { + if (lif->rxqcqs[i]->flags & IONIC_QCQ_F_INTR) { + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->rxqcqs[i]->intr.index, + lif->rx_coalesce_hw); + lif->rxqcqs[i]->intr.dim_coal_hw = rx_dim; + } + + if (lif->txqcqs[i]->flags & IONIC_QCQ_F_INTR) { + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->txqcqs[i]->intr.index, + lif->tx_coalesce_hw); + lif->txqcqs[i]->intr.dim_coal_hw = tx_dim; + } + } + } + + return 0; +} + +#ifdef HAVE_RINGPARAM_EXTACK +static void ionic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +#else +static void ionic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif +{ + struct ionic_lif *lif = netdev_priv(netdev); + + ring->tx_max_pending = IONIC_MAX_TX_DESC; + ring->tx_pending = lif->ntxq_descs; + ring->rx_max_pending = IONIC_MAX_RX_DESC; + ring->rx_pending = lif->nrxq_descs; +} + +#ifdef HAVE_RINGPARAM_EXTACK +static int ionic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +#else +static int ionic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_queue_params qparam; + int err; + + ionic_init_queue_params(lif, &qparam); + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) { + netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n"); + return -EINVAL; + } + + if (!is_power_of_2(ring->tx_pending) || + !is_power_of_2(ring->rx_pending)) { + netdev_info(netdev, "Descriptor count must be a power of 2\n"); + return -EINVAL; + } + + if (ring->tx_pending > IONIC_MAX_TX_DESC || + ring->tx_pending < IONIC_MIN_TXRX_DESC) { + netdev_info(netdev, "Tx descriptor count must be in the range [%d-%d]\n", + IONIC_MIN_TXRX_DESC, IONIC_MAX_TX_DESC); + return -EINVAL; + } + + if (ring->rx_pending > IONIC_MAX_RX_DESC || + ring->rx_pending < IONIC_MIN_TXRX_DESC) { + netdev_info(netdev, "Rx descriptor count must be in the range [%d-%d]\n", + IONIC_MIN_TXRX_DESC, IONIC_MAX_RX_DESC); + return -EINVAL; + } + + /* if nothing to do return success */ + if (ring->tx_pending == lif->ntxq_descs && + 
ring->rx_pending == lif->nrxq_descs) + return 0; + + qparam.ntxq_descs = ring->tx_pending; + qparam.nrxq_descs = ring->rx_pending; + + err = ionic_validate_cmb_config(lif, &qparam); + if (err < 0) + return err; + + if (ring->tx_pending != lif->ntxq_descs) + netdev_info(netdev, "Changing Tx ring size from %d to %d\n", + lif->ntxq_descs, ring->tx_pending); + + if (ring->rx_pending != lif->nrxq_descs) + netdev_info(netdev, "Changing Rx ring size from %d to %d\n", + lif->nrxq_descs, ring->rx_pending); + + /* if we're not running, just set the values and return */ + if (!netif_running(lif->netdev)) { + lif->ntxq_descs = ring->tx_pending; + lif->nrxq_descs = ring->rx_pending; + return 0; + } + + mutex_lock(&lif->queue_lock); + err = ionic_reconfigure_queues(lif, &qparam); + mutex_unlock(&lif->queue_lock); + if (err) + netdev_info(netdev, "Ring reconfiguration failed, changes canceled: %d\n", err); + + return err; +} + +static void ionic_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + /* report maximum channels */ + ch->max_combined = lif->ionic->ntxqs_per_lif; + if (!ionic_use_eqs(lif)) { + ch->max_rx = lif->ionic->ntxqs_per_lif / 2; + ch->max_tx = lif->ionic->ntxqs_per_lif / 2; + } + + /* report current channels */ + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { + ch->rx_count = lif->nxqs; + ch->tx_count = lif->nxqs; + } else { + ch->combined_count = lif->nxqs; + } +} + +static int ionic_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_queue_params qparam; + int max_cnt; + int err; + + ionic_init_queue_params(lif, &qparam); + + /* Valid cases + * Combined (default): + * rx_count == tx_count: 0 + * combined_count: 1..lif->ionic->ntxqs_per_lif + * other_count: 0 + * Split: + * rx_count == tx_count: 1..lif->ionic->ntxqs_per_lif / 2 + * combined_count: 0 + * other_count: 0 + */ + if (ch->other_count) { + netdev_info(netdev, "We don't use other queues\n"); + return -EINVAL; + } + + if (ch->rx_count != ch->tx_count) { + netdev_info(netdev, "The rx and tx count must be equal\n"); + return -EINVAL; + } + + if (ionic_use_eqs(lif) && ch->rx_count) { + netdev_info(netdev, "Separate rx and tx count not available when using EventQueues\n"); + return -EINVAL; + } + + if (ch->combined_count && ch->rx_count) { + netdev_info(netdev, "Use either combined or rx and tx, not both\n"); + return -EINVAL; + } + + max_cnt = lif->ionic->ntxqs_per_lif; + if (ch->combined_count) { + if (ch->combined_count > max_cnt) + return -EINVAL; + + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + netdev_info(lif->netdev, "Sharing queue interrupts\n"); + else if (ch->combined_count == lif->nxqs) + return 0; + + if (lif->nxqs != ch->combined_count) + netdev_info(netdev, "Changing queue count from %d to %d\n", + lif->nxqs, ch->combined_count); + + qparam.nxqs = ch->combined_count; + qparam.intr_split = false; + } else { + max_cnt /= 2; + if (ch->rx_count > max_cnt) + return -EINVAL; + + if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + netdev_info(lif->netdev, "Splitting queue interrupts\n"); + else if (ch->rx_count == lif->nxqs) + return 0; + + if (lif->nxqs != ch->rx_count) + netdev_info(netdev, "Changing queue count from %d to %d\n", + lif->nxqs, ch->rx_count); + + qparam.nxqs = ch->rx_count; + qparam.intr_split = true; + } + + err = ionic_validate_cmb_config(lif, &qparam); + if (err < 0) + return err; + + /* if we're not running, just set the values and 
return */ + if (!netif_running(lif->netdev)) { + lif->nxqs = qparam.nxqs; + + if (qparam.intr_split) { + set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + } else { + clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; + lif->tx_coalesce_hw = lif->rx_coalesce_hw; + } + return 0; + } + + mutex_lock(&lif->queue_lock); + err = ionic_reconfigure_queues(lif, &qparam); + mutex_unlock(&lif->queue_lock); + if (err) + netdev_info(netdev, "Queue reconfiguration failed, changes canceled: %d\n", err); + + return err; +} + +int ionic_cmb_pages_in_use(struct ionic_lif *lif) +{ + struct ionic_queue_params qparam; + + ionic_init_queue_params(lif, &qparam); + return ionic_validate_cmb_config(lif, &qparam); +} + +static int ionic_cmb_rings_toggle(struct ionic_lif *lif, bool cmb_enable) +{ + struct ionic_queue_params qparam; + int pages_used; + + if (!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_CMB) || + !(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_CMB) || + !lif->ionic->idev.cmb_npages) { + netdev_info(lif->netdev, "CMB rings are not supported on this device\n"); + return -EOPNOTSUPP; + } + + if (netif_running(lif->netdev)) + return -EBUSY; + + ionic_init_queue_params(lif, &qparam); + qparam.cmb_enabled = cmb_enable; + pages_used = ionic_validate_cmb_config(lif, &qparam); + if (pages_used < 0) + return pages_used; + + if (cmb_enable) { + netdev_info(lif->netdev, "Enabling CMB rings - %d pages\n", + pages_used); + set_bit(IONIC_LIF_F_CMB_RINGS, lif->state); + } else { + netdev_info(lif->netdev, "Disabling CMB rings\n"); + clear_bit(IONIC_LIF_F_CMB_RINGS, lif->state); + } + + /* We are currently restricting CMB mode enable/disable to + * only when the driver is DOWN, in order to keep reconfig + * thrash to a minimum and to keep the reconfig code simpler. + * In the future when we relax this requirement we can call + * ionic_reconfigure_queues() here. 
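+ * Until then, the expected sequence is: bring the interface down, + * toggle the CMB rings private flag with ethtool --set-priv-flags, + * then bring the interface back up.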
+ */ + + return 0; +} + +static u32 ionic_get_priv_flags(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + u32 priv_flags = 0; + + if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + priv_flags |= IONIC_PRIV_F_SW_DBG_STATS; + + if (test_bit(IONIC_LIF_F_RDMA_SNIFFER, lif->state)) + priv_flags |= IONIC_PRIV_F_RDMA_SNIFFER; + + if (test_bit(IONIC_LIF_F_CMB_RINGS, lif->state)) + priv_flags |= IONIC_PRIV_F_CMB_RINGS; + + return priv_flags; +} + +static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct ionic_lif *lif = netdev_priv(netdev); + bool cmb_now, cmb_req; + int rdma; + int ret; + + if (priv_flags & IONIC_PRIV_F_DEVICE_RESET) + ionic_device_reset(lif); + + clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state); + if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS) + set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state); + + rdma = test_bit(IONIC_LIF_F_RDMA_SNIFFER, lif->state); + clear_bit(IONIC_LIF_F_RDMA_SNIFFER, lif->state); + if (priv_flags & IONIC_PRIV_F_RDMA_SNIFFER) + set_bit(IONIC_LIF_F_RDMA_SNIFFER, lif->state); + + if (rdma != test_bit(IONIC_LIF_F_RDMA_SNIFFER, lif->state)) + ionic_lif_rx_mode(lif); + + cmb_now = test_bit(IONIC_LIF_F_CMB_RINGS, lif->state); + cmb_req = !!(priv_flags & IONIC_PRIV_F_CMB_RINGS); + if (cmb_now != cmb_req) { + ret = ionic_cmb_rings_toggle(lif, cmb_req); + if (ret < 0) + return ret; + } + + return 0; +} + +static int ionic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, u32 *rules) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = lif->nxqs; + break; + default: + netdev_err(netdev, "Command parameter %d is not supported\n", + info->cmd); + err = -EOPNOTSUPP; + } + + return err; +} + +static u32 ionic_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + return le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); +} + +static u32 ionic_get_rxfh_key_size(struct net_device *netdev) +{ + return IONIC_RSS_HASH_KEY_SIZE; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int ionic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +#else +static int ionic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif +{ + struct ionic_lif *lif = netdev_priv(netdev); + unsigned int i, tbl_sz; + + if (indir) { + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + for (i = 0; i < tbl_sz; i++) + indir[i] = lif->rss_ind_tbl[i]; + } + + if (key) + memcpy(key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; +#endif + + return 0; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +#else +static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +#endif +{ + struct ionic_lif *lif = netdev_priv(netdev); + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; +#endif + + return ionic_lif_rss_config(lif, lif->rss_types, key, indir); +} + +static int ionic_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct ionic_lif *lif = netdev_priv(dev); + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + lif->rx_copybreak = *(u32 *)data; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int ionic_get_tunable(struct net_device *netdev, + const 
struct ethtool_tunable *tuna, void *data) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = lif->rx_copybreak; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int ionic_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) + +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + struct ionic_xcvr_status *xcvr; + + xcvr = &idev->port_info->status.xcvr; + + /* report the module data type and length */ + switch (xcvr->sprom[0]) { + case SFF8024_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + break; + case SFF8024_ID_QSFP_8436_8636: + case SFF8024_ID_QSFP28_8636: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF8024_ID_UNK: + if (lif->ionic->is_mgmt_nic) + netdev_info(netdev, "no xcvr on mgmt nic\n"); + else + netdev_info(netdev, "no xcvr connected? type 0x%02x\n", + xcvr->sprom[0]); + return -EINVAL; + default: + netdev_info(netdev, "unknown xcvr type 0x%02x\n", + xcvr->sprom[0]); + modinfo->type = 0; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + break; + } + + return 0; +} + +static int ionic_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + struct ionic_xcvr_status *xcvr; + char tbuf[sizeof(xcvr->sprom)]; + int count = 10; + u32 len; + + /* The NIC keeps the module prom up-to-date in the DMA space + * so we can simply copy the module bytes into the data buffer. + */ + xcvr = &idev->port_info->status.xcvr; + len = min_t(u32, sizeof(xcvr->sprom), ee->len); + + do { + memcpy(data, xcvr->sprom, len); + memcpy(tbuf, xcvr->sprom, len); + + /* Let's make sure we got a consistent copy */ + if (!memcmp(data, tbuf, len)) + break; + + } while (--count); + + if (!count) + return -ETIMEDOUT; + + return 0; +} + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +static int ionic_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + __le64 mask; + + if (!lif->phc || !lif->phc->ptp) + return ethtool_op_get_ts_info(netdev, info); + + info->phc_index = ptp_clock_index(lif->phc->ptp); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + /* tx modes */ + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + mask = cpu_to_le64(BIT_ULL(IONIC_TXSTAMP_ONESTEP_SYNC)); + if (ionic->ident.lif.eth.hwstamp_tx_modes & mask) + info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC); + +#ifdef HAVE_HWSTAMP_TX_ONESTEP_P2P + mask = cpu_to_le64(BIT_ULL(IONIC_TXSTAMP_ONESTEP_P2P)); + if (ionic->ident.lif.eth.hwstamp_tx_modes & mask) + info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_P2P); +#endif + + /* rx filters */ + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + +#ifdef HAVE_HWTSTAMP_FILTER_NTP_ALL + mask = cpu_to_le64(IONIC_PKT_CLS_NTP_ALL); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_NTP_ALL); +#endif + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_SYNC); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= 
BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_DREQ); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_ALL); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_SYNC); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_DREQ); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_ALL); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_SYNC); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_DREQ); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_ALL); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_SYNC); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_SYNC); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_DREQ); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_ALL); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; +} +#endif + +static int ionic_nway_reset(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + int err = 0; + + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + + /* flap the link to force auto-negotiation */ + + mutex_lock(&ionic->dev_cmd_lock); + + ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_DOWN); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + + if (!err) { + ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + } + + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +static int ionic_flash_device(struct net_device *netdev, + struct ethtool_flash *eflash) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + if (eflash->region) + return -EOPNOTSUPP; + + return ionic_firmware_fetch_and_update(lif, eflash->data); +} + +static const struct ethtool_ops ionic_ethtool_ops = { +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX | + ETHTOOL_COALESCE_USE_ADAPTIVE_TX, +#endif + .get_drvinfo = ionic_get_drvinfo, + .get_regs_len = ionic_get_regs_len, + .get_regs = ionic_get_regs, + .get_link = ethtool_op_get_link, + .get_link_ksettings = ionic_get_link_ksettings, + .set_link_ksettings = ionic_set_link_ksettings, + .get_coalesce = ionic_get_coalesce, + .set_coalesce = ionic_set_coalesce, + .get_ringparam = ionic_get_ringparam, + .set_ringparam = ionic_set_ringparam, + .get_channels = ionic_get_channels, + .set_channels = 
ionic_set_channels, + .get_strings = ionic_get_strings, + .get_ethtool_stats = ionic_get_stats, + .get_sset_count = ionic_get_sset_count, + .get_priv_flags = ionic_get_priv_flags, + .set_priv_flags = ionic_set_priv_flags, + .get_rxnfc = ionic_get_rxnfc, + .get_rxfh_indir_size = ionic_get_rxfh_indir_size, + .get_rxfh_key_size = ionic_get_rxfh_key_size, + .get_rxfh = ionic_get_rxfh, + .set_rxfh = ionic_set_rxfh, + .get_tunable = ionic_get_tunable, + .set_tunable = ionic_set_tunable, + .get_module_info = ionic_get_module_info, + .get_module_eeprom = ionic_get_module_eeprom, + .get_pauseparam = ionic_get_pauseparam, + .set_pauseparam = ionic_set_pauseparam, +#ifdef ETHTOOL_FEC_NONE + .get_fecparam = ionic_get_fecparam, + .set_fecparam = ionic_set_fecparam, +#endif +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) + .get_ts_info = ionic_get_ts_info, +#endif + .nway_reset = ionic_nway_reset, + .flash_device = ionic_flash_device, +}; + +void ionic_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ionic_ethtool_ops; +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_ethtool.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_ethtool.h new file mode 100644 index 0000000000..2c371f1f4d --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_ethtool.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#ifndef _IONIC_ETHTOOL_H_ +#define _IONIC_ETHTOOL_H_ + +int ionic_cmb_pages_in_use(struct ionic_lif *lif); +void ionic_ethtool_set_ops(struct net_device *netdev); + +#endif /* _IONIC_ETHTOOL_H_ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_fw.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_fw.c new file mode 100644 index 0000000000..88702a98c5 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_fw.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2022 Pensando Systems, Inc */ + +#include +#include +#include + +#include "ionic.h" +#include "ionic_dev.h" +#include "ionic_lif.h" + +/* The worst case wait for the install activity is about 25 minutes when + * installing a new CPLD, which is very seldom. Normal is about 30-35 + * seconds. Since the driver can't tell if a CPLD update will happen we + * set the timeout for the ugly case. 
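+ * Both timeouts below are in seconds, the same units ionic_dev_cmd_wait() + * takes: 25 minutes for a worst-case install, 30 seconds for activate.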
+ */ +#define IONIC_FW_INSTALL_TIMEOUT (25 * 60) +#define IONIC_FW_ACTIVATE_TIMEOUT 30 + +/* Number of periodic log updates during fw file download */ +#define IONIC_FW_INTERVAL_FRACTION 32 + +static void ionic_dev_cmd_firmware_download(struct ionic_dev *idev, u64 addr, + u32 offset, u32 length) +{ + union ionic_dev_cmd cmd = { + .fw_download.opcode = IONIC_CMD_FW_DOWNLOAD, + .fw_download.offset = cpu_to_le32(offset), + .fw_download.addr = cpu_to_le64(addr), + .fw_download.length = cpu_to_le32(length), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +static void ionic_dev_cmd_firmware_install(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .fw_control.opcode = IONIC_CMD_FW_CONTROL, + .fw_control.oper = IONIC_FW_INSTALL_ASYNC + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +static void ionic_dev_cmd_firmware_install_status(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .fw_control.opcode = IONIC_CMD_FW_CONTROL, + .fw_control.oper = IONIC_FW_INSTALL_STATUS + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +static void ionic_dev_cmd_firmware_activate(struct ionic_dev *idev, u8 slot) +{ + union ionic_dev_cmd cmd = { + .fw_control.opcode = IONIC_CMD_FW_CONTROL, + .fw_control.oper = IONIC_FW_ACTIVATE_ASYNC, + .fw_control.slot = slot + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +static void ionic_dev_cmd_firmware_activate_status(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .fw_control.opcode = IONIC_CMD_FW_CONTROL, + .fw_control.oper = IONIC_FW_ACTIVATE_STATUS, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw) +{ + struct ionic_dev *idev = &lif->ionic->idev; + struct net_device *netdev = lif->netdev; + struct ionic *ionic = lif->ionic; + union ionic_dev_cmd_comp comp; + u32 buf_sz, copy_sz, offset; + struct devlink *dl; + int next_interval; + int err = 0; + u8 fw_slot; + + dl = priv_to_devlink(ionic); + devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); + + buf_sz = sizeof(idev->dev_cmd_regs->data); + + netdev_dbg(netdev, + "downloading firmware - size %d part_sz %d nparts %lu\n", + (int)fw->size, buf_sz, DIV_ROUND_UP(fw->size, buf_sz)); + + devlink_flash_update_status_notify(dl, "Downloading", NULL, 0, fw->size); + offset = 0; + next_interval = fw->size / IONIC_FW_INTERVAL_FRACTION; + while (offset < fw->size) { + copy_sz = min_t(unsigned int, buf_sz, fw->size - offset); + mutex_lock(&ionic->dev_cmd_lock); + memcpy_toio(&idev->dev_cmd_regs->data, fw->data + offset, copy_sz); + ionic_dev_cmd_firmware_download(idev, + offsetof(union ionic_dev_cmd_regs, data), + offset, copy_sz); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) { + netdev_err(netdev, + "download failed offset 0x%x addr 0x%lx len 0x%x\n", + offset, offsetof(union ionic_dev_cmd_regs, data), + copy_sz); + goto err_out; + } + offset += copy_sz; + + if (offset > next_interval) { + devlink_flash_update_status_notify(dl, "Downloading", + NULL, offset, fw->size); + next_interval = offset + (fw->size / IONIC_FW_INTERVAL_FRACTION); + } + } + devlink_flash_update_status_notify(dl, "Downloading", NULL, 1, 1); + + netdev_info(netdev, "installing firmware\n"); + devlink_flash_update_status_notify(dl, "Installing", NULL, 0, 2); + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_firmware_install(idev); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); + fw_slot = comp.fw_control.slot; + 
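/* the completion carries the slot number that gets activated below */ +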
mutex_unlock(&ionic->dev_cmd_lock); + if (err) { + netdev_err(netdev, "failed to start firmware install\n"); + goto err_out; + } + + devlink_flash_update_status_notify(dl, "Installing", NULL, 1, 2); + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_firmware_install_status(idev); + err = ionic_dev_cmd_wait(ionic, IONIC_FW_INSTALL_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) { + netdev_err(netdev, "firmware install failed\n"); + goto err_out; + } + devlink_flash_update_status_notify(dl, "Installing", NULL, 2, 2); + + netdev_info(netdev, "selecting firmware\n"); + devlink_flash_update_status_notify(dl, "Selecting", NULL, 0, 2); + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_firmware_activate(idev, fw_slot); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) { + netdev_err(netdev, "failed to start firmware select\n"); + goto err_out; + } + + devlink_flash_update_status_notify(dl, "Selecting", NULL, 1, 2); + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_firmware_activate_status(idev); + err = ionic_dev_cmd_wait(ionic, IONIC_FW_ACTIVATE_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) { + netdev_err(netdev, "firmware select failed\n"); + goto err_out; + } + devlink_flash_update_status_notify(dl, "Selecting", NULL, 2, 2); + + netdev_info(netdev, "Firmware update completed\n"); + +err_out: + if (err) + devlink_flash_update_status_notify(dl, "Flash failed", NULL, 0, 0); + return err; +} + +int ionic_firmware_fetch_and_update(struct ionic_lif *lif, const char *fw_name) +{ + const struct firmware *fw; + struct devlink *dl; + int err; + + netdev_info(lif->netdev, "Installing firmware %s\n", fw_name); + + dl = priv_to_devlink(lif->ionic); + devlink_flash_update_begin_notify(dl); + + err = request_firmware(&fw, fw_name, lif->ionic->dev); + if (err) + goto err_out; + + err = ionic_firmware_update(lif, fw); + +err_out: + devlink_flash_update_end_notify(dl); + release_firmware(fw); + + return err; +} + diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_lif.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_lif.c new file mode 100644 index 0000000000..021cc604f9 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_lif.c @@ -0,0 +1,4368 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_dev.h" +#include "ionic_lif.h" +#include "ionic_txrx.h" +#include "ionic_ethtool.h" +#include "ionic_debugfs.h" + +/* queuetype support level */ +static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = { + [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */ + [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */ + [IONIC_QTYPE_RXQ] = 2, /* 0 = Base version with CQ+SG support + * 1 = ... with EQ + * 2 = ... with CMB rings + */ + [IONIC_QTYPE_TXQ] = 3, /* 0 = Base version with CQ+SG support + * 1 = ... with Tx SG version 1 + * 2 = ... with EQ + * 3 = ... 
with CMB rings + */ +}; + +static void ionic_link_status_check(struct ionic_lif *lif); +static void ionic_lif_handle_fw_down(struct ionic_lif *lif); +static void ionic_lif_handle_fw_up(struct ionic_lif *lif); + +static void ionic_txrx_deinit(struct ionic_lif *lif); +static int ionic_txrx_init(struct ionic_lif *lif); +static int ionic_start_queues(struct ionic_lif *lif); +static int ionic_open(struct net_device *netdev); +static void ionic_stop_queues(struct ionic_lif *lif); +static int ionic_stop(struct net_device *netdev); +static void ionic_lif_queue_identify(struct ionic_lif *lif); +static void ionic_lif_set_netdev_info(struct ionic_lif *lif); + +static void ionic_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct dim_cq_moder cur_moder; + struct ionic_qcq *qcq; + u32 new_coal; + + cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + qcq = container_of(dim, struct ionic_qcq, dim); + new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec); + new_coal = new_coal ? new_coal : 1; + + if (qcq->intr.dim_coal_hw != new_coal) { + unsigned int qi = qcq->cq.bound_q->index; + struct ionic_lif *lif = qcq->q.lif; + + qcq->intr.dim_coal_hw = new_coal; + + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->rxqcqs[qi]->intr.index, + qcq->intr.dim_coal_hw); + } + + dim->state = DIM_START_MEASURE; +} + +static void ionic_lif_deferred_work(struct work_struct *work) +{ + struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work); + struct ionic_deferred *def = &lif->deferred; + struct ionic_deferred_work *w = NULL; + + do { + spin_lock_bh(&def->lock); + if (!list_empty(&def->list)) { + w = list_first_entry(&def->list, + struct ionic_deferred_work, list); + list_del(&w->list); + } + spin_unlock_bh(&def->lock); + + if (!w) + break; + + switch (w->type) { + case IONIC_DW_TYPE_RX_MODE: + ionic_lif_rx_mode(lif); + break; + case IONIC_DW_TYPE_LINK_STATUS: + ionic_link_status_check(lif); + break; + case IONIC_DW_TYPE_LIF_RESET: + if (w->fw_status) { + ionic_lif_handle_fw_up(lif); + } else { + ionic_lif_handle_fw_down(lif); + + /* Fire off another watchdog to see + * if the FW is already back rather than + * waiting another whole cycle + */ + mod_timer(&lif->ionic->watchdog_timer, jiffies + 1); + } + break; + default: + break; + } + kfree(w); + w = NULL; + } while (true); +} + +void ionic_lif_deferred_enqueue(struct ionic_deferred *def, + struct ionic_deferred_work *work) +{ + spin_lock_bh(&def->lock); + list_add_tail(&work->list, &def->list); + spin_unlock_bh(&def->lock); + schedule_work(&def->work); +} + +static void ionic_link_status_check(struct ionic_lif *lif) +{ + struct net_device *netdev = lif->netdev; + u16 link_status; + bool link_up; + + /* If we're here but the bit is not set, then another thread + * got here before we did and this check is unnecessary. 
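+ * The bit is set by ionic_link_status_check_request() and cleared + * below once the check has run.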
+ */ + if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) + return; + + /* Don't put carrier back up if we're in a broken state */ + if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) { + clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); + return; + } + + link_status = le16_to_cpu(lif->info->status.link_status); + link_up = link_status == IONIC_PORT_OPER_STATUS_UP; + + if (link_up) { + int err = 0; + + if (netdev->flags & IFF_UP && netif_running(netdev)) { + mutex_lock(&lif->queue_lock); + err = ionic_start_queues(lif); + if (err && err != -EBUSY) { + netdev_err(netdev, + "Failed to start queues: %d\n", err); + set_bit(IONIC_LIF_F_BROKEN, lif->state); + netif_carrier_off(netdev); + } + mutex_unlock(&lif->queue_lock); + } + + if (!err && !netif_carrier_ok(netdev)) { + netdev_info(netdev, "Link up - %d Gbps\n", + le32_to_cpu(lif->info->status.link_speed) / 1000); + netif_carrier_on(netdev); + } + } else { + if (netif_carrier_ok(netdev)) { + netdev_info(netdev, "Link down\n"); + netif_carrier_off(netdev); + } + + if (netdev->flags & IFF_UP && netif_running(netdev)) { + mutex_lock(&lif->queue_lock); + ionic_stop_queues(lif); + mutex_unlock(&lif->queue_lock); + } + } + + clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); +} + +void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep) +{ + struct ionic_deferred_work *work; + + /* we only need one request outstanding at a time */ + if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) + return; + + if (!can_sleep) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); + return; + } + + work->type = IONIC_DW_TYPE_LINK_STATUS; + ionic_lif_deferred_enqueue(&lif->deferred, work); + } else { + ionic_link_status_check(lif); + } +} + +static void ionic_napi_deadline(struct timer_list *timer) +{ + struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline); + + napi_schedule(&qcq->napi); +} + +static irqreturn_t ionic_napi_isr(int irq, void *data) +{ + struct napi_struct *napi = data; + + napi_schedule_irqoff(napi); + + return IRQ_HANDLED; +} + +static int ionic_request_napi_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct ionic_intr_info *intr = &qcq->intr; + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + const char *name; + + if (lif->registered) + name = lif->netdev->name; + else + name = dev_name(dev); + + snprintf(intr->name, sizeof(intr->name), + "%s-%s-%s", IONIC_DRV_NAME, name, q->name); + + return devm_request_irq(dev, intr->vector, ionic_napi_isr, + 0, intr->name, &qcq->napi); +} + +int ionic_intr_alloc(struct ionic *ionic, struct ionic_intr_info *intr) +{ + int index; + + index = find_first_zero_bit(ionic->intrs, ionic->nintrs); + if (index == ionic->nintrs) { + dev_warn(ionic->dev, "%s: no intr, index=%d nintrs=%d\n", + __func__, index, ionic->nintrs); + return -ENOSPC; + } + + set_bit(index, ionic->intrs); + ionic_intr_init(&ionic->idev, intr, index); + + return 0; +} + +void ionic_intr_free(struct ionic *ionic, int index) +{ + if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs) + clear_bit(index, ionic->intrs); +} + +static int ionic_qcq_enable(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = q->lif; + struct ionic_dev *idev; + struct device *dev; + + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .lif_index = 
cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .oper = IONIC_Q_ENABLE, + }, + }; + int ret; + + idev = &lif->ionic->idev; + dev = lif->ionic->dev; + + dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", + ctx.cmd.q_control.index, ctx.cmd.q_control.type); + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); + + ret = ionic_adminq_post_wait(lif, &ctx); + if (ret) + return ret; + + if (qcq->napi.poll) + napi_enable(&qcq->napi); + + if (lif->ionic->neth_eqs) { + qcq->armed = true; + ionic_dbell_ring(lif->kern_dbpage, + qcq->q.hw_type, + IONIC_DBELL_RING_1 | + IONIC_DBELL_QID(qcq->q.hw_index) | + qcq->cq.tail_idx); + } else if (qcq->flags & IONIC_QCQ_F_INTR) { + irq_set_affinity_hint(qcq->intr.vector, + &qcq->intr.affinity_mask); + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + } + + return 0; +} + +static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err) +{ + struct ionic_queue *q; + + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .oper = IONIC_Q_DISABLE, + }, + }; + + if (!qcq) { + netdev_err(lif->netdev, "%s: bad qcq\n", __func__); + return -ENXIO; + } + + q = &qcq->q; + + if (qcq->napi.poll) { + napi_disable(&qcq->napi); + del_timer_sync(&qcq->napi_deadline); + } + + if (qcq->flags & IONIC_QCQ_F_INTR) { + struct ionic_dev *idev = &lif->ionic->idev; + + cancel_work_sync(&qcq->dim.work); + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + synchronize_irq(qcq->intr.vector); + irq_set_affinity_hint(qcq->intr.vector, NULL); + } + + /* If there was a previous fw communication error, don't bother with + * sending the adminq command and just return the same error value.
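+ * (-ETIMEDOUT and -ENXIO mean the device stopped responding, so + * another devcmd here would only stall as well.)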
+ */ + if (fw_err == -ETIMEDOUT || fw_err == -ENXIO) + return fw_err; + + ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index); + ctx.cmd.q_control.type = q->type; + ctx.cmd.q_control.index = cpu_to_le32(q->index); + dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n", + ctx.cmd.q_control.index, ctx.cmd.q_control.type); + + return ionic_adminq_post_wait(lif, &ctx); +} + +static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct ionic_dev *idev = &lif->ionic->idev; + + if (!qcq) + return; + + if (!(qcq->flags & IONIC_QCQ_F_INITED)) + return; + + if (qcq->flags & IONIC_QCQ_F_INTR) { + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + netif_napi_del(&qcq->napi); + } + + qcq->flags &= ~IONIC_QCQ_F_INITED; +} + +static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0) + return; + + irq_set_affinity_hint(qcq->intr.vector, NULL); + devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi); + qcq->intr.vector = 0; + ionic_intr_free(lif->ionic, qcq->intr.index); + qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; +} + +static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct device *dev = lif->ionic->dev; + + if (!qcq) + return; + + ionic_debugfs_del_qcq(qcq); + + if (qcq->q_base) { + dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa); + qcq->q_base = NULL; + qcq->q_base_pa = 0; + } + + if (qcq->cmb_q_base) { + iounmap(qcq->cmb_q_base); + ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order); + qcq->cmb_pgid = 0; + qcq->cmb_order = 0; + qcq->cmb_q_base = NULL; + qcq->cmb_q_base_pa = 0; + } + + if (qcq->cq_base) { + dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa); + qcq->cq_base = NULL; + qcq->cq_base_pa = 0; + } + + if (qcq->sg_base) { + dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa); + qcq->sg_base = NULL; + qcq->sg_base_pa = 0; + } + + ionic_qcq_intr_free(lif, qcq); + + if (qcq->cq.info) { + vfree(qcq->cq.info); + qcq->cq.info = NULL; + } + if (qcq->q.info) { + vfree(qcq->q.info); + qcq->q.info = NULL; + } +} + +static void ionic_qcqs_free(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + struct ionic_qcq *adminqcq; + unsigned long irqflags; + + if (lif->notifyqcq) { + ionic_qcq_free(lif, lif->notifyqcq); + devm_kfree(dev, lif->notifyqcq); + lif->notifyqcq = NULL; + } + + if (lif->adminqcq) { + spin_lock_irqsave(&lif->adminq_lock, irqflags); + adminqcq = READ_ONCE(lif->adminqcq); + lif->adminqcq = NULL; + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + if (adminqcq) { + ionic_qcq_free(lif, adminqcq); + devm_kfree(dev, adminqcq); + } + } + + if (lif->rxqcqs) { + devm_kfree(dev, lif->rxqstats); + lif->rxqstats = NULL; + devm_kfree(dev, lif->rxqcqs); + lif->rxqcqs = NULL; + } + + if (lif->txqcqs) { + devm_kfree(dev, lif->txqstats); + lif->txqstats = NULL; + devm_kfree(dev, lif->txqcqs); + lif->txqcqs = NULL; + } +} + +static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, + struct ionic_qcq *n_qcq) +{ + if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) { + ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index); + n_qcq->flags &= ~IONIC_QCQ_F_INTR; + } + + n_qcq->intr.vector = src_qcq->intr.vector; + n_qcq->intr.index = src_qcq->intr.index; + n_qcq->napi_qcq = src_qcq->napi_qcq; +} + +static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + unsigned int cpu; + int err; + + if (!(qcq->flags & 
IONIC_QCQ_F_INTR)) { + qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; + return 0; + } + + err = ionic_intr_alloc(lif->ionic, &qcq->intr); + if (err) { + netdev_warn(lif->netdev, "no intr for %s: %d\n", + qcq->q.name, err); + goto err_out; + } + + err = ionic_bus_get_irq(lif->ionic, qcq->intr.index); + if (err < 0) { + netdev_warn(lif->netdev, "no vector for %s: %d\n", + qcq->q.name, err); + goto err_out_free_intr; + } + qcq->intr.vector = err; + ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + + err = ionic_request_napi_irq(lif, qcq); + if (err) { + netdev_warn(lif->netdev, "irq request failed %d\n", err); + goto err_out_free_intr; + } + + if (affinity_mask_override) { + cpumask_copy(&qcq->intr.affinity_mask, cpu_none_mask); + + netdev_dbg(lif->netdev, "%s: setting irq affinity_mask 0x%lx\n", + qcq->q.name, affinity_mask_override); + for (cpu = 0; cpu < num_present_cpus(); cpu++) { + if (BIT(cpu) & affinity_mask_override) + cpumask_set_cpu(cpu, &qcq->intr.affinity_mask); + } + + /* set the affinity */ + irq_set_affinity_hint(qcq->intr.vector, &qcq->intr.affinity_mask); + + } else { + netdev_dbg(lif->netdev, "%s: using default irq affinity", qcq->q.name); + /* try to get the irq on the local numa node first */ + qcq->intr.cpu = cpumask_local_spread(qcq->intr.index, + dev_to_node(lif->ionic->dev)); + if (qcq->intr.cpu != -1) + cpumask_set_cpu(qcq->intr.cpu, + &qcq->intr.affinity_mask); + } + + netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index); + return 0; + +err_out_free_intr: + ionic_intr_free(lif->ionic, qcq->intr.index); +err_out: + return err; +} + +static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, + unsigned int index, + const char *name, unsigned int flags, + unsigned int num_descs, unsigned int desc_size, + unsigned int cq_desc_size, + unsigned int sg_desc_size, + unsigned int pid, struct ionic_qcq **qcq) +{ + struct ionic_dev *idev = &lif->ionic->idev; + struct device *dev = lif->ionic->dev; + void *q_base, *cq_base, *sg_base; + dma_addr_t cq_base_pa = 0; + dma_addr_t sg_base_pa = 0; + dma_addr_t q_base_pa = 0; + struct ionic_qcq *new; + int err; + + *qcq = NULL; + + new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL); + if (!new) { + netdev_err(lif->netdev, "Cannot allocate queue structure\n"); + err = -ENOMEM; + goto err_out; + } + + new->q.dev = dev; + new->flags = flags; + + new->q.info = vzalloc(num_descs * sizeof(*new->q.info)); + if (!new->q.info) { + netdev_err(lif->netdev, "Cannot allocate queue info\n"); + err = -ENOMEM; + goto err_out_free_qcq; + } + + new->q.type = type; + new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems; + + err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, + desc_size, sg_desc_size, pid); + if (err) { + netdev_err(lif->netdev, "Cannot initialize queue\n"); + goto err_out_free_q_info; + } + + err = ionic_alloc_qcq_interrupt(lif, new); + if (err) + goto err_out; + + new->cq.info = vzalloc(num_descs * sizeof(*new->cq.info)); + if (!new->cq.info) { + netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); + err = -ENOMEM; + goto err_out_free_irq; + } + + err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); + if (err) { + netdev_err(lif->netdev, "Cannot initialize completion queue\n"); + goto err_out_free_cq_info; + } + + if (flags & IONIC_QCQ_F_NOTIFYQ) { + /* q & cq need to be contiguous in case of notifyq */ + new->q_size = PAGE_SIZE + ALIGN(num_descs * desc_size, PAGE_SIZE) + + ALIGN(num_descs * 
cq_desc_size, PAGE_SIZE); + new->q_base = dma_alloc_coherent(dev, new->q_size + new->cq_size, + &new->q_base_pa, GFP_KERNEL); + if (!new->q_base) { + netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n"); + err = -ENOMEM; + goto err_out_free_cq_info; + } + q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); + q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); + ionic_q_map(&new->q, q_base, q_base_pa); + + cq_base = PTR_ALIGN(q_base + + ALIGN(num_descs * desc_size, PAGE_SIZE), PAGE_SIZE); + cq_base_pa = ALIGN(new->q_base_pa + + ALIGN(num_descs * desc_size, PAGE_SIZE), PAGE_SIZE); + ionic_cq_map(&new->cq, cq_base, cq_base_pa); + ionic_cq_bind(&new->cq, &new->q); + } else { + /* regular DMA q descriptors */ + new->q_size = PAGE_SIZE + (num_descs * desc_size); + new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa, + GFP_KERNEL); + if (!new->q_base) { + netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); + err = -ENOMEM; + goto err_out_free_cq_info; + } + q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); + q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); + ionic_q_map(&new->q, q_base, q_base_pa); + + if (flags & IONIC_QCQ_F_CMB_RINGS) { + /* on-chip CMB q descriptors */ + new->cmb_q_size = num_descs * desc_size; + new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE); + + err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa, + new->cmb_order); + if (err) { + netdev_err(lif->netdev, + "Cannot allocate queue order %d from cmb: err %d\n", + new->cmb_order, err); + goto err_out_free_q; + } + + new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size); + if (!new->cmb_q_base) { + netdev_err(lif->netdev, "Cannot map queue from cmb\n"); + ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order); + err = -ENOMEM; + goto err_out_free_q; + } + + new->cmb_q_base_pa -= idev->phy_cmb_pages; + ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa); + } + + /* cq DMA descriptors */ + new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size); + new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa, + GFP_KERNEL); + if (!new->cq_base) { + netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n"); + err = -ENOMEM; + goto err_out_free_q; + } + cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE); + cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE); + ionic_cq_map(&new->cq, cq_base, cq_base_pa); + ionic_cq_bind(&new->cq, &new->q); + } + + if (flags & IONIC_QCQ_F_SG) { + new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size); + new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa, + GFP_KERNEL); + if (!new->sg_base) { + netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n"); + err = -ENOMEM; + goto err_out_free_cq; + } + sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE); + sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE); + ionic_q_sg_map(&new->q, sg_base, sg_base_pa); + } + + INIT_WORK(&new->dim.work, ionic_dim_work); + new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + + *qcq = new; + + return 0; + +err_out_free_cq: + dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa); +err_out_free_q: + if (new->cmb_q_base) { + iounmap(new->cmb_q_base); + ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order); + } + dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa); +err_out_free_cq_info: + vfree(new->cq.info); +err_out_free_irq: + if (flags & IONIC_QCQ_F_INTR) { + devm_free_irq(dev, new->intr.vector, &new->napi); + ionic_intr_free(lif->ionic, new->intr.index); + } +err_out_free_q_info: + vfree(new->q.info); +err_out_free_qcq: + devm_kfree(dev, new); 
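+ /* every failure path funnels through err_out for a single report */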
+err_out: + dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err); + return err; +} + +static int ionic_qcqs_alloc(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + unsigned int flags; + int err; + + flags = IONIC_QCQ_F_INTR; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, + IONIC_ADMINQ_LENGTH, + sizeof(struct ionic_admin_cmd), + sizeof(struct ionic_admin_comp), + 0, lif->kern_pid, &lif->adminqcq); + if (err) + return err; + ionic_debugfs_add_qcq(lif, lif->adminqcq); + + if (lif->ionic->nnqs_per_lif) { + flags = IONIC_QCQ_F_NOTIFYQ; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq", + flags, IONIC_NOTIFYQ_LENGTH, + sizeof(struct ionic_notifyq_cmd), + sizeof(union ionic_notifyq_comp), + 0, lif->kern_pid, &lif->notifyqcq); + if (err) + goto err_out; + ionic_debugfs_add_qcq(lif, lif->notifyqcq); + + /* Let the notifyq ride on the adminq interrupt */ + ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq); + } + + err = -ENOMEM; + lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif, + sizeof(*lif->txqcqs), GFP_KERNEL); + if (!lif->txqcqs) + goto err_out; + lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif, + sizeof(*lif->rxqcqs), GFP_KERNEL); + if (!lif->rxqcqs) + goto err_out; + + lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1, + sizeof(*lif->txqstats), GFP_KERNEL); + if (!lif->txqstats) + goto err_out; + lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1, + sizeof(*lif->rxqstats), GFP_KERNEL); + if (!lif->rxqstats) + goto err_out; + + return 0; + +err_out: + ionic_qcqs_free(lif); + return err; +} + +static inline int ionic_choose_eq(struct ionic_lif *lif, int q_index) +{ + unsigned int abs_q; + + if (lif->index) + abs_q = (lif->ionic->nrxqs_per_lif + lif->index); + else + abs_q = q_index; + + return abs_q % lif->ionic->neth_eqs; +} + +static void ionic_qcq_sanitize(struct ionic_qcq *qcq) +{ + qcq->q.tail_idx = 0; + qcq->q.head_idx = 0; + qcq->cq.tail_idx = 0; + qcq->cq.done_color = 1; + + memset(qcq->q_base, 0, qcq->q_size); + if (qcq->cmb_q_base) + memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size); + memset(qcq->cq_base, 0, qcq->cq_size); + memset(qcq->sg_base, 0, qcq->sg_size); +} + +static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .ver = lif->qtype_info[q->type].version, + .index = cpu_to_le32(q->index), + .pid = cpu_to_le16(q->pid), + .ring_size = ilog2(q->num_descs), + .ring_base = cpu_to_le64(q->base_pa), + .cq_ring_base = cpu_to_le64(cq->base_pa), + .sg_ring_base = cpu_to_le64(q->sg_base_pa), + .features = cpu_to_le64(q->features), + }, + }; + int err; + + if (lif->ionic->neth_eqs && + lif->qtype_info[q->type].features & IONIC_QIDENT_F_EQ) { + unsigned int eq_index = ionic_choose_eq(lif, q->index); + + ctx.cmd.q_init.flags = cpu_to_le16(IONIC_QINIT_F_EQ | + IONIC_QINIT_F_SG); + ctx.cmd.q_init.intr_index = cpu_to_le16(eq_index); + } else { + unsigned int intr_index; + + intr_index = qcq->intr.index; + + ctx.cmd.q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_SG); + ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index); + } + + if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) { + ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB); + 
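/* with CMB, the descriptor ring lives in controller memory rather than host DMA memory */ +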
ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa); + } + + dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); + dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); + dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); + dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); + dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base); + dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base); + dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags); + dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver); + dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); + + ionic_qcq_sanitize(qcq); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); + + q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE; + q->dbell_jiffies = jiffies; + + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { + netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi); + qcq->napi_qcq = qcq; + timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); + } + + qcq->flags |= IONIC_QCQ_F_INITED; + + return 0; +} + +static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .ver = lif->qtype_info[q->type].version, + .index = cpu_to_le32(q->index), + .pid = cpu_to_le16(q->pid), + .ring_size = ilog2(q->num_descs), + .ring_base = cpu_to_le64(q->base_pa), + .cq_ring_base = cpu_to_le64(cq->base_pa), + .sg_ring_base = cpu_to_le64(q->sg_base_pa), + .features = cpu_to_le64(q->features), + }, + }; + int err; + + if (lif->ionic->neth_eqs && + lif->qtype_info[q->type].features & IONIC_QIDENT_F_EQ) { + unsigned int eq_index = ionic_choose_eq(lif, q->index); + + ctx.cmd.q_init.flags = cpu_to_le16(IONIC_QINIT_F_EQ | + IONIC_QINIT_F_SG); + ctx.cmd.q_init.intr_index = cpu_to_le16(eq_index); + } else { + ctx.cmd.q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_SG); + ctx.cmd.q_init.intr_index = cpu_to_le16(cq->bound_intr->index); + } + + if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) { + ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB); + ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa); + } + + dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid); + dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index); + dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); + dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); + dev_dbg(dev, "rxq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base); + dev_dbg(dev, "rxq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base); + dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags); + dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver); + dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); + + ionic_qcq_sanitize(qcq); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, 
"rxq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); + + q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE; + q->dbell_jiffies = jiffies; + + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi); + else + netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi); + + qcq->napi_qcq = qcq; + timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); + + qcq->flags |= IONIC_QCQ_F_INITED; + + return 0; +} + +int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) +{ + unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz; + unsigned int txq_i, flags; + struct ionic_qcq *txq; + u64 features; + int err; + + if (lif->hwstamp_txq) + return 0; + + features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP; + + num_desc = IONIC_MIN_TXRX_DESC; + desc_sz = sizeof(struct ionic_txq_desc); + comp_sz = 2 * sizeof(struct ionic_txq_comp); + + if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && + lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1)) + sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); + else + sg_desc_sz = sizeof(struct ionic_txq_sg_desc); + + txq_i = lif->ionic->ntxqs_per_lif; + flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; + + err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags, + num_desc, desc_sz, comp_sz, sg_desc_sz, + lif->kern_pid, &txq); + if (err) + goto err_qcq_alloc; + + txq->q.features = features; + + ionic_link_qcq_interrupts(lif->adminqcq, txq); + ionic_debugfs_add_qcq(lif, txq); + + lif->hwstamp_txq = txq; + + if (netif_running(lif->netdev)) { + err = ionic_lif_txq_init(lif, txq); + if (err) + goto err_qcq_init; + + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + err = ionic_qcq_enable(txq); + if (err) + goto err_qcq_enable; + } + } + + return 0; + +err_qcq_enable: + ionic_lif_qcq_deinit(lif, txq); +err_qcq_init: + lif->hwstamp_txq = NULL; + ionic_debugfs_del_qcq(txq); + ionic_qcq_free(lif, txq); + devm_kfree(lif->ionic->dev, txq); +err_qcq_alloc: + return err; +} + +int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) +{ + unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz; + unsigned int rxq_i, flags; + struct ionic_qcq *rxq; + u64 features; + int err; + + if (lif->hwstamp_rxq) + return 0; + + features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; + + num_desc = IONIC_MIN_TXRX_DESC; + desc_sz = sizeof(struct ionic_rxq_desc); + comp_sz = 2 * sizeof(struct ionic_rxq_comp); + sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); + + rxq_i = lif->ionic->nrxqs_per_lif; + flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; + + err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags, + num_desc, desc_sz, comp_sz, sg_desc_sz, + lif->kern_pid, &rxq); + if (err) + goto err_qcq_alloc; + + rxq->q.features = features; + + ionic_link_qcq_interrupts(lif->adminqcq, rxq); + ionic_debugfs_add_qcq(lif, rxq); + + lif->hwstamp_rxq = rxq; + + if (netif_running(lif->netdev)) { + err = ionic_lif_rxq_init(lif, rxq); + if (err) + goto err_qcq_init; + + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + ionic_rx_fill(&rxq->q); + err = ionic_qcq_enable(rxq); + if (err) + goto err_qcq_enable; + } + } + + return 0; + +err_qcq_enable: + ionic_lif_qcq_deinit(lif, rxq); +err_qcq_init: + lif->hwstamp_rxq = NULL; + ionic_debugfs_del_qcq(rxq); + ionic_qcq_free(lif, rxq); + devm_kfree(lif->ionic->dev, rxq); +err_qcq_alloc: + return err; +} + +int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all) +{ + struct ionic_queue_params qparam; + + ionic_init_queue_params(lif, 
&qparam); + + if (rx_all) + qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; + else + qparam.rxq_features = 0; + + /* if we're not running, just set the values and return */ + if (!netif_running(lif->netdev)) { + lif->rxq_features = qparam.rxq_features; + return 0; + } + + return ionic_reconfigure_queues(lif, &qparam); +} + +int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_TXSTAMP, + .txstamp_mode = cpu_to_le16(txstamp_mode), + }, + }; + + return ionic_adminq_post_wait(lif, &ctx); +} + +static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .lif_index = cpu_to_le16(lif->index), + }, + }; + struct ionic_rx_filter *f; + u32 filter_id; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + + f = ionic_rx_filter_rxsteer(lif); + if (!f) { + spin_unlock_bh(&lif->rx_filters.lock); + return; + } + + filter_id = f->filter_id; + ionic_rx_filter_free(lif, f); + + spin_unlock_bh(&lif->rx_filters.lock); + + netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id); + + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err && err != -EEXIST) + netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id); +} + +static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .lif_index = cpu_to_le16(lif->index), + .match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS), + .pkt_class = cpu_to_le64(pkt_class), + }, + }; + u8 qtype; + u32 qid; + int err; + + if (!lif->hwstamp_rxq) + return -EINVAL; + + qtype = lif->hwstamp_rxq->q.type; + ctx.cmd.rx_filter_add.qtype = qtype; + + qid = lif->hwstamp_rxq->q.index; + ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid); + + netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n"); + err = ionic_adminq_post_wait(lif, &ctx); + if (err && err != -EEXIST) + return err; + + spin_lock_bh(&lif->rx_filters.lock); + err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED); + spin_unlock_bh(&lif->rx_filters.lock); + + return err; +} + +int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) +{ + ionic_lif_del_hwstamp_rxfilt(lif); + + if (!pkt_class) + return 0; + + return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class); +} + +static bool ionic_notifyq_service(struct ionic_cq *cq, + struct ionic_cq_info *cq_info) +{ + union ionic_notifyq_comp *comp = cq_info->cq_desc; + struct ionic_deferred_work *work; + struct net_device *netdev; + struct ionic_queue *q; + struct ionic_lif *lif; + u64 eid; + + q = cq->bound_q; + lif = q->info[0].cb_arg; + netdev = lif->netdev; + eid = le64_to_cpu(comp->event.eid); + + /* Have we run out of new completions to process? */ + if ((s64)(eid - lif->last_eid) <= 0) + return false; + + /* Have we missed any events? 
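+ * (An eid that jumps past last_eid + 1 means the device produced + * events faster than we serviced them; warn and resync below.)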
*/ + if (lif->last_eid && eid != lif->last_eid + 1) + netdev_warn(netdev, "Notifyq missed events, eid=%lld, expected=%lld\n", + eid, lif->last_eid + 1); + + lif->last_eid = eid; + + dev_dbg(lif->ionic->dev, "notifyq event:\n"); + dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, + comp, sizeof(*comp), true); + + switch (le16_to_cpu(comp->event.ecode)) { + case IONIC_EVENT_LINK_CHANGE: + ionic_link_status_check_request(lif, CAN_NOT_SLEEP); + break; + case IONIC_EVENT_RESET: + if (lif->ionic->idev.fw_status_ready && + !test_bit(IONIC_LIF_F_FW_RESET, lif->state) && + !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "Reset event dropped\n"); + clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); + } else { + work->type = IONIC_DW_TYPE_LIF_RESET; + ionic_lif_deferred_enqueue(&lif->deferred, work); + } + } + break; + case IONIC_EVENT_HEARTBEAT: + netdev_info(netdev, "Notifyq IONIC_EVENT_HEARTBEAT eid=%lld\n", + eid); + break; + case IONIC_EVENT_LOG: + netdev_info(netdev, "Notifyq IONIC_EVENT_LOG eid=%lld\n", eid); + print_hex_dump(KERN_INFO, "notifyq ", DUMP_PREFIX_OFFSET, 16, 1, + comp->log.data, sizeof(comp->log.data), true); + break; + case IONIC_EVENT_XCVR: + netdev_info(netdev, "Notifyq IONIC_EVENT_XCVR eid=%lld\n", + eid); + break; + default: + netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n", + comp->event.ecode, eid); + break; + } + + return true; +} + +static bool ionic_adminq_service(struct ionic_cq *cq, + struct ionic_cq_info *cq_info) +{ + struct ionic_admin_comp *comp = cq_info->cq_desc; + + if (!color_match(comp->color, cq->done_color)) + return false; + + ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); + + return true; +} + +static int ionic_adminq_napi(struct napi_struct *napi, int budget) +{ + struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr; + struct ionic_lif *lif = napi_to_cq(napi)->lif; + struct ionic_dev *idev = &lif->ionic->idev; + unsigned long irqflags; + unsigned int flags = 0; + bool resched = false; + int rx_work = 0; + int tx_work = 0; + int n_work = 0; + int a_work = 0; + int work_done; + int credits; + + if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED) + n_work = ionic_cq_service(&lif->notifyqcq->cq, budget, + ionic_notifyq_service, NULL, NULL); + + spin_lock_irqsave(&lif->adminq_lock, irqflags); + if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED) + a_work = ionic_cq_service(&lif->adminqcq->cq, budget, + ionic_adminq_service, NULL, NULL); + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + + if (lif->hwstamp_rxq) + rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget, + ionic_rx_service, NULL, NULL); + + if (lif->hwstamp_txq) + tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget, + ionic_tx_service, NULL, NULL); + + work_done = max(max(n_work, a_work), max(rx_work, tx_work)); + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + intr->rearm_count++; + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + credits = n_work + a_work + rx_work + tx_work; + ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags); + } + + if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q)) + resched = true; + if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q)) + resched = true; + if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q)) + resched = true; + if 
(resched) + mod_timer(&lif->adminqcq->napi_deadline, + jiffies + IONIC_NAPI_DEADLINE); + + return work_done; +} + +#ifdef HAVE_VOID_NDO_GET_STATS64 +void ionic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *ns) +#else +struct rtnl_link_stats64 *ionic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *ns) +#endif +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_lif_stats *ls; + + memset(ns, 0, sizeof(*ns)); + ls = &lif->info->stats; + + ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) + + le64_to_cpu(ls->rx_mcast_packets) + + le64_to_cpu(ls->rx_bcast_packets); + + ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) + + le64_to_cpu(ls->tx_mcast_packets) + + le64_to_cpu(ls->tx_bcast_packets); + + ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) + + le64_to_cpu(ls->rx_mcast_bytes) + + le64_to_cpu(ls->rx_bcast_bytes); + + ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) + + le64_to_cpu(ls->tx_mcast_bytes) + + le64_to_cpu(ls->tx_bcast_bytes); + + ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) + + le64_to_cpu(ls->rx_mcast_drop_packets) + + le64_to_cpu(ls->rx_bcast_drop_packets); + + ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) + + le64_to_cpu(ls->tx_mcast_drop_packets) + + le64_to_cpu(ls->tx_bcast_drop_packets); + + ns->multicast = le64_to_cpu(ls->rx_mcast_packets); + + ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty); + + ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) + + le64_to_cpu(ls->rx_queue_disabled) + + le64_to_cpu(ls->rx_desc_fetch_error) + + le64_to_cpu(ls->rx_desc_data_error); + + ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) + + le64_to_cpu(ls->tx_queue_disabled) + + le64_to_cpu(ls->tx_desc_fetch_error) + + le64_to_cpu(ls->tx_desc_data_error); + + ns->rx_errors = ns->rx_over_errors + + ns->rx_missed_errors; + + ns->tx_errors = ns->tx_aborted_errors; + +#ifndef HAVE_VOID_NDO_GET_STATS64 + return ns; +#endif +} + +static int ionic_addr_add(struct net_device *netdev, const u8 *addr) +{ + return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR); +} + +static int ionic_addr_del(struct net_device *netdev, const u8 *addr) +{ + /* Don't delete our own address from the uc list */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR); +} + +void ionic_lif_rx_mode(struct ionic_lif *lif) +{ + struct net_device *netdev = lif->netdev; + unsigned int nfilters; + unsigned int nd_flags; + char buf[128]; + u16 rx_mode; + int i; +#define REMAIN(__x) (sizeof(buf) - (__x)) + + mutex_lock(&lif->config_lock); + + /* grab the flags once for local use */ + nd_flags = netdev->flags; + + rx_mode = IONIC_RX_MODE_F_UNICAST; + rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; + rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0; + rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; + rx_mode |= (nd_flags & IFF_ALLMULTI) ? 
IONIC_RX_MODE_F_ALLMULTI : 0; + + if (test_bit(IONIC_LIF_F_RDMA_SNIFFER, lif->state)) + rx_mode |= IONIC_RX_MODE_F_RDMA_SNIFFER; + + /* sync the filters */ + ionic_rx_filter_sync(lif); + + /* check for overflow state + * if so, we track that we overflowed and enable NIC PROMISC + * else if the overflow is set and not needed + * we remove our overflow flag and check the netdev flags + * to see if we can disable NIC PROMISC + */ + nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); + + if (((lif->nucast + lif->nmcast) >= nfilters) || + (lif->max_vlans && lif->nvlans >= lif->max_vlans)) { + rx_mode |= IONIC_RX_MODE_F_PROMISC; + rx_mode |= IONIC_RX_MODE_F_ALLMULTI; + } else { + if (!(nd_flags & IFF_PROMISC)) + rx_mode &= ~IONIC_RX_MODE_F_PROMISC; + if (!(nd_flags & IFF_ALLMULTI)) + rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; + } + + i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", + lif->rx_mode, rx_mode); + if (rx_mode & IONIC_RX_MODE_F_UNICAST) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); + if (rx_mode & IONIC_RX_MODE_F_MULTICAST) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); + if (rx_mode & IONIC_RX_MODE_F_BROADCAST) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); + if (rx_mode & IONIC_RX_MODE_F_PROMISC) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); + if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); + if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER"); + netdev_dbg(netdev, "lif%d %s\n", lif->index, buf); + + if (lif->rx_mode != rx_mode) { + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_mode_set = { + .opcode = IONIC_CMD_RX_MODE_SET, + .lif_index = cpu_to_le16(lif->index), + }, + }; + int err; + + ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode); + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n", + rx_mode, err); + else + lif->rx_mode = rx_mode; + } + + mutex_unlock(&lif->config_lock); +} + +static void ionic_ndo_set_rx_mode(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_deferred_work *work; + + /* Sync the kernel filter list with the driver filter list */ + __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); + __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); + + /* Shove off the rest of the rxmode work to the work task + * which will include syncing the filters to the firmware. 
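+ * (This callback can run in atomic context, and the rx_mode + * adminq command sleeps, so the heavy lifting goes to the work task.)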
+ */ + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "rxmode change dropped\n"); + return; + } + work->type = IONIC_DW_TYPE_RX_MODE; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); +} + +static __le64 ionic_netdev_features_to_nic(netdev_features_t features) +{ + u64 wanted = 0; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + wanted |= IONIC_ETH_HW_VLAN_TX_TAG; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + wanted |= IONIC_ETH_HW_VLAN_RX_STRIP; + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + wanted |= IONIC_ETH_HW_VLAN_RX_FILTER; + if (features & NETIF_F_RXHASH) + wanted |= IONIC_ETH_HW_RX_HASH; + if (features & NETIF_F_RXCSUM) + wanted |= IONIC_ETH_HW_RX_CSUM; + if (features & NETIF_F_SG) + wanted |= IONIC_ETH_HW_TX_SG; + if (features & NETIF_F_HW_CSUM) + wanted |= IONIC_ETH_HW_TX_CSUM; + if (features & NETIF_F_TSO) + wanted |= IONIC_ETH_HW_TSO; + if (features & NETIF_F_TSO6) + wanted |= IONIC_ETH_HW_TSO_IPV6; + if (features & NETIF_F_TSO_ECN) + wanted |= IONIC_ETH_HW_TSO_ECN; + if (features & NETIF_F_GSO_GRE) + wanted |= IONIC_ETH_HW_TSO_GRE; + if (features & NETIF_F_GSO_GRE_CSUM) + wanted |= IONIC_ETH_HW_TSO_GRE_CSUM; +#ifdef NETIF_F_GSO_IPXIP4 + if (features & NETIF_F_GSO_IPXIP4) + wanted |= IONIC_ETH_HW_TSO_IPXIP4; +#endif +#ifdef NETIF_F_GSO_IPXIP6 + if (features & NETIF_F_GSO_IPXIP6) + wanted |= IONIC_ETH_HW_TSO_IPXIP6; +#endif +#ifdef NETIF_F_GSO_IPIP + if (features & NETIF_F_GSO_IPIP) + wanted |= IONIC_ETH_HW_TSO_IPXIP4; +#endif +#ifdef NETIF_F_GSO_SIT + if (features & NETIF_F_GSO_SIT) + wanted |= IONIC_ETH_HW_TSO_IPXIP4; +#endif + if (features & NETIF_F_GSO_UDP_TUNNEL) + wanted |= IONIC_ETH_HW_TSO_UDP; + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + wanted |= IONIC_ETH_HW_TSO_UDP_CSUM; + + return cpu_to_le64(wanted); +} + +static int ionic_set_nic_features(struct ionic_lif *lif, + netdev_features_t features) +{ + struct device *dev = lif->ionic->dev; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_FEATURES, + }, + }; + u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG | + IONIC_ETH_HW_VLAN_RX_STRIP | + IONIC_ETH_HW_VLAN_RX_FILTER; + u64 old_hw_features; + int err; + + ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features); + + if (lif->phc) + ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + old_hw_features = lif->hw_features; + lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features & + ctx.comp.lif_setattr.features); + + if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH) + ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); + + if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) && + !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) + dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); + + if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) + dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n"); + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) + dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n"); + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) + dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n"); + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) + dev_dbg(dev, "feature ETH_HW_RX_HASH\n"); + if (lif->hw_features & IONIC_ETH_HW_TX_SG) + dev_dbg(dev, "feature 
ETH_HW_TX_SG\n"); + if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) + dev_dbg(dev, "feature ETH_HW_TX_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) + dev_dbg(dev, "feature ETH_HW_RX_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO) + dev_dbg(dev, "feature ETH_HW_TSO\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) + dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) + dev_dbg(dev, "feature ETH_HW_TSO_ECN\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) + dev_dbg(dev, "feature ETH_HW_TSO_GRE\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) + dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) + dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) + dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) + dev_dbg(dev, "feature ETH_HW_TSO_UDP\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) + dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP) + dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n"); + + return 0; +} + +static int ionic_init_nic_features(struct ionic_lif *lif) +{ + struct net_device *netdev = lif->netdev; + netdev_features_t features; + int err; + + /* set up what we expect to support by default */ + features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | +#ifdef NETIF_F_GSO_IPXIP4 + NETIF_F_GSO_IPXIP4 | +#endif +#ifdef NETIF_F_GSO_IPXIP6 + NETIF_F_GSO_IPXIP6 | +#endif +#ifdef NETIF_F_GSO_IPIP + NETIF_F_GSO_IPIP | +#endif +#ifdef NETIF_F_GSO_SIT + NETIF_F_GSO_SIT | +#endif + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (lif->nxqs > 1) + features |= NETIF_F_RXHASH; + + err = ionic_set_nic_features(lif, features); + if (err) + return err; + + /* tell the netdev what we actually can support */ + netdev->features |= NETIF_F_HIGHDMA; + + if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) + netdev->hw_features |= NETIF_F_RXHASH; + if (lif->hw_features & IONIC_ETH_HW_TX_SG) + netdev->hw_features |= NETIF_F_SG; + + if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) + netdev->hw_enc_features |= NETIF_F_HW_CSUM; + if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) + netdev->hw_enc_features |= NETIF_F_RXCSUM; + if (lif->hw_features & IONIC_ETH_HW_TSO) + netdev->hw_enc_features |= NETIF_F_TSO; + if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) + netdev->hw_enc_features |= NETIF_F_TSO6; + if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) + netdev->hw_enc_features |= NETIF_F_TSO_ECN; + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) + netdev->hw_enc_features |= NETIF_F_GSO_GRE; + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) + netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM; +#ifdef NETIF_F_GSO_IPXIP4 + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) + netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4; +#endif +#ifdef NETIF_F_GSO_IPXIP6 + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) + netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6; +#endif +#ifdef NETIF_F_GSO_IPIP + if 
(lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) + netdev->hw_enc_features |= NETIF_F_GSO_IPIP; +#endif + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_features |= netdev->hw_enc_features; + netdev->features |= netdev->hw_features; + + /* some earlier kernels complain if the vlan device inherits + * the NETIF_F_HW_VLAN... flags, so strip them out + */ + netdev->vlan_features |= netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); + + netdev->priv_flags |= IFF_UNICAST_FLT | + IFF_LIVE_ADDR_CHANGE; + + return 0; +} + +static int ionic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err; + + netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n", + __func__, (u64)lif->netdev->features, (u64)features); + + err = ionic_set_nic_features(lif, features); + + return err; +} + +static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_MAC, + }, + }; + + ether_addr_copy(ctx.cmd.lif_setattr.mac, mac); + return ionic_adminq_post_wait(lif, &ctx); +} + +static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_getattr = { + .opcode = IONIC_CMD_LIF_GETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_MAC, + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac); + return 0; +} + +static int ionic_program_mac(struct ionic_lif *lif, u8 *mac) +{ + u8 get_mac[ETH_ALEN]; + int err; + + err = ionic_set_attr_mac(lif, mac); + if (err) + return err; + + err = ionic_get_attr_mac(lif, get_mac); + if (err) + return err; + + /* To deal with older firmware that silently ignores the set attr mac: + * doesn't actually change the mac and doesn't return an error, so we + * do the get attr to verify whether or not the set actually happened + */ + if (ether_addr_equal(get_mac, mac)) + return 0; + + return 1; +} + +static int ionic_set_mac_address(struct net_device *netdev, void *sa) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct sockaddr *addr = sa; + u8 *mac; + int err; + + mac = (u8 *)addr->sa_data; + + if (ether_addr_equal(netdev->dev_addr, mac)) + return 0; + + err = ionic_program_mac(lif, mac); + if (err < 0) + return err; + + if (err > 0) + netdev_dbg(netdev, "%s:SET and GET ATTR Mac is not equal-due to old FW running\n", + __func__); + + err = eth_prepare_mac_addr_change(netdev, addr); + if (err) + return err; + + if (!is_zero_ether_addr(netdev->dev_addr)) { + netdev_info(netdev, "deleting mac addr %pM\n", + netdev->dev_addr); + ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr); + } + + eth_commit_mac_addr_change(netdev, addr); + netdev_info(netdev, "updating mac addr %pM\n", mac); + + return ionic_lif_addr_add(netdev_priv(netdev), mac); +} + +static void ionic_stop_queues_reconfig(struct ionic_lif *lif) +{ + /* Stop and clean the queues before reconfiguration */ + netif_device_detach(lif->netdev); + ionic_stop_queues(lif); + ionic_txrx_deinit(lif); +} + +static 
int ionic_start_queues_reconfig(struct ionic_lif *lif) +{ + int err; + + /* Re-init the queues after reconfiguration */ + + /* The only way txrx_init can fail here is if communication + * with FW is suddenly broken. There's not much we can do + * at this point - error messages have already been printed, + * so we can continue on and the user can eventually do a + * DOWN and UP to try to reset and clear the issue. + */ + err = ionic_txrx_init(lif); + ionic_link_status_check_request(lif, CAN_NOT_SLEEP); + netif_device_attach(lif->netdev); + + return err; +} + +static int ionic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_MTU, + .mtu = cpu_to_le32(new_mtu), + }, + }; + int err; + int fs; + + fs = new_mtu + ETH_HLEN + VLAN_HLEN; + if (fs < le32_to_cpu(lif->identity->eth.min_frame_size) || + fs > le32_to_cpu(lif->identity->eth.max_frame_size)) { + netdev_err(netdev, "Invalid MTU %d\n", new_mtu); + return -EINVAL; + } + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + netdev_info(netdev, "Changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + + /* if we're not running, nothing much to do */ + if (!netif_running(lif->netdev)) { + netdev->mtu = new_mtu; + return 0; + } + + mutex_lock(&lif->queue_lock); + ionic_stop_queues_reconfig(lif); + netdev->mtu = new_mtu; + err = ionic_start_queues_reconfig(lif); + mutex_unlock(&lif->queue_lock); + + return err; +} + +static void ionic_tx_timeout_work(struct work_struct *ws) +{ + struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work); + + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return; + + /* TODO: queue specific reset */ + + /* if we were stopped before this scheduled job was launched, + * don't bother the queues as they are already stopped.
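+ * (Otherwise this is a full stop/start of the queues under + * queue_lock, pending the finer-grained reset noted above.)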
+ */ + if (!netif_running(lif->netdev)) + return; + + mutex_lock(&lif->queue_lock); + ionic_stop_queues_reconfig(lif); + ionic_start_queues_reconfig(lif); + mutex_unlock(&lif->queue_lock); +} + +#ifdef HAVE_TX_TIMEOUT_TXQUEUE +static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue) +#else +static void ionic_tx_timeout(struct net_device *netdev) +#endif +{ + struct ionic_lif *lif = netdev_priv(netdev); +#if !defined(HAVE_TX_TIMEOUT_TXQUEUE) + unsigned int txqueue = -1; +#endif + + netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue); + schedule_work(&lif->tx_timeout_work); +} + +static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err; + + err = ionic_lif_vlan_add(lif, vid); + if (err) + return err; + + ionic_lif_rx_mode(lif); + + return 0; +} + +static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err; + + err = ionic_lif_vlan_del(lif, vid); + if (err) + return err; + + ionic_lif_rx_mode(lif); + + return 0; +} + +int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, + const u8 *key, const u32 *indir) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .attr = IONIC_LIF_ATTR_RSS, + .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa), + }, + }; + unsigned int i, tbl_sz; + + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) { + lif->rss_types = types; + ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types); + } + + if (key) + memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); + + if (indir) { + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + for (i = 0; i < tbl_sz; i++) + lif->rss_ind_tbl[i] = indir[i]; + } + + memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, + IONIC_RSS_HASH_KEY_SIZE); + + return ionic_adminq_post_wait(lif, &ctx); +} + +static int ionic_lif_rss_init(struct ionic_lif *lif) +{ + unsigned int tbl_sz; + unsigned int i; + + lif->rss_types = IONIC_RSS_TYPE_IPV4 | + IONIC_RSS_TYPE_IPV4_TCP | + IONIC_RSS_TYPE_IPV4_UDP | + IONIC_RSS_TYPE_IPV6 | + IONIC_RSS_TYPE_IPV6_TCP | + IONIC_RSS_TYPE_IPV6_UDP; + + /* Fill indirection table with 'default' values */ + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + for (i = 0; i < tbl_sz; i++) + lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); + + return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); +} + +static void ionic_lif_rss_deinit(struct ionic_lif *lif) +{ + int tbl_sz; + + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + memset(lif->rss_ind_tbl, 0, tbl_sz); + memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE); + + ionic_lif_rss_config(lif, 0x0, NULL, NULL); +} + +static void ionic_lif_quiesce(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_STATE, + .state = IONIC_LIF_QUIESCE, + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err); +} + +static void ionic_txrx_disable(struct ionic_lif *lif) +{ + unsigned int i; + int err = 0; + + if (lif->txqcqs) { + for (i = 0; i < lif->nxqs; i++) + err = ionic_qcq_disable(lif, lif->txqcqs[i], err); + } + + if (lif->hwstamp_txq) + err = 
ionic_qcq_disable(lif, lif->hwstamp_txq, err); + + if (lif->rxqcqs) { + for (i = 0; i < lif->nxqs; i++) + err = ionic_qcq_disable(lif, lif->rxqcqs[i], err); + } + + if (lif->hwstamp_rxq) + err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err); + + ionic_lif_quiesce(lif); +} + +static void ionic_txrx_deinit(struct ionic_lif *lif) +{ + unsigned int i; + + if (lif->txqcqs && lif->txqcqs[0]) { + for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) { + ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); + ionic_tx_flush(&lif->txqcqs[i]->cq); + ionic_tx_empty(&lif->txqcqs[i]->q); + } + } + + if (lif->rxqcqs && lif->rxqcqs[0]) { + for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) { + ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); + ionic_rx_empty(&lif->rxqcqs[i]->q); + } + } + lif->rx_mode = 0; + + if (lif->hwstamp_txq) { + ionic_lif_qcq_deinit(lif, lif->hwstamp_txq); + ionic_tx_flush(&lif->hwstamp_txq->cq); + ionic_tx_empty(&lif->hwstamp_txq->q); + } + + if (lif->hwstamp_rxq) { + ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq); + ionic_rx_empty(&lif->hwstamp_rxq->q); + } +} + +static void ionic_txrx_free(struct ionic_lif *lif) +{ + unsigned int i; + + if (lif->txqcqs) { + for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) { + ionic_qcq_free(lif, lif->txqcqs[i]); + devm_kfree(lif->ionic->dev, lif->txqcqs[i]); + lif->txqcqs[i] = NULL; + } + } + + if (lif->rxqcqs) { + for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { + ionic_qcq_free(lif, lif->rxqcqs[i]); + devm_kfree(lif->ionic->dev, lif->rxqcqs[i]); + lif->rxqcqs[i] = NULL; + } + } + + if (lif->hwstamp_txq) { + ionic_qcq_free(lif, lif->hwstamp_txq); + devm_kfree(lif->ionic->dev, lif->hwstamp_txq); + lif->hwstamp_txq = NULL; + } + + if (lif->hwstamp_rxq) { + ionic_qcq_free(lif, lif->hwstamp_rxq); + devm_kfree(lif->ionic->dev, lif->hwstamp_rxq); + lif->hwstamp_rxq = NULL; + } +} + +static int ionic_txrx_alloc(struct ionic_lif *lif) +{ + unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; + unsigned int flags, i; + int err = 0; + + num_desc = lif->ntxq_descs; + desc_sz = sizeof(struct ionic_txq_desc); + comp_sz = sizeof(struct ionic_txq_comp); + + if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && + lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == + sizeof(struct ionic_txq_sg_desc_v1)) + sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); + else + sg_desc_sz = sizeof(struct ionic_txq_sg_desc); + + flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; + + if (test_bit(IONIC_LIF_F_CMB_RINGS, lif->state)) + flags |= IONIC_QCQ_F_CMB_RINGS; + + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) && + !(lif->ionic->neth_eqs && + lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_EQ)) + flags |= IONIC_QCQ_F_INTR; + + for (i = 0; i < lif->nxqs; i++) { + err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, + num_desc, desc_sz, comp_sz, sg_desc_sz, + lif->kern_pid, &lif->txqcqs[i]); + if (err) + goto err_out; + + if (flags & IONIC_QCQ_F_INTR) { + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->txqcqs[i]->intr.index, + lif->tx_coalesce_hw); + if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) + lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; + } + + ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); + } + + flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; + if (!ionic_use_eqs(lif)) + flags |= IONIC_QCQ_F_INTR; + + if (test_bit(IONIC_LIF_F_CMB_RINGS, lif->state)) + flags |= IONIC_QCQ_F_CMB_RINGS; + + num_desc = lif->nrxq_descs; + desc_sz = sizeof(struct ionic_rxq_desc); + comp_sz = sizeof(struct ionic_rxq_comp); + sg_desc_sz = 
sizeof(struct ionic_rxq_sg_desc); + + if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC) + comp_sz *= 2; + + for (i = 0; i < lif->nxqs; i++) { + err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, + num_desc, desc_sz, comp_sz, sg_desc_sz, + lif->kern_pid, &lif->rxqcqs[i]); + if (err) + goto err_out; + + lif->rxqcqs[i]->q.features = lif->rxq_features; + + if (flags & IONIC_QCQ_F_INTR) { + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->rxqcqs[i]->intr.index, + lif->rx_coalesce_hw); + if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state)) + lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw; + + if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + ionic_link_qcq_interrupts(lif->rxqcqs[i], + lif->txqcqs[i]); + } + + ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); + } + + lif->n_txrx_alloc++; + + return 0; + +err_out: + ionic_txrx_free(lif); + + return err; +} + +static int ionic_txrx_init(struct ionic_lif *lif) +{ + unsigned int i; + int err; + + for (i = 0; i < lif->nxqs; i++) { + err = ionic_lif_txq_init(lif, lif->txqcqs[i]); + if (err) + goto err_out; + + err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]); + if (err) { + ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); + goto err_out; + } + } + + if (lif->netdev->features & NETIF_F_RXHASH) + ionic_lif_rss_init(lif); + + ionic_lif_rx_mode(lif); + + return 0; + +err_out: + while (i--) { + ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); + ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); + } + + return err; +} + +static int ionic_txrx_enable(struct ionic_lif *lif) +{ + int derr = 0; + int i, err; + + for (i = 0; i < lif->nxqs; i++) { + if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { + dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i); + err = -ENXIO; + goto err_out; + } + + ionic_rx_fill(&lif->rxqcqs[i]->q); + err = ionic_qcq_enable(lif->rxqcqs[i]); + if (err) + goto err_out; + + err = ionic_qcq_enable(lif->txqcqs[i]); + if (err) { + derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err); + goto err_out; + } + } + + if (lif->hwstamp_rxq) { + ionic_rx_fill(&lif->hwstamp_rxq->q); + err = ionic_qcq_enable(lif->hwstamp_rxq); + if (err) + goto err_out_hwstamp_rx; + } + + if (lif->hwstamp_txq) { + err = ionic_qcq_enable(lif->hwstamp_txq); + if (err) + goto err_out_hwstamp_tx; + } + + return 0; + +err_out_hwstamp_tx: + if (lif->hwstamp_rxq) + derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr); +err_out_hwstamp_rx: + i = lif->nxqs; +err_out: + while (i--) { + derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr); + derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr); + } + + return err; +} + +static int ionic_start_queues(struct ionic_lif *lif) +{ + int err; + + /* If we've noticed that the device is in a broken state, don't + * attempt to bring the queues back up. 
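+ * (The BROKEN bit is cleared in ionic_open(), so an ifdown/ifup + * cycle is the recovery path.)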
+ */ + if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) + return -EIO; + + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + + if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) + return 0; + + err = ionic_txrx_enable(lif); + if (err) { + clear_bit(IONIC_LIF_F_UP, lif->state); + return err; + } + + netif_tx_wake_all_queues(lif->netdev); + + return 0; +} + +static int ionic_open(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err; + + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + dev_dbg(lif->ionic->dev, "%s: %s called when state=UP\n", + __func__, lif->name); + return 0; + } + + /* If recovering from a broken state, clear the bit and we'll try again */ + if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) + netdev_info(netdev, "clearing broken state\n"); + + mutex_lock(&lif->queue_lock); + + err = ionic_txrx_alloc(lif); + if (err) + goto err_unlock; + + err = ionic_txrx_init(lif); + if (err) + goto err_txrx_free; + + err = netif_set_real_num_tx_queues(netdev, lif->nxqs); + if (err) + goto err_txrx_deinit; + + err = netif_set_real_num_rx_queues(netdev, lif->nxqs); + if (err) + goto err_txrx_deinit; + + /* If hardware timestamping is enabled, but the queues were freed by + * ionic_stop, those need to be reallocated and initialized, too. + */ + ionic_lif_hwstamp_recreate_queues(lif); + + /* don't start the queues until we have link */ + if (netif_carrier_ok(netdev)) { + err = ionic_start_queues(lif); + if (err) + goto err_txrx_deinit; + } + + mutex_unlock(&lif->queue_lock); + return 0; + +err_txrx_deinit: + ionic_txrx_deinit(lif); +err_txrx_free: + ionic_txrx_free(lif); +err_unlock: + mutex_unlock(&lif->queue_lock); + return err; +} + +static void ionic_stop_queues(struct ionic_lif *lif) +{ + if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) + return; + + netif_tx_disable(lif->netdev); + ionic_txrx_disable(lif); +} + +static int ionic_stop(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return 0; + + mutex_lock(&lif->queue_lock); + ionic_stop_queues(lif); + ionic_txrx_deinit(lif); + ionic_txrx_free(lif); + mutex_unlock(&lif->queue_lock); + + return 0; +} + +static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + switch (cmd) { + case SIOCSHWTSTAMP: + return ionic_lif_hwstamp_set(lif, ifr); + case SIOCGHWTSTAMP: + return ionic_lif_hwstamp_get(lif, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int ionic_get_fw_vf_config(struct ionic *ionic, int vf, struct ionic_vf *vfdata) +{ + struct ionic_vf_getattr_comp comp = { 0 }; + int err; + u8 attr; + + attr = IONIC_VF_ATTR_VLAN; + err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp); + if (err && comp.status != IONIC_RC_ENOSUPP) + goto err_out; + if (!err) + vfdata->vlanid = comp.vlanid; + + attr = IONIC_VF_ATTR_SPOOFCHK; + err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp); + if (err && comp.status != IONIC_RC_ENOSUPP) + goto err_out; + if (!err) + vfdata->spoofchk = comp.spoofchk; + + attr = IONIC_VF_ATTR_LINKSTATE; + err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp); + if (err && comp.status != IONIC_RC_ENOSUPP) + goto err_out; + if (!err) { + switch (comp.linkstate) { + case IONIC_VF_LINK_STATUS_UP: + vfdata->linkstate = IFLA_VF_LINK_STATE_ENABLE; + break; + case IONIC_VF_LINK_STATUS_DOWN: + vfdata->linkstate = IFLA_VF_LINK_STATE_DISABLE; + break; + case IONIC_VF_LINK_STATUS_AUTO: + vfdata->linkstate = 
IFLA_VF_LINK_STATE_AUTO; + break; + default: + dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate); + break; + } + } + + attr = IONIC_VF_ATTR_RATE; + err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp); + if (err && comp.status != IONIC_RC_ENOSUPP) + goto err_out; + if (!err) + vfdata->maxrate = comp.maxrate; + + attr = IONIC_VF_ATTR_TRUST; + err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp); + if (err && comp.status != IONIC_RC_ENOSUPP) + goto err_out; + if (!err) + vfdata->trusted = comp.trust; + + attr = IONIC_VF_ATTR_MAC; + err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp); + if (err && comp.status != IONIC_RC_ENOSUPP) + goto err_out; + if (!err) + ether_addr_copy(vfdata->macaddr, comp.macaddr); + +err_out: + if (err) + dev_err(ionic->dev, "Failed to get %s for VF %d\n", + ionic_vf_attr_to_str(attr), vf); + + return err; +} + +static int ionic_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivf) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + struct ionic_vf vfdata = { 0 }; + int ret = 0; + + if (!netif_device_present(netdev)) + return -EBUSY; + + down_read(&ionic->vf_op_lock); + + if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { + ret = -EINVAL; + } else { + ivf->vf = vf; + ivf->qos = 0; + + ret = ionic_get_fw_vf_config(ionic, vf, &vfdata); + if (!ret) { + ivf->vlan = le16_to_cpu(vfdata.vlanid); + ivf->spoofchk = vfdata.spoofchk; + ivf->linkstate = vfdata.linkstate; + ivf->max_tx_rate = le32_to_cpu(vfdata.maxrate); + ivf->trusted = vfdata.trusted; + ether_addr_copy(ivf->mac, vfdata.macaddr); + } + } + + up_read(&ionic->vf_op_lock); + return ret; +} + +static int ionic_get_vf_stats(struct net_device *netdev, int vf, + struct ifla_vf_stats *vf_stats) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + struct ionic_lif_stats *vs; + int ret = 0; + + if (!netif_device_present(netdev)) + return -EBUSY; + + down_read(&ionic->vf_op_lock); + + if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { + ret = -EINVAL; + } else { + memset(vf_stats, 0, sizeof(*vf_stats)); + vs = &ionic->vfs[vf].stats; + + vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets); + vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets); + vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes); + vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes); + vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets); + vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets); +#if (KERNEL_VERSION(4, 16, 0) < LINUX_VERSION_CODE) + vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) + + le64_to_cpu(vs->rx_mcast_drop_packets) + + le64_to_cpu(vs->rx_bcast_drop_packets); + vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) + + le64_to_cpu(vs->tx_mcast_drop_packets) + + le64_to_cpu(vs->tx_bcast_drop_packets); +#endif + } + + up_read(&ionic->vf_op_lock); + return ret; +} + +static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC }; + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + int ret; + + if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) + return -EINVAL; + + if (!netif_device_present(netdev)) + return -EBUSY; + + down_write(&ionic->vf_op_lock); + + if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { + ret = -EINVAL; + } else { + ether_addr_copy(vfc.macaddr, mac); + dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n", + __func__, vf, vfc.macaddr); + + ret = 
ionic_set_vf_config(ionic, vf, &vfc); + if (!ret) + ether_addr_copy(ionic->vfs[vf].macaddr, mac); + } + + up_write(&ionic->vf_op_lock); + return ret; +} + +#if (RHEL_RELEASE_CODE == 0 || \ + defined(HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN) || \ + RHEL_RELEASE_VERSION(8, 0) < RHEL_RELEASE_CODE) + +#if (RHEL_RELEASE_CODE == 0 && KERNEL_VERSION(4, 9, 0) >= LINUX_VERSION_CODE) +static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +#else +static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 proto) +#endif +{ + struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN }; + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + int ret; + + /* until someday when we support qos */ + if (qos) + return -EINVAL; + + if (vlan > 4095) + return -EINVAL; + + if (!netif_device_present(netdev)) + return -EBUSY; + + down_write(&ionic->vf_op_lock); + + if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { + ret = -EINVAL; + } else { + vfc.vlanid = cpu_to_le16(vlan); + dev_dbg(ionic->dev, "%s: vf %d vlan %d\n", + __func__, vf, le16_to_cpu(vfc.vlanid)); + + ret = ionic_set_vf_config(ionic, vf, &vfc); + if (!ret) + ionic->vfs[vf].vlanid = cpu_to_le16(vlan); + } + + up_write(&ionic->vf_op_lock); + return ret; +} +#endif + +static int ionic_set_vf_rate(struct net_device *netdev, int vf, + int tx_min, int tx_max) +{ + struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE }; + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + int ret; + + /* setting the min just seems silly */ + if (tx_min) + return -EINVAL; + + if (!netif_device_present(netdev)) + return -EBUSY; + + down_write(&ionic->vf_op_lock); + + if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { + ret = -EINVAL; + } else { + vfc.maxrate = cpu_to_le32(tx_max); + dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n", + __func__, vf, le32_to_cpu(vfc.maxrate)); + + ret = ionic_set_vf_config(ionic, vf, &vfc); + if (!ret) + ionic->vfs[vf].maxrate = cpu_to_le32(tx_max); + } + + up_write(&ionic->vf_op_lock); + return ret; +} + +static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set) +{ + struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK }; + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + u8 data = set; /* convert to u8 for config */ + int ret; + + if (!netif_device_present(netdev)) + return -EBUSY; + + down_write(&ionic->vf_op_lock); + + if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { + ret = -EINVAL; + } else { + vfc.spoofchk = set; + dev_dbg(ionic->dev, "%s: vf %d spoof %d\n", + __func__, vf, vfc.spoofchk); + + ret = ionic_set_vf_config(ionic, vf, &vfc); + if (!ret) + ionic->vfs[vf].spoofchk = data; + } + + up_write(&ionic->vf_op_lock); + return ret; +} + +#ifdef HAVE_NDO_SET_VF_TRUST +static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set) +{ + struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST }; + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + u8 data = set; /* convert to u8 for config */ + int ret; + + if (!netif_device_present(netdev)) + return -EBUSY; + + down_write(&ionic->vf_op_lock); + + if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { + ret = -EINVAL; + } else { + vfc.trust = set; + dev_dbg(ionic->dev, "%s: vf %d trust %d\n", + __func__, vf, vfc.trust); + + ret = ionic_set_vf_config(ionic, vf, &vfc); + if (!ret) + ionic->vfs[vf].trusted = data; + } + + 
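/* drop the writer side of vf_op_lock; the getattr paths read-lock it */ +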
up_write(&ionic->vf_op_lock); + return ret; +} +#endif + +static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) +{ + struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE }; + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + u8 data; + int ret; + + switch (set) { + case IFLA_VF_LINK_STATE_ENABLE: + data = IONIC_VF_LINK_STATUS_UP; + break; + case IFLA_VF_LINK_STATE_DISABLE: + data = IONIC_VF_LINK_STATUS_DOWN; + break; + case IFLA_VF_LINK_STATE_AUTO: + data = IONIC_VF_LINK_STATUS_AUTO; + break; + default: + return -EINVAL; + } + + if (!netif_device_present(netdev)) + return -EBUSY; + + down_write(&ionic->vf_op_lock); + + if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { + ret = -EINVAL; + } else { + vfc.linkstate = data; + dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n", + __func__, vf, vfc.linkstate); + + ret = ionic_set_vf_config(ionic, vf, &vfc); + if (!ret) + ionic->vfs[vf].linkstate = set; + } + + up_write(&ionic->vf_op_lock); + return ret; +} + +static void ionic_vf_attr_replay(struct ionic_lif *lif) +{ + struct ionic_vf_setattr_cmd vfc = { 0 }; + struct ionic *ionic = lif->ionic; + struct ionic_vf *v; + int i; + + if (!ionic->vfs) + return; + + down_read(&ionic->vf_op_lock); + + for (i = 0; i < ionic->num_vfs; i++) { + v = &ionic->vfs[i]; + + if (v->stats_pa) { + vfc.attr = IONIC_VF_ATTR_STATSADDR; + vfc.stats_pa = cpu_to_le64(v->stats_pa); + (void)ionic_set_vf_config(ionic, i, &vfc); + vfc.stats_pa = 0; + } + + if (!is_zero_ether_addr(v->macaddr)) { + vfc.attr = IONIC_VF_ATTR_MAC; + ether_addr_copy(vfc.macaddr, v->macaddr); + (void)ionic_set_vf_config(ionic, i, &vfc); + eth_zero_addr(vfc.macaddr); + } + + if (v->vlanid) { + vfc.attr = IONIC_VF_ATTR_VLAN; + vfc.vlanid = v->vlanid; + (void)ionic_set_vf_config(ionic, i, &vfc); + vfc.vlanid = 0; + } + + if (v->maxrate) { + vfc.attr = IONIC_VF_ATTR_RATE; + vfc.maxrate = v->maxrate; + (void)ionic_set_vf_config(ionic, i, &vfc); + vfc.maxrate = 0; + } + + if (v->spoofchk) { + vfc.attr = IONIC_VF_ATTR_SPOOFCHK; + vfc.spoofchk = v->spoofchk; + (void)ionic_set_vf_config(ionic, i, &vfc); + vfc.spoofchk = 0; + } + + if (v->trusted) { + vfc.attr = IONIC_VF_ATTR_TRUST; + vfc.trust = v->trusted; + (void)ionic_set_vf_config(ionic, i, &vfc); + vfc.trust = 0; + } + + if (v->linkstate) { + vfc.attr = IONIC_VF_ATTR_LINKSTATE; + vfc.linkstate = v->linkstate; + (void)ionic_set_vf_config(ionic, i, &vfc); + vfc.linkstate = 0; + } + } + + up_read(&ionic->vf_op_lock); + + ionic_vf_start(ionic, -1); +} + +static const struct net_device_ops ionic_netdev_ops = { + .ndo_open = ionic_open, + .ndo_stop = ionic_stop, + .ndo_eth_ioctl = ionic_eth_ioctl, + .ndo_start_xmit = ionic_start_xmit, + .ndo_get_stats64 = ionic_get_stats64, + .ndo_set_rx_mode = ionic_ndo_set_rx_mode, + .ndo_set_features = ionic_set_features, + .ndo_set_mac_address = ionic_set_mac_address, + .ndo_validate_addr = eth_validate_addr, +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + .extended.ndo_change_mtu = ionic_change_mtu, +#else + .ndo_change_mtu = ionic_change_mtu, +#endif + .ndo_tx_timeout = ionic_tx_timeout, + .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = ionic_set_vf_vlan, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST + .extended.ndo_set_vf_trust = ionic_set_vf_trust, +#endif +#else +#if (RHEL_RELEASE_CODE == 0 || RHEL_RELEASE_VERSION(8, 0) < RHEL_RELEASE_CODE) + 
.ndo_set_vf_vlan = ionic_set_vf_vlan, +#endif + .ndo_set_vf_trust = ionic_set_vf_trust, +#endif + .ndo_set_vf_mac = ionic_set_vf_mac, + .ndo_set_vf_rate = ionic_set_vf_rate, + .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk, + .ndo_get_vf_config = ionic_get_vf_config, + .ndo_set_vf_link_state = ionic_set_vf_link_state, + .ndo_get_vf_stats = ionic_get_vf_stats, + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT +/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from within the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif +}; + +static const struct net_device_ops ionic_mnic_netdev_ops = { + .ndo_open = ionic_open, + .ndo_stop = ionic_stop, + .ndo_eth_ioctl = ionic_eth_ioctl, + .ndo_start_xmit = ionic_start_xmit, + .ndo_get_stats64 = ionic_get_stats64, + .ndo_set_rx_mode = ionic_ndo_set_rx_mode, + .ndo_set_features = ionic_set_features, + .ndo_set_mac_address = ionic_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = ionic_tx_timeout, + .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + .extended.ndo_change_mtu = ionic_change_mtu, +#else + .ndo_change_mtu = ionic_change_mtu, +#endif + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT +/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from within the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif +}; + +static int ionic_cmb_reconfig(struct ionic_lif *lif, + struct ionic_queue_params *qparam) +{ + struct ionic_queue_params start_qparams; + int err = 0; + + /* When changing CMB queue parameters, we're using limited + * on-device memory and don't have extra memory to use for + * duplicate allocations, so we free it all first then + * re-allocate with the new parameters.
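+ * (Contrast with ionic_reconfigure_queues() below, which builds the + * new host-memory rings alongside the old ones and swaps them in.)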
+ */ + + /* Checkpoint for possible unwind */ + ionic_init_queue_params(lif, &start_qparams); + + /* Stop and free the queues */ + ionic_stop_queues_reconfig(lif); + ionic_txrx_free(lif); + + /* Set up new qparams */ + ionic_set_queue_params(lif, qparam); + + if (netif_running(lif->netdev)) { + /* Alloc and start the new configuration */ + err = ionic_txrx_alloc(lif); + if (err) { + dev_warn(lif->ionic->dev, + "CMB reconfig failed, restoring values: %d\n", err); + + /* Back out the changes */ + ionic_set_queue_params(lif, &start_qparams); + err = ionic_txrx_alloc(lif); + if (err) { + dev_err(lif->ionic->dev, + "CMB restore failed: %d\n", err); + goto errout; + } + } + + ionic_start_queues_reconfig(lif); + } else { + /* This was detached in ionic_stop_queues_reconfig() */ + netif_device_attach(lif->netdev); + } + +errout: + return err; +} + +static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) +{ + /* only swapping the queues, not the napi, flags, or other stuff */ + swap(a->q.features, b->q.features); + swap(a->q.num_descs, b->q.num_descs); + swap(a->q.desc_size, b->q.desc_size); + swap(a->q.base, b->q.base); + swap(a->q.base_pa, b->q.base_pa); + swap(a->q.info, b->q.info); + swap(a->q_base, b->q_base); + swap(a->q_base_pa, b->q_base_pa); + swap(a->q_size, b->q_size); + + swap(a->q.sg_desc_size, b->q.sg_desc_size); + swap(a->q.sg_base, b->q.sg_base); + swap(a->q.sg_base_pa, b->q.sg_base_pa); + swap(a->sg_base, b->sg_base); + swap(a->sg_base_pa, b->sg_base_pa); + swap(a->sg_size, b->sg_size); + + swap(a->cq.num_descs, b->cq.num_descs); + swap(a->cq.desc_size, b->cq.desc_size); + swap(a->cq.base, b->cq.base); + swap(a->cq.base_pa, b->cq.base_pa); + swap(a->cq.info, b->cq.info); + swap(a->cq_base, b->cq_base); + swap(a->cq_base_pa, b->cq_base_pa); + swap(a->cq_size, b->cq_size); + + ionic_debugfs_del_qcq(a); + ionic_debugfs_add_qcq(a->q.lif, a); +} + +int ionic_reconfigure_queues(struct ionic_lif *lif, + struct ionic_queue_params *qparam) +{ + unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; + struct ionic_qcq **tx_qcqs = NULL; + struct ionic_qcq **rx_qcqs = NULL; + unsigned int flags, i; + int err = 0; + + /* Are we changing q params while CMB is on */ + if (test_bit(IONIC_LIF_F_CMB_RINGS, lif->state) && qparam->cmb_enabled) + return ionic_cmb_reconfig(lif, qparam); + + /* allocate temporary qcq arrays to hold new queue structs */ + if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { + tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, + sizeof(struct ionic_qcq *), GFP_KERNEL); + if (!tx_qcqs) { + err = -ENOMEM; + goto err_out; + } + } + if (qparam->nxqs != lif->nxqs || + qparam->nrxq_descs != lif->nrxq_descs || + qparam->rxq_features != lif->rxq_features) { + rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, + sizeof(struct ionic_qcq *), GFP_KERNEL); + if (!rx_qcqs) { + err = -ENOMEM; + goto err_out; + } + } + + /* allocate new desc_info and rings, but leave the interrupt setup + * until later so as to not mess with the still-running queues + */ + if (tx_qcqs) { + num_desc = qparam->ntxq_descs; + desc_sz = sizeof(struct ionic_txq_desc); + comp_sz = sizeof(struct ionic_txq_comp); + + if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && + lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == + sizeof(struct ionic_txq_sg_desc_v1)) + sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); + else + sg_desc_sz = sizeof(struct ionic_txq_sg_desc); + + for (i = 0; i < qparam->nxqs; i++) { + /* If missing, short placeholder qcq needed 
for swap */ + if (!lif->txqcqs[i]) { + flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, + 4, desc_sz, comp_sz, sg_desc_sz, + lif->kern_pid, &lif->txqcqs[i]); + if (err) + goto err_out; + } + + flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, + num_desc, desc_sz, comp_sz, sg_desc_sz, + lif->kern_pid, &tx_qcqs[i]); + if (err) + goto err_out; + } + } + + if (rx_qcqs) { + num_desc = qparam->nrxq_descs; + desc_sz = sizeof(struct ionic_rxq_desc); + comp_sz = sizeof(struct ionic_rxq_comp); + sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); + + if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC) + comp_sz *= 2; + + for (i = 0; i < qparam->nxqs; i++) { + /* If missing, short placeholder qcq needed for swap */ + if (!lif->rxqcqs[i]) { + flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, + 4, desc_sz, comp_sz, sg_desc_sz, + lif->kern_pid, &lif->rxqcqs[i]); + if (err) + goto err_out; + } + + flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, + num_desc, desc_sz, comp_sz, sg_desc_sz, + lif->kern_pid, &rx_qcqs[i]); + if (err) + goto err_out; + + rx_qcqs[i]->q.features = qparam->rxq_features; + } + } + + /* stop and clean the queues */ + ionic_stop_queues_reconfig(lif); + + if (qparam->nxqs != lif->nxqs) { + err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs); + if (err) + goto err_out_reinit_unlock; + err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs); + if (err) { + netif_set_real_num_tx_queues(lif->netdev, lif->nxqs); + goto err_out_reinit_unlock; + } + } + + /* swap new desc_info and rings, keeping existing interrupt config */ + if (tx_qcqs) { + lif->ntxq_descs = qparam->ntxq_descs; + for (i = 0; i < qparam->nxqs; i++) + ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]); + } + + if (rx_qcqs) { + lif->nrxq_descs = qparam->nrxq_descs; + for (i = 0; i < qparam->nxqs; i++) + ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]); + } + + /* if we need to change the interrupt layout, this is the time */ + if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) || + qparam->nxqs != lif->nxqs) { + if (qparam->intr_split) { + set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + } else { + clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; + lif->tx_coalesce_hw = lif->rx_coalesce_hw; + } + + /* Clear existing interrupt assignments. We check for NULL here + * because we're checking the whole array for potential qcqs, not + * just those qcqs that have just been set up. 
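+ * + * (Editor's note: the loop below intentionally walks the full + * ntxqs_per_lif array rather than qparam->nxqs, so interrupts held by + * queues beyond a shrunken queue count are released as well.)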
+ */ + for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) { + if (lif->txqcqs[i]) + ionic_qcq_intr_free(lif, lif->txqcqs[i]); + if (lif->rxqcqs[i]) + ionic_qcq_intr_free(lif, lif->rxqcqs[i]); + } + + /* re-assign the interrupts */ + for (i = 0; i < qparam->nxqs; i++) { + lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR; + err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]); + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->rxqcqs[i]->intr.index, + lif->rx_coalesce_hw); + + if (qparam->intr_split) { + lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR; + err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]); + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->txqcqs[i]->intr.index, + lif->tx_coalesce_hw); + if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) + lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; + } else { + lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]); + } + } + } + + /* now we can rework the debugfs mappings */ + if (tx_qcqs) { + for (i = 0; i < qparam->nxqs; i++) { + ionic_debugfs_del_qcq(lif->txqcqs[i]); + ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); + } + } + + if (rx_qcqs) { + for (i = 0; i < qparam->nxqs; i++) { + ionic_debugfs_del_qcq(lif->rxqcqs[i]); + ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); + } + } + + swap(lif->nxqs, qparam->nxqs); + swap(lif->rxq_features, qparam->rxq_features); + +err_out_reinit_unlock: + /* re-init the queues, but don't lose an error code */ + if (err) + ionic_start_queues_reconfig(lif); + else + err = ionic_start_queues_reconfig(lif); + +err_out: + /* free old allocs without cleaning intr */ + for (i = 0; i < qparam->nxqs; i++) { + if (tx_qcqs && tx_qcqs[i]) { + tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, tx_qcqs[i]); + devm_kfree(lif->ionic->dev, tx_qcqs[i]); + tx_qcqs[i] = NULL; + } + if (rx_qcqs && rx_qcqs[i]) { + rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, rx_qcqs[i]); + devm_kfree(lif->ionic->dev, rx_qcqs[i]); + rx_qcqs[i] = NULL; + } + } + + /* free q array */ + if (rx_qcqs) { + devm_kfree(lif->ionic->dev, rx_qcqs); + rx_qcqs = NULL; + } + if (tx_qcqs) { + devm_kfree(lif->ionic->dev, tx_qcqs); + tx_qcqs = NULL; + } + + /* clean the unused dma and info allocations when new set is smaller + * than the full array, but leave the qcq shells in place + */ + for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { + if (lif->txqcqs && lif->txqcqs[i]) { + lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->txqcqs[i]); + } + + if (lif->rxqcqs && lif->rxqcqs[i]) { + lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->rxqcqs[i]); + } + } + + if (err) + netdev_info(lif->netdev, "%s: failed %d\n", __func__, err); + + return err; +} + +int ionic_lif_alloc(struct ionic *ionic) +{ + struct device *dev = ionic->dev; + union ionic_lif_identity *lid; + struct net_device *netdev; + struct ionic_lif *lif; + u32 minfs, maxfs; + int tbl_sz; + int err; + + lid = kzalloc(sizeof(*lid), GFP_KERNEL); + if (!lid) + return -ENOMEM; + + netdev = ionic_alloc_netdev(ionic); + if (!netdev) { + dev_err(dev, "Cannot allocate netdev, aborting\n"); + err = -ENOMEM; + goto err_out_free_lid; + } + + SET_NETDEV_DEV(netdev, dev); + + lif = netdev_priv(netdev); + lif->netdev = netdev; + ionic->lif = lif; + + if (ionic->is_mgmt_nic || ionic->pfdev) + netdev->netdev_ops = &ionic_mnic_netdev_ops; + else + netdev->netdev_ops = &ionic_netdev_ops; + + ionic_ethtool_set_ops(netdev); + netdev->watchdog_timeo = 2 * HZ; + netif_carrier_off(netdev); + + 
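/* (Editor's note: the mnic ops table chosen above omits the SR-IOV VF + * callbacks present in ionic_netdev_ops; management NICs and platform + * devices do not expose VFs.) */ + 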
lif->nrdma_eqs_avail = ionic->nrdma_eqs_per_lif; + lif->nrdma_eqs = ionic->nrdma_eqs_per_lif; + lif->nxqs = ionic->ntxqs_per_lif; + + lif->identity = lid; + lif->lif_type = IONIC_LIF_TYPE_CLASSIC; + ionic_lif_identify(ionic, lif->lif_type, lif->identity); + + lif->ionic = ionic; + lif->index = 0; + if (is_kdump_kernel()) { + lif->ntxq_descs = IONIC_MIN_TXRX_DESC; + lif->nrxq_descs = IONIC_MIN_TXRX_DESC; + } else { + lif->ntxq_descs = IONIC_DEF_TXRX_DESC; + lif->nrxq_descs = IONIC_DEF_TXRX_DESC; + } + + /* find mtu limits */ + minfs = __le32_to_cpu(lif->identity->eth.min_frame_size); + minfs = max_t(unsigned int, minfs, ETH_MIN_MTU); + maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN; +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + lif->netdev->extended->min_mtu = minfs; + lif->netdev->extended->max_mtu = maxfs; +#else + lif->netdev->min_mtu = minfs; + lif->netdev->max_mtu = maxfs; +#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ +#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */ + + /* Convert the default coalesce value to actual hw resolution */ + lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; + lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, + lif->rx_coalesce_usecs); + lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; + lif->tx_coalesce_hw = lif->rx_coalesce_hw; + set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); + set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); + + snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); + + mutex_init(&lif->queue_lock); + mutex_init(&lif->config_lock); + mutex_init(&lif->dbid_inuse_lock); + + spin_lock_init(&lif->adminq_lock); + + spin_lock_init(&lif->deferred.lock); + INIT_LIST_HEAD(&lif->deferred.list); + INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work); + + /* allocate lif info */ + lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); + lif->info = dma_alloc_coherent(dev, lif->info_sz, + &lif->info_pa, GFP_KERNEL); + if (!lif->info) { + dev_err(dev, "Failed to allocate lif info, aborting\n"); + err = -ENOMEM; + goto err_out_free_mutex; + } + + ionic_debugfs_add_lif(lif); + + /* allocate control queues and txrx queue arrays */ + ionic_lif_queue_identify(lif); + err = ionic_qcqs_alloc(lif); + if (err) + goto err_out_free_lif_info; + + /* allocate rss indirection table */ + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; + lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, + &lif->rss_ind_tbl_pa, + GFP_KERNEL); + + if (!lif->rss_ind_tbl) { + err = -ENOMEM; + dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); + goto err_out_free_qcqs; + } + netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); + + ionic_lif_alloc_phc(lif); + + return 0; + +err_out_free_qcqs: + ionic_qcqs_free(lif); +err_out_free_lif_info: + dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); + lif->info = NULL; + lif->info_pa = 0; +err_out_free_mutex: + mutex_destroy(&lif->config_lock); + mutex_destroy(&lif->queue_lock); + mutex_destroy(&lif->dbid_inuse_lock); + free_netdev(lif->netdev); + lif = NULL; +err_out_free_lid: + kfree(lid); + + return err; +} + +static void ionic_lif_reset(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->ionic->idev; + + mutex_lock(&lif->ionic->dev_cmd_lock); + ionic_dev_cmd_lif_reset(idev, lif->index); + ionic_dev_cmd_wait(lif->ionic, devcmd_timeout); + mutex_unlock(&lif->ionic->dev_cmd_lock); +} + +static void ionic_lif_handle_fw_down(struct ionic_lif 
*lif) +{ + struct ionic *ionic = lif->ionic; + + if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return; + + dev_info(ionic->dev, "FW Down: Stopping LIFs\n"); + + /* put off the next watchdog if it has been set up */ + netif_device_detach(lif->netdev); + + mutex_lock(&lif->queue_lock); + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); + ionic_stop_queues(lif); + } + + if (netif_running(lif->netdev)) { + ionic_txrx_deinit(lif); + ionic_txrx_free(lif); + } + ionic_lif_deinit(lif); + ionic_reset(ionic); + ionic_qcqs_free(lif); + + mutex_unlock(&lif->queue_lock); + + clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); + dev_info(ionic->dev, "FW Down: LIFs stopped\n"); +} + +static void ionic_lif_handle_fw_up(struct ionic_lif *lif) +{ + struct ionic *ionic = lif->ionic; + int err; + + if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return; + + dev_info(ionic->dev, "FW Up: restarting LIFs\n"); + + ionic_init_devinfo(ionic); + err = ionic_identify(ionic); + if (err) + goto err_out; + err = ionic_port_identify(ionic); + if (err) + goto err_out; + err = ionic_port_init(ionic); + if (err) + goto err_out; + + mutex_lock(&lif->queue_lock); + + if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) + dev_info(ionic->dev, "FW Up: clearing broken state\n"); + + err = ionic_qcqs_alloc(lif); + if (err) + goto err_unlock; + + err = ionic_lif_init(lif); + if (err) + goto err_qcqs_free; + + ionic_vf_attr_replay(lif); + + if (lif->registered) + ionic_lif_set_netdev_info(lif); + + ionic_rx_filter_replay(lif); + + if (netif_running(lif->netdev)) { + err = ionic_txrx_alloc(lif); + if (err) + goto err_lifs_deinit; + + err = ionic_txrx_init(lif); + if (err) + goto err_txrx_free; + } + + mutex_unlock(&lif->queue_lock); + + clear_bit(IONIC_LIF_F_FW_RESET, lif->state); + ionic_link_status_check_request(lif, CAN_SLEEP); + netif_device_attach(lif->netdev); + dev_info(ionic->dev, "FW Up: LIFs restarted\n"); + + /* restore the hardware timestamping queues */ + ionic_lif_hwstamp_replay(lif); + + return; + +err_txrx_free: + ionic_txrx_free(lif); +err_lifs_deinit: + ionic_lif_deinit(lif); +err_qcqs_free: + ionic_qcqs_free(lif); +err_unlock: + mutex_unlock(&lif->queue_lock); +err_out: + dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); +} + +static void ionic_lif_dbid_inuse_free(struct ionic_lif *lif) +{ + mutex_lock(&lif->dbid_inuse_lock); + bitmap_free(lif->dbid_inuse); + lif->dbid_inuse = NULL; + mutex_unlock(&lif->dbid_inuse_lock); +} + +void ionic_lif_free(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + + ionic_lif_free_phc(lif); + + /* free rss indirection table */ + dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, + lif->rss_ind_tbl_pa); + lif->rss_ind_tbl = NULL; + lif->rss_ind_tbl_pa = 0; + + /* free queues */ + ionic_qcqs_free(lif); + if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + ionic_lif_reset(lif); + + /* free lif info */ + kfree(lif->identity); + dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); + lif->info = NULL; + lif->info_pa = 0; + + /* unmap doorbell page */ + ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); + lif->kern_dbpage = NULL; + ionic_lif_dbid_inuse_free(lif); + + mutex_destroy(&lif->config_lock); + mutex_destroy(&lif->queue_lock); + mutex_destroy(&lif->dbid_inuse_lock); + + /* free netdev & lif */ + ionic_debugfs_del_lif(lif); + free_netdev(lif->netdev); +} + +void ionic_lif_deinit(struct ionic_lif *lif) +{ + if (!test_and_clear_bit(IONIC_LIF_F_INITED, 
lif->state)) + return; + + if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { + cancel_work_sync(&lif->deferred.work); + cancel_work_sync(&lif->tx_timeout_work); + ionic_rx_filters_deinit(lif); + if (lif->netdev->features & NETIF_F_RXHASH) + ionic_lif_rss_deinit(lif); + } + + ionic_eqs_deinit(lif->ionic); + ionic_eqs_free(lif->ionic); + + napi_disable(&lif->adminqcq->napi); + ionic_lif_qcq_deinit(lif, lif->notifyqcq); + ionic_lif_qcq_deinit(lif, lif->adminqcq); + + ionic_lif_dbid_inuse_free(lif); + + ionic_lif_reset(lif); +} + +static int ionic_lif_adminq_init(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + struct ionic_q_init_comp comp; + struct ionic_dev *idev; + struct ionic_qcq *qcq; + struct ionic_queue *q; + int err; + + idev = &lif->ionic->idev; + qcq = lif->adminqcq; + q = &qcq->q; + + mutex_lock(&lif->ionic->dev_cmd_lock); + ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); + err = ionic_dev_cmd_wait(lif->ionic, devcmd_timeout); + ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); + mutex_unlock(&lif->ionic->dev_cmd_lock); + if (err) { + netdev_err(lif->netdev, "adminq init failed %d\n", err); + return err; + } + + q->hw_type = comp.hw_type; + q->hw_index = le32_to_cpu(comp.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); + + q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE; + q->dbell_jiffies = jiffies; + + netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi); + + qcq->napi_qcq = qcq; + timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); + + napi_enable(&qcq->napi); + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + + qcq->flags |= IONIC_QCQ_F_INITED; + + return 0; +} + +static int ionic_lif_notifyq_init(struct ionic_lif *lif) +{ + struct ionic_qcq *qcq = lif->notifyqcq; + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + int err; + + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .ver = lif->qtype_info[q->type].version, + .index = cpu_to_le32(q->index), + .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_ENA), + .intr_index = cpu_to_le16(lif->adminqcq->intr.index), + .pid = cpu_to_le16(q->pid), + .ring_size = ilog2(q->num_descs), + .ring_base = cpu_to_le64(q->base_pa), + } + }; + + dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); + dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); + dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); + dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) { + netdev_err(lif->netdev, "notifyq init failed %d\n", err); + return err; + } + + lif->last_eid = 0; + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); + + /* preset the callback info */ + q->info[0].cb_arg = lif; + + qcq->flags |= IONIC_QCQ_F_INITED; + + return 0; +} + +static int ionic_station_set(struct ionic_lif *lif) +{ + struct net_device *netdev = lif->netdev; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_getattr = { 
+ .opcode = IONIC_CMD_LIF_GETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_MAC, + }, + }; + u8 mac_address[ETH_ALEN]; + struct sockaddr addr; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + netdev_dbg(lif->netdev, "found initial MAC addr %pM\n", + ctx.comp.lif_getattr.mac); + ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac); + + if (is_zero_ether_addr(mac_address)) { + eth_hw_addr_random(netdev); + netdev_dbg(netdev, "Random MAC generated: %pM\n", netdev->dev_addr); + ether_addr_copy(mac_address, netdev->dev_addr); + + err = ionic_program_mac(lif, mac_address); + if (err < 0) + return err; + + if (err > 0) { + netdev_dbg(netdev, "%s: SET/GET ATTR MAC not the same - old FW running\n", + __func__); + return 0; + } + } + + if (!is_zero_ether_addr(netdev->dev_addr)) { + /* If the netdev mac is non-zero and doesn't match the default + * device address, it was set by something earlier and we're + * likely here again after a fw-upgrade reset. We need to be + * sure the netdev mac is in our filter list. + */ + if (!ether_addr_equal(mac_address, + netdev->dev_addr)) + ionic_lif_addr_add(lif, netdev->dev_addr); + } else { + /* Update the netdev mac with the device's mac */ + ether_addr_copy(addr.sa_data, mac_address); + addr.sa_family = AF_INET; + err = eth_prepare_mac_addr_change(netdev, &addr); + if (err) { + netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n", + addr.sa_data, err); + return 0; + } + + eth_commit_mac_addr_change(netdev, &addr); + } + + netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", + netdev->dev_addr); + ionic_lif_addr_add(lif, netdev->dev_addr); + + return 0; +} + +int ionic_lif_init(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->ionic->idev; + struct device *dev = lif->ionic->dev; + struct ionic_lif_init_comp comp; + int dbpage_num; + int err; + + mutex_lock(&lif->ionic->dev_cmd_lock); + ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); + err = ionic_dev_cmd_wait(lif->ionic, devcmd_timeout); + ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); + mutex_unlock(&lif->ionic->dev_cmd_lock); + if (err) + return err; + + lif->hw_index = le16_to_cpu(comp.hw_index); + + /* now that we have the hw_index we can figure out our doorbell page */ + lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); + if (!lif->dbid_count) { + dev_err(dev, "No doorbell pages, aborting\n"); + return -EINVAL; + } + + mutex_lock(&lif->dbid_inuse_lock); + lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL); + if (!lif->dbid_inuse) { + dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); + mutex_unlock(&lif->dbid_inuse_lock); + return -ENOMEM; + } + + /* first doorbell id reserved for kernel (dbid aka pid == zero) */ + set_bit(0, lif->dbid_inuse); + mutex_unlock(&lif->dbid_inuse_lock); + lif->kern_pid = 0; + + dbpage_num = ionic_db_page_num(lif, lif->kern_pid); + lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); + if (!lif->kern_dbpage) { + dev_err(dev, "Cannot map dbpage, aborting\n"); + err = -ENOMEM; + goto err_out_free_dbid; + } + + if (lif->ionic->neth_eqs) { + err = ionic_eqs_alloc(lif->ionic); + if (err) { + dev_err(dev, "Cannot allocate EQs: %d\n", err); + lif->ionic->neth_eqs = 0; + } else { + err = ionic_eqs_init(lif->ionic); + if (err) { + dev_err(dev, "Cannot init EQs: %d\n", err); + ionic_eqs_free(lif->ionic); + lif->ionic->neth_eqs = 0; + } + } + } + + err = ionic_lif_adminq_init(lif); + if (err) + goto err_out_adminq_deinit; + + 
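/* (Editor's note: the AdminQ is brought up before anything else here - + * the NotifyQ init below, like the rest of the LIF configuration, is + * posted to the device as an AdminQ command via + * ionic_adminq_post_wait().) */ + 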
if (lif->ionic->nnqs_per_lif) { + err = ionic_lif_notifyq_init(lif); + if (err) + goto err_out_notifyq_deinit; + } + + err = ionic_init_nic_features(lif); + if (err) + goto err_out_notifyq_deinit; + + if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { + err = ionic_rx_filters_init(lif); + if (err) + goto err_out_notifyq_deinit; + } + + err = ionic_station_set(lif); + if (err) + goto err_out_notifyq_deinit; + + lif->rx_copybreak = rx_copybreak; + + set_bit(IONIC_LIF_F_INITED, lif->state); + + INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); + + return 0; + +err_out_notifyq_deinit: + napi_disable(&lif->adminqcq->napi); + ionic_lif_qcq_deinit(lif, lif->notifyqcq); +err_out_adminq_deinit: + ionic_lif_qcq_deinit(lif, lif->adminqcq); + ionic_eqs_deinit(lif->ionic); + ionic_eqs_free(lif->ionic); + ionic_lif_reset(lif); + ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); + lif->kern_dbpage = NULL; +err_out_free_dbid: + ionic_lif_dbid_inuse_free(lif); + + return err; +} + +static void ionic_lif_notify_work(struct work_struct *ws) +{ +} + +static void ionic_lif_set_netdev_info(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_NAME, + }, + }; + + strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name, + sizeof(ctx.cmd.lif_setattr.name)); + + ionic_adminq_post_wait(lif, &ctx); +} + +struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) +{ + if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) + return NULL; + + return netdev_priv(netdev); +} + +static int ionic_lif_notify(struct notifier_block *nb, + unsigned long event, void *info) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(info); + struct ionic *ionic = container_of(nb, struct ionic, nb); + struct ionic_lif *lif = ionic_netdev_lif(ndev); + + if (!lif || lif->ionic != ionic) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGENAME: + ionic_lif_set_netdev_info(lif); + break; + } + + return NOTIFY_DONE; +} + +int ionic_lif_register(struct ionic_lif *lif) +{ + int err; + + ionic_lif_register_phc(lif); + + INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work); + + lif->ionic->nb.notifier_call = ionic_lif_notify; + + err = register_netdevice_notifier(&lif->ionic->nb); + if (err) + lif->ionic->nb.notifier_call = NULL; + + /* only register LIF0 for now */ + err = register_netdev(lif->netdev); + if (err) { + dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); + ionic_lif_unregister_phc(lif); + return err; + } + + ionic_link_status_check_request(lif, CAN_SLEEP); + lif->registered = true; + ionic_lif_set_netdev_info(lif); + + return 0; +} + +void ionic_lif_unregister(struct ionic_lif *lif) +{ + if (lif->ionic->nb.notifier_call) { + unregister_netdevice_notifier(&lif->ionic->nb); + cancel_work_sync(&lif->ionic->nb_work); + lif->ionic->nb.notifier_call = NULL; + } + + if (lif->netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(lif->netdev); + + ionic_lif_unregister_phc(lif); + + lif->registered = false; +} + +static void ionic_lif_queue_identify(struct ionic_lif *lif) +{ + union ionic_q_identity __iomem *q_ident; + struct ionic *ionic = lif->ionic; + struct ionic_dev *idev; + int qtype; + int err; + + idev = &lif->ionic->idev; + q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data; + + for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) { + struct ionic_qtype_info 
*qti = &lif->qtype_info[qtype]; + + /* filter out the ones we know about */ + switch (qtype) { + case IONIC_QTYPE_ADMINQ: + case IONIC_QTYPE_NOTIFYQ: + case IONIC_QTYPE_RXQ: + case IONIC_QTYPE_TXQ: + break; + default: + continue; + } + + memset(qti, 0, sizeof(*qti)); + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype, + ionic_qtype_versions[qtype]); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + if (!err) { + qti->version = ioread8(&q_ident->version); + qti->supported = ioread8(&q_ident->supported); + qti->features = readq(&q_ident->features); + qti->desc_sz = ioread16(&q_ident->desc_sz); + qti->comp_sz = ioread16(&q_ident->comp_sz); + qti->sg_desc_sz = ioread16(&q_ident->sg_desc_sz); + qti->max_sg_elems = ioread16(&q_ident->max_sg_elems); + qti->sg_desc_stride = ioread16(&q_ident->sg_desc_stride); + } + mutex_unlock(&ionic->dev_cmd_lock); + + if (err == -EINVAL) { + dev_err(ionic->dev, "qtype %d not supported\n", qtype); + continue; + } else if (err == -EIO) { + dev_err(ionic->dev, "q_ident failed, not supported on older FW\n"); + return; + } else if (err) { + dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n", + qtype, err); + return; + } + + dev_dbg(ionic->dev, " qtype[%d].version = %d\n", + qtype, qti->version); + dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n", + qtype, qti->supported); + dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n", + qtype, qti->features); + dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n", + qtype, qti->desc_sz); + dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n", + qtype, qti->comp_sz); + dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n", + qtype, qti->sg_desc_sz); + dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n", + qtype, qti->max_sg_elems); + dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", + qtype, qti->sg_desc_stride); + } + + /* Make sure that EQ support is disabled if not all the + * bits are in place. + * + * This is to support internal testing with intermediate FW + * versions, especially with testing FW upgrade, and shouldn't + * be needed in released versions. 
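+ * + * (Editor's note: when the RX and TX feature bits disagree, the code + * below clears IONIC_QIDENT_F_EQ on both and zeroes neth_eqs, so the + * driver falls back to per-queue interrupts.)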
+ */ + if ((lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_EQ) != + (lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_EQ)) { + dev_warn(ionic->dev, "EQ version bugfix\n"); + lif->qtype_info[IONIC_QTYPE_RXQ].features &= ~IONIC_QIDENT_F_EQ; + lif->qtype_info[IONIC_QTYPE_TXQ].features &= ~IONIC_QIDENT_F_EQ; + ionic->neth_eqs = 0; + } +} + +int ionic_lif_identify(struct ionic *ionic, u8 lif_type, + union ionic_lif_identity *lid) +{ + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + + dev_dbg(ionic->dev, "capabilities 0x%llx\n", + le64_to_cpu(lid->capabilities)); + + dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", + le32_to_cpu(lid->eth.max_ucast_filters)); + dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", + le32_to_cpu(lid->eth.max_mcast_filters)); + dev_dbg(ionic->dev, "eth.features 0x%llx\n", + le64_to_cpu(lid->eth.config.features)); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_EQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_EQ])); + dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); + dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac); + dev_dbg(ionic->dev, "eth.config.mtu %d\n", + le32_to_cpu(lid->eth.config.mtu)); + + return 0; +} + +int ionic_lif_size(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + unsigned int nrdma_eqs_per_lif; + union ionic_lif_config *lc; + unsigned int ntxqs_per_lif; + unsigned int nrxqs_per_lif; + unsigned int nnqs_per_lif; + unsigned int dev_neth_eqs; + unsigned int dev_nintrs; + unsigned int min_intrs; + unsigned int nrdma_eqs; + unsigned int neth_eqs; + unsigned int nintrs; + unsigned int nxqs; + int err; + + /* retrieve basic values from FW */ + lc = &ident->lif.eth.config; + dev_nintrs = le32_to_cpu(ident->dev.nintrs); + + if (ionic->is_mgmt_nic) + dev_neth_eqs = 0; + else + dev_neth_eqs = le32_to_cpu(ident->dev.eq_count); + dev_neth_eqs = min_t(int, dev_neth_eqs, MAX_ETH_EQS); + + nrdma_eqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); + nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); + ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); + nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); + + /* limit values to play nice with kdump */ + if (is_kdump_kernel()) { + dev_nintrs = 2; + nnqs_per_lif = 0; + ntxqs_per_lif = 1; + nrxqs_per_lif = 1; + } + + /* Queue counts are driven by CPU count and interrupt availability. + * In the best case, we'd like to have an individual interrupt + * per CPU and one queuepair per interrupt. For systems with + * small CPU counts, or when we limit the queues-per-lif, this + * works out pretty easily. 
However, this can get out of hand and + * have the driver requesting hundreds of interrupt vectors if we + * allow lots of queues per lif and lots of RDMA queues. + * + * One way of managing this is that when the interrupt count gets + * out of hand we cut down on the number of things that need + * interrupts until we get down to what we can get from the OS. + * + * Another way of managing this is by using a smaller number of + * EventQueues on which we can multiplex interrupt events. + */ + + /* reserve last queue id for hardware timestamping */ + if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) { + if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) { + lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP); + } else { + ntxqs_per_lif -= 1; + nrxqs_per_lif -= 1; + } + } + + /* limit TxRx queuepairs and RDMA event queues to num cpu */ + nxqs = min(ntxqs_per_lif, nrxqs_per_lif); + nxqs = min(nxqs, num_online_cpus()); + nrdma_eqs = min(nrdma_eqs_per_lif, num_online_cpus()); + neth_eqs = min(dev_neth_eqs, num_online_cpus()); + + /* EventQueue interrupt usage: (if eq_count != 0) + * 1 aq intr + n EQs + m RDMA + * + * Default interrupt usage: + * lif0 has n TxRx queues and 1 Adminq + * (1 aq interrupt + n TxRx queue interrupts) + * + whatever's left is for RDMA queues + */ +try_again: + if (neth_eqs) + nintrs = 1 + neth_eqs + nrdma_eqs; + else + nintrs = 1 + nxqs + nrdma_eqs; + min_intrs = 2; /* adminq + 1 TxRx queue pair */ + + if (nintrs > dev_nintrs) + goto try_fewer; + + err = ionic_bus_alloc_irq_vectors(ionic, nintrs); + if (err == -ENOSPC) { + goto try_fewer; + } else if (err < 0) { + dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); + return err; + } else if (err != nintrs) { + ionic_bus_free_irq_vectors(ionic); + goto try_fewer; + } + + /* At this point we have the interrupts we need */ + ionic->nnqs_per_lif = nnqs_per_lif; + ionic->nrdma_eqs_per_lif = nrdma_eqs; + ionic->ntxqs_per_lif = nxqs; + ionic->nrxqs_per_lif = nxqs; + ionic->nintrs = nintrs; + ionic->nlifs = 1; + ionic->neth_eqs = neth_eqs; + + ionic_debugfs_add_sizes(ionic); + + return 0; + +try_fewer: + /* If we can't get enough interrupts, we start cutting + * back on the requirements and try again. 
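+ * + * (Illustrative example, not from the original source: with 16 online + * CPUs, no EQs or RDMA, and dev_nintrs = 10, the first pass asks for + * 1 + 16 = 17 vectors; nxqs is halved to 8 on the retry, 1 + 8 = 9 + * fits, and the driver settles on 8 TxRx queuepairs.)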
+ */ + /* Cut NotifyQ's per lif in half (but probably already at 1) */ + if (nnqs_per_lif > 1) { + nnqs_per_lif >>= 1; + goto try_again; + } + /* Cut RDMA EQs in half */ + if (nrdma_eqs > 1) { + nrdma_eqs >>= 1; + goto try_again; + } + /* Cut Eth EQs in half */ + if (neth_eqs > 1) { + neth_eqs >>= 1; + goto try_again; + } + /* Cut number of TxRx queuepairs */ + if (nxqs > 1) { + nxqs >>= 1; + goto try_again; + } + dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); + return -ENOSPC; +} + +void ionic_device_reset(struct ionic_lif *lif) +{ + struct ionic *ionic = lif->ionic; + int err; + + dev_info(ionic->dev, "Device reset starting\n"); + + mutex_lock(&lif->queue_lock); + ionic_stop_queues_reconfig(lif); + ionic_txrx_free(lif); + ionic_lif_deinit(lif); + ionic_reset(ionic); + ionic_qcqs_free(lif); + mutex_unlock(&lif->queue_lock); + + ionic_port_reset(ionic); + ionic_reset(ionic); + + ionic_init_devinfo(ionic); + err = ionic_identify(ionic); + if (err) + goto err_out; + err = ionic_port_identify(ionic); + if (err) + goto err_out; + err = ionic_port_init(ionic); + if (err) + goto err_out; + + mutex_lock(&lif->queue_lock); + + err = ionic_qcqs_alloc(lif); + if (err) + goto err_unlock; + + err = ionic_lif_init(lif); + if (err) + goto err_qcqs_free; + + ionic_lif_set_netdev_info(lif); + ionic_rx_filter_replay(lif); + + if (netif_running(lif->netdev)) { + err = ionic_txrx_alloc(lif); + if (err) + goto err_lifs_deinit; + + ionic_start_queues_reconfig(lif); + } + + mutex_unlock(&lif->queue_lock); + + netif_device_attach(lif->netdev); + + dev_info(ionic->dev, "Device reset done\n"); + return; + +err_lifs_deinit: + ionic_lif_deinit(lif); +err_qcqs_free: + ionic_qcqs_free(lif); +err_unlock: + mutex_unlock(&lif->queue_lock); +err_out: + return; +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_lif.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_lif.h new file mode 100644 index 0000000000..8d261bee25 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_lif.h @@ -0,0 +1,498 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#ifndef _IONIC_LIF_H_ +#define _IONIC_LIF_H_ + +#include +#include + +#ifdef CONFIG_DIMLIB +#include +#else +#include "dim.h" +#endif + +#include "ionic_rx_filter.h" + +#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */ +#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */ + +#ifdef IONIC_DEBUG_STATS +#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1) +#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1) +#endif + +#define ADD_ADDR true +#define DEL_ADDR false +#define CAN_SLEEP true +#define CAN_NOT_SLEEP false + +/* Tunables */ +#define IONIC_RX_COPYBREAK_DEFAULT 256 +#define IONIC_TX_BUDGET_DEFAULT 256 + +struct ionic_tx_stats { + u64 pkts; + u64 bytes; + u64 csum_none; + u64 csum; + u64 tso; + u64 tso_bytes; + u64 frags; + u64 vlan_inserted; + u64 clean; + u64 linearize; + u64 crc32_csum; +#ifdef IONIC_DEBUG_STATS + u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR]; +#endif + u64 dma_map_err; + u64 hwstamp_valid; + u64 hwstamp_invalid; +}; + +struct ionic_rx_stats { + u64 pkts; + u64 bytes; + u64 csum_none; + u64 csum_complete; +#ifdef IONIC_DEBUG_STATS + u64 buffers_posted; +#endif + u64 dropped; + u64 vlan_stripped; + u64 csum_error; + u64 dma_map_err; + u64 alloc_err; + u64 hwstamp_valid; + u64 hwstamp_invalid; + u64 cache_full; + u64 cache_empty; + u64 cache_busy; + u64 cache_get; + u64 cache_put; + u64 buf_reused; + 
u64 buf_exhausted; + u64 buf_not_reusable; +}; + +#define IONIC_QCQ_F_INITED BIT(0) +#define IONIC_QCQ_F_SG BIT(1) +#define IONIC_QCQ_F_INTR BIT(2) +#define IONIC_QCQ_F_TX_STATS BIT(3) +#define IONIC_QCQ_F_RX_STATS BIT(4) +#define IONIC_QCQ_F_NOTIFYQ BIT(5) +#define IONIC_QCQ_F_CMB_RINGS BIT(6) + +#ifdef IONIC_DEBUG_STATS +struct ionic_napi_stats { + u64 poll_count; + u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR]; +}; +#endif + +struct ionic_qcq { + void *q_base; + dma_addr_t q_base_pa; /* might not be page aligned */ + u32 q_size; + u32 cq_size; + void *cq_base; + dma_addr_t cq_base_pa; /* might not be page aligned */ + void *sg_base; + dma_addr_t sg_base_pa; /* might not be page aligned */ + u32 sg_size; + void __iomem *cmb_q_base; + phys_addr_t cmb_q_base_pa; + u32 cmb_q_size; + u32 cmb_pgid; + u32 cmb_order; + bool armed; + struct dim dim; + struct ionic_queue q; + struct ionic_cq cq; + struct ionic_intr_info intr; + struct timer_list napi_deadline; + struct napi_struct napi; +#ifdef IONIC_DEBUG_STATS + struct ionic_napi_stats napi_stats; +#endif + unsigned int flags; + struct ionic_qcq *napi_qcq; + struct dentry *dentry; +}; + +#define q_to_qcq(q) container_of(q, struct ionic_qcq, q) +#define q_to_tx_stats(q) (&(q)->lif->txqstats[(q)->index]) +#define q_to_rx_stats(q) (&(q)->lif->rxqstats[(q)->index]) +#define napi_to_qcq(napi) container_of(napi, struct ionic_qcq, napi) +#define napi_to_cq(napi) (&napi_to_qcq(napi)->cq) + +enum ionic_deferred_work_type { + IONIC_DW_TYPE_RX_MODE, + IONIC_DW_TYPE_LINK_STATUS, + IONIC_DW_TYPE_LIF_RESET, +}; + +struct ionic_deferred_work { + struct list_head list; + enum ionic_deferred_work_type type; + union { + u8 addr[ETH_ALEN]; + u8 fw_status; + }; +}; + +struct ionic_deferred { + spinlock_t lock; /* lock for deferred work list */ + struct list_head list; + struct work_struct work; +}; + +struct ionic_lif_sw_stats { + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets; + u64 rx_bytes; + u64 tx_tso; + u64 tx_tso_bytes; + u64 tx_csum_none; + u64 tx_csum; + u64 rx_csum_none; + u64 rx_csum_complete; + u64 rx_csum_error; + u64 tx_hwstamp_valid; + u64 tx_hwstamp_invalid; + u64 rx_hwstamp_valid; + u64 rx_hwstamp_invalid; + u64 hw_tx_dropped; + u64 hw_rx_dropped; + u64 hw_rx_over_errors; + u64 hw_rx_missed_errors; + u64 hw_tx_aborted_errors; +}; + +enum ionic_lif_state_flags { + IONIC_LIF_F_INITED, + IONIC_LIF_F_SW_DEBUG_STATS, + IONIC_LIF_F_UP, + IONIC_LIF_F_LINK_CHECK_REQUESTED, + IONIC_LIF_F_FILTER_SYNC_NEEDED, + IONIC_LIF_F_FW_RESET, + IONIC_LIF_F_FW_STOPPING, + IONIC_LIF_F_RDMA_SNIFFER, + IONIC_LIF_F_SPLIT_INTR, + IONIC_LIF_F_BROKEN, + IONIC_LIF_F_TX_DIM_INTR, + IONIC_LIF_F_RX_DIM_INTR, + IONIC_LIF_F_CMB_RINGS, + + /* leave this as last */ + IONIC_LIF_F_STATE_SIZE +}; + +struct ionic_lif_cfg { + int index; + enum ionic_api_prsn prsn; + + void *priv; + void (*reset_cb)(void *priv); +}; + +struct ionic_qtype_info { + u8 version; + u8 supported; + u64 features; + u16 desc_sz; + u16 comp_sz; + u16 sg_desc_sz; + u16 max_sg_elems; + u16 sg_desc_stride; +}; + +struct ionic_phc; + +#define IONIC_LIF_NAME_MAX_SZ 32 +struct ionic_lif { + struct net_device *netdev; + DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE); + struct ionic *ionic; + u64 __iomem *kern_dbpage; + u32 rx_copybreak; + unsigned int nxqs; + + struct ionic_qcq **txqcqs; + struct ionic_tx_stats *txqstats; + struct ionic_qcq **rxqcqs; + struct ionic_rx_stats *rxqstats; + struct ionic_qcq *hwstamp_txq; + struct ionic_qcq *hwstamp_rxq; + + struct ionic_qcq *adminqcq; + struct ionic_qcq *notifyqcq; + struct 
mutex queue_lock; /* lock for queue structures */ + struct mutex config_lock; /* lock for config actions */ + spinlock_t adminq_lock; /* lock for AdminQ operations */ + unsigned int kern_pid; + + struct work_struct tx_timeout_work; + struct ionic_deferred deferred; + + u64 last_eid; + unsigned int nrdma_eqs; + unsigned int nrdma_eqs_avail; + unsigned int ntxq_descs; + unsigned int nrxq_descs; + u64 rxq_features; + u16 rx_mode; + bool registered; + u64 hw_features; + unsigned int index; + unsigned int hw_index; + + u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE]; + u8 *rss_ind_tbl; + dma_addr_t rss_ind_tbl_pa; + u32 rss_ind_tbl_sz; + u16 rss_types; + + u16 lif_type; + unsigned int nmcast; + unsigned int nucast; + unsigned int nvlans; + unsigned int max_vlans; + char name[IONIC_LIF_NAME_MAX_SZ]; + + struct ionic_lif_info *info; + dma_addr_t info_pa; + u32 info_sz; + + unsigned int dbid_count; + struct mutex dbid_inuse_lock; /* lock the dbid bit list */ + unsigned long *dbid_inuse; + + union ionic_lif_identity *identity; + struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX]; + + struct ionic_rx_filters rx_filters; + u32 rx_coalesce_usecs; /* what the user asked for */ + u32 rx_coalesce_hw; /* what the hw is using */ + u32 tx_coalesce_usecs; /* what the user asked for */ + u32 tx_coalesce_hw; /* what the hw is using */ + + struct ionic_phc *phc; + + /* TODO: Make this a list if more than one child is supported */ + struct ionic_lif_cfg child_lif_cfg; + + u64 n_txrx_alloc; + + struct dentry *dentry; +}; + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +struct ionic_phc { + spinlock_t lock; /* lock for cc and tc */ + struct cyclecounter cc; + struct timecounter tc; + + struct mutex config_lock; /* lock for ts_config */ + struct hwtstamp_config ts_config; + u64 ts_config_rx_filt; + u32 ts_config_tx_mode; + + u32 init_cc_mult; + long aux_work_delay; + + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp; + struct ionic_lif *lif; +#ifndef HAVE_PTP_CLOCK_DO_AUX_WORK + struct delayed_work dwork; +#endif +}; +#endif + +struct ionic_queue_params { + unsigned int nxqs; + unsigned int ntxq_descs; + unsigned int nrxq_descs; + u64 rxq_features; + bool intr_split; + bool cmb_enabled; +}; + +static inline void ionic_init_queue_params(struct ionic_lif *lif, + struct ionic_queue_params *qparam) +{ + qparam->nxqs = lif->nxqs; + qparam->ntxq_descs = lif->ntxq_descs; + qparam->nrxq_descs = lif->nrxq_descs; + qparam->rxq_features = lif->rxq_features; + qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + qparam->cmb_enabled = test_bit(IONIC_LIF_F_CMB_RINGS, lif->state); +} + +static inline void ionic_set_queue_params(struct ionic_lif *lif, + struct ionic_queue_params *qparam) +{ + lif->nxqs = qparam->nxqs; + lif->ntxq_descs = qparam->ntxq_descs; + lif->nrxq_descs = qparam->nrxq_descs; + lif->rxq_features = qparam->rxq_features; + + if (qparam->intr_split) + set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + else + clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + + if (qparam->cmb_enabled) + set_bit(IONIC_LIF_F_CMB_RINGS, lif->state); + else + clear_bit(IONIC_LIF_F_CMB_RINGS, lif->state); +} + +static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs) +{ + u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult); + u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div); + + /* Div-by-zero should never be an issue, but check anyway */ + if (!div || !mult) + return 0; + + /* Round up in case usecs is close to the next hw unit */ + usecs += (div / mult) >> 1; + + /* Convert from usecs to device units */ 
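+ /* (Illustrative example, editor's addition: with identity values + * mult = 1 and div = 4, a request of 10 usecs is rounded up by + * (div / mult) >> 1 = 2 and maps to (12 * 1) / 4 = 3 device units.) */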
+ return (usecs * mult) / div; +} + +static inline bool ionic_is_pf(struct ionic *ionic) +{ + return ionic->pdev && + ionic->pdev->device == PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF; +} + +static inline bool ionic_use_eqs(struct ionic_lif *lif) +{ + return lif->ionic->neth_eqs && + lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_EQ; +} + +void ionic_lif_deferred_enqueue(struct ionic_deferred *def, + struct ionic_deferred_work *work); +void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep); +#ifdef HAVE_VOID_NDO_GET_STATS64 +void ionic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *ns); +#else +struct rtnl_link_stats64 *ionic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *ns); +#endif +int ionic_lif_register(struct ionic_lif *lif); +void ionic_lif_unregister(struct ionic_lif *lif); +int ionic_lif_identify(struct ionic *ionic, u8 lif_type, + union ionic_lif_identity *lif_ident); +int ionic_lif_size(struct ionic *ionic); + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +void ionic_lif_hwstamp_replay(struct ionic_lif *lif); +void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif); +int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr); +int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr); +ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter); +void ionic_lif_register_phc(struct ionic_lif *lif); +void ionic_lif_unregister_phc(struct ionic_lif *lif); +void ionic_lif_alloc_phc(struct ionic_lif *lif); +void ionic_lif_free_phc(struct ionic_lif *lif); +#else +static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {} +static inline void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) {} + +static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} + +static inline int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} + +static inline ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter) +{ + return ns_to_ktime(0); +} + +static inline void ionic_lif_register_phc(struct ionic_lif *lif) {} +static inline void ionic_lif_unregister_phc(struct ionic_lif *lif) {} +static inline void ionic_lif_alloc_phc(struct ionic_lif *lif) {} +static inline void ionic_lif_free_phc(struct ionic_lif *lif) {} +#endif + +int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif); +int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif); +int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all); +int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode); +int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class); + +int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, + const u8 *key, const u32 *indir); + +int ionic_intr_alloc(struct ionic *ionic, struct ionic_intr_info *intr); +void ionic_intr_free(struct ionic *ionic, int index); +void ionic_lif_rx_mode(struct ionic_lif *lif); +int ionic_reconfigure_queues(struct ionic_lif *lif, + struct ionic_queue_params *qparam); +int ionic_lif_alloc(struct ionic *ionic); +int ionic_lif_init(struct ionic_lif *lif); +void ionic_lif_free(struct ionic_lif *lif); +void ionic_lif_deinit(struct ionic_lif *lif); + +int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); +int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); + +struct ionic_lif *ionic_netdev_lif(struct net_device *netdev); +void ionic_device_reset(struct ionic_lif *lif); + +#ifdef IONIC_DEBUG_STATS +static inline void 
debug_stats_txq_post(struct ionic_queue *q, bool dbell) +{ + struct ionic_txq_desc *desc = &q->txq[q->head_idx]; + u8 num_sg_elems; + + q->dbell_count += dbell; + + num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT) + & IONIC_TXQ_DESC_NSGE_MASK); + if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1)) + num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1; + + q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++; +} + +static inline void debug_stats_napi_poll(struct ionic_qcq *qcq, + unsigned int work_done) +{ + qcq->napi_stats.poll_count++; + + if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1)) + work_done = IONIC_MAX_NUM_NAPI_CNTR - 1; + + qcq->napi_stats.work_done_cntr[work_done]++; +} + +#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++) +#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++) +#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell) +#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \ + debug_stats_napi_poll(qcq, work_done) +#else +#define DEBUG_STATS_CQE_CNT(cq) +#define DEBUG_STATS_RX_BUFF_CNT(q) +#define DEBUG_STATS_TXQ_POST(q, dbell) +#define DEBUG_STATS_NAPI_POLL(qcq, work_done) +#endif + +#endif /* _IONIC_LIF_H_ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_main.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_main.c new file mode 100644 index 0000000000..f3da81e6bc --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_main.c @@ -0,0 +1,831 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include +#include +#include +#include +#include + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_debugfs.h" + +bool port_init_up = 1; +module_param(port_init_up, bool, 0); +MODULE_PARM_DESC(port_init_up, "Set port to ADMIN_UP on init (default 1, 0 to disable)"); + +MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION); +MODULE_AUTHOR("Pensando Systems, Inc"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IONIC_DRV_VERSION); +MODULE_INFO(supported, "external"); + +unsigned int rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; +module_param(rx_copybreak, uint, 0600); +MODULE_PARM_DESC(rx_copybreak, "Maximum size of packet that is copied to a bounce buffer on RX"); + +unsigned int rx_fill_threshold = IONIC_RX_FILL_THRESHOLD; +module_param(rx_fill_threshold, uint, 0600); +MODULE_PARM_DESC(rx_fill_threshold, "Minimum number of buffers to fill"); + +unsigned int tx_budget = IONIC_TX_BUDGET_DEFAULT; +module_param(tx_budget, uint, 0600); +MODULE_PARM_DESC(tx_budget, "Number of tx completions to process per NAPI poll"); + +unsigned int devcmd_timeout = DEVCMD_TIMEOUT; +module_param(devcmd_timeout, uint, 0600); +MODULE_PARM_DESC(devcmd_timeout, "Devcmd timeout in seconds (default 30 secs)"); + +unsigned long affinity_mask_override; +module_param(affinity_mask_override, ulong, 0600); +MODULE_PARM_DESC(affinity_mask_override, "IRQ affinity mask to override (max 64 bits)"); + +static const char *ionic_error_to_str(enum ionic_status_code code) +{ + switch (code) { + case IONIC_RC_SUCCESS: + return "IONIC_RC_SUCCESS"; + case IONIC_RC_EVERSION: + return "IONIC_RC_EVERSION"; + case IONIC_RC_EOPCODE: + return "IONIC_RC_EOPCODE"; + case IONIC_RC_EIO: + return "IONIC_RC_EIO"; + case IONIC_RC_EPERM: + return "IONIC_RC_EPERM"; + case IONIC_RC_EQID: + return "IONIC_RC_EQID"; + case IONIC_RC_EQTYPE: + return "IONIC_RC_EQTYPE"; + case IONIC_RC_ENOENT: + return "IONIC_RC_ENOENT"; + case IONIC_RC_EINTR: + return 
"IONIC_RC_EINTR"; + case IONIC_RC_EAGAIN: + return "IONIC_RC_EAGAIN"; + case IONIC_RC_ENOMEM: + return "IONIC_RC_ENOMEM"; + case IONIC_RC_EFAULT: + return "IONIC_RC_EFAULT"; + case IONIC_RC_EBUSY: + return "IONIC_RC_EBUSY"; + case IONIC_RC_EEXIST: + return "IONIC_RC_EEXIST"; + case IONIC_RC_EINVAL: + return "IONIC_RC_EINVAL"; + case IONIC_RC_ENOSPC: + return "IONIC_RC_ENOSPC"; + case IONIC_RC_ERANGE: + return "IONIC_RC_ERANGE"; + case IONIC_RC_BAD_ADDR: + return "IONIC_RC_BAD_ADDR"; + case IONIC_RC_DEV_CMD: + return "IONIC_RC_DEV_CMD"; + case IONIC_RC_ENOSUPP: + return "IONIC_RC_ENOSUPP"; + case IONIC_RC_ERROR: + return "IONIC_RC_ERROR"; + case IONIC_RC_ERDMA: + return "IONIC_RC_ERDMA"; + case IONIC_RC_BAD_FW: + return "IONIC_RC_BAD_FW"; + default: + return "IONIC_RC_UNKNOWN"; + } +} + +int ionic_error_to_errno(enum ionic_status_code code) +{ + switch (code) { + case IONIC_RC_SUCCESS: + return 0; + case IONIC_RC_EVERSION: + case IONIC_RC_EQTYPE: + case IONIC_RC_EQID: + case IONIC_RC_EINVAL: + case IONIC_RC_ENOSUPP: + return -EINVAL; + case IONIC_RC_EPERM: + return -EPERM; + case IONIC_RC_ENOENT: + return -ENOENT; + case IONIC_RC_EAGAIN: + return -EAGAIN; + case IONIC_RC_ENOMEM: + return -ENOMEM; + case IONIC_RC_EFAULT: + return -EFAULT; + case IONIC_RC_EBUSY: + return -EBUSY; + case IONIC_RC_EEXIST: + return -EEXIST; + case IONIC_RC_ENOSPC: + return -ENOSPC; + case IONIC_RC_ERANGE: + return -ERANGE; + case IONIC_RC_BAD_ADDR: + return -EFAULT; + case IONIC_RC_BAD_FW: + return -ENOEXEC; + case IONIC_RC_EOPCODE: + case IONIC_RC_EINTR: + case IONIC_RC_DEV_CMD: + case IONIC_RC_ERROR: + case IONIC_RC_ERDMA: + case IONIC_RC_EIO: + default: + return -EIO; + } +} +EXPORT_SYMBOL_GPL(ionic_error_to_errno); + +static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode) +{ + switch (opcode) { + case IONIC_CMD_NOP: + return "IONIC_CMD_NOP"; + case IONIC_CMD_INIT: + return "IONIC_CMD_INIT"; + case IONIC_CMD_RESET: + return "IONIC_CMD_RESET"; + case IONIC_CMD_IDENTIFY: + return "IONIC_CMD_IDENTIFY"; + case IONIC_CMD_GETATTR: + return "IONIC_CMD_GETATTR"; + case IONIC_CMD_SETATTR: + return "IONIC_CMD_SETATTR"; + case IONIC_CMD_PORT_IDENTIFY: + return "IONIC_CMD_PORT_IDENTIFY"; + case IONIC_CMD_PORT_INIT: + return "IONIC_CMD_PORT_INIT"; + case IONIC_CMD_PORT_RESET: + return "IONIC_CMD_PORT_RESET"; + case IONIC_CMD_PORT_GETATTR: + return "IONIC_CMD_PORT_GETATTR"; + case IONIC_CMD_PORT_SETATTR: + return "IONIC_CMD_PORT_SETATTR"; + case IONIC_CMD_LIF_INIT: + return "IONIC_CMD_LIF_INIT"; + case IONIC_CMD_LIF_RESET: + return "IONIC_CMD_LIF_RESET"; + case IONIC_CMD_LIF_IDENTIFY: + return "IONIC_CMD_LIF_IDENTIFY"; + case IONIC_CMD_LIF_SETATTR: + return "IONIC_CMD_LIF_SETATTR"; + case IONIC_CMD_LIF_GETATTR: + return "IONIC_CMD_LIF_GETATTR"; + case IONIC_CMD_LIF_SETPHC: + return "IONIC_CMD_LIF_SETPHC"; + case IONIC_CMD_RX_MODE_SET: + return "IONIC_CMD_RX_MODE_SET"; + case IONIC_CMD_RX_FILTER_ADD: + return "IONIC_CMD_RX_FILTER_ADD"; + case IONIC_CMD_RX_FILTER_DEL: + return "IONIC_CMD_RX_FILTER_DEL"; + case IONIC_CMD_Q_IDENTIFY: + return "IONIC_CMD_Q_IDENTIFY"; + case IONIC_CMD_Q_INIT: + return "IONIC_CMD_Q_INIT"; + case IONIC_CMD_Q_CONTROL: + return "IONIC_CMD_Q_CONTROL"; + case IONIC_CMD_RDMA_RESET_LIF: + return "IONIC_CMD_RDMA_RESET_LIF"; + case IONIC_CMD_RDMA_CREATE_EQ: + return "IONIC_CMD_RDMA_CREATE_EQ"; + case IONIC_CMD_RDMA_CREATE_CQ: + return "IONIC_CMD_RDMA_CREATE_CQ"; + case IONIC_CMD_RDMA_CREATE_ADMINQ: + return "IONIC_CMD_RDMA_CREATE_ADMINQ"; + case IONIC_CMD_FW_DOWNLOAD: + return 
"IONIC_CMD_FW_DOWNLOAD"; + case IONIC_CMD_FW_CONTROL: + return "IONIC_CMD_FW_CONTROL"; + case IONIC_CMD_FW_DOWNLOAD_V1: + return "IONIC_CMD_FW_DOWNLOAD_V1"; + case IONIC_CMD_FW_CONTROL_V1: + return "IONIC_CMD_FW_CONTROL_V1"; + case IONIC_CMD_VF_GETATTR: + return "IONIC_CMD_VF_GETATTR"; + case IONIC_CMD_VF_SETATTR: + return "IONIC_CMD_VF_SETATTR"; + default: + return "DEVCMD_UNKNOWN"; + } +} + +const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr) +{ + switch (attr) { + case IONIC_VF_ATTR_SPOOFCHK: + return "IONIC_VF_ATTR_SPOOFCHK"; + case IONIC_VF_ATTR_TRUST: + return "IONIC_VF_ATTR_TRUST"; + case IONIC_VF_ATTR_LINKSTATE: + return "IONIC_VF_ATTR_LINKSTATE"; + case IONIC_VF_ATTR_MAC: + return "IONIC_VF_ATTR_MAC"; + case IONIC_VF_ATTR_VLAN: + return "IONIC_VF_ATTR_VLAN"; + case IONIC_VF_ATTR_RATE: + return "IONIC_VF_ATTR_RATE"; + case IONIC_VF_ATTR_STATSADDR: + return "IONIC_VF_ATTR_STATSADDR"; + default: + return "IONIC_VF_ATTR_UNKNOWN"; + } +} + +static void ionic_adminq_flush(struct ionic_lif *lif) +{ + struct ionic_desc_info *desc_info; + unsigned long irqflags; + struct ionic_queue *q; + + spin_lock_irqsave(&lif->adminq_lock, irqflags); + if (!lif->adminqcq) { + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + return; + } + + q = &lif->adminqcq->q; + + while (q->tail_idx != q->head_idx) { + desc_info = &q->info[q->tail_idx]; + memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd)); + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + } + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); +} + +void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode, + u8 status, int err) +{ + const char *stat_str; + + stat_str = (err == -ETIMEDOUT) ? "TIMEOUT" : + ionic_error_to_str(status); + + netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n", + ionic_opcode_to_str(opcode), opcode, stat_str, err); +} + +static int ionic_adminq_check_err(struct ionic_lif *lif, + struct ionic_admin_ctx *ctx, + const bool timeout, + const bool do_msg) +{ + int err = 0; + + if (ctx->comp.comp.status || timeout) { + err = timeout ? 
-ETIMEDOUT : + ionic_error_to_errno(ctx->comp.comp.status); + + if (do_msg) + ionic_adminq_netdev_err_print(lif, ctx->cmd.cmd.opcode, + ctx->comp.comp.status, err); + + if (timeout) + ionic_adminq_flush(lif); + } + + return err; +} + +static void ionic_adminq_cb(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg) +{ + struct ionic_admin_ctx *ctx = cb_arg; + struct ionic_admin_comp *comp; + + if (!ctx) + return; + + comp = cq_info->cq_desc; + + memcpy(&ctx->comp, comp, sizeof(*comp)); + + dev_dbg(q->dev, "comp admin queue command:\n"); + dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1, + &ctx->comp, sizeof(ctx->comp), true); + + complete_all(&ctx->work); +} + +bool ionic_adminq_poke_doorbell(struct ionic_queue *q) +{ + struct ionic_lif *lif = q->lif; + unsigned long now, then, dif; + unsigned long irqflags; + + spin_lock_irqsave(&lif->adminq_lock, irqflags); + + if (q->tail_idx == q->head_idx) { + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + return false; + } + + now = READ_ONCE(jiffies); + then = q->dbell_jiffies; + dif = now - then; + + if (dif > q->dbell_deadline) { + ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, + q->dbval | q->head_idx); + + q->dbell_jiffies = now; + } + + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + + return true; +} + +int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + struct ionic_desc_info *desc_info; + unsigned long irqflags; + struct ionic_queue *q; + int err = 0; + + spin_lock_irqsave(&lif->adminq_lock, irqflags); + if (!lif->adminqcq) { + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + return -EIO; + } + + q = &lif->adminqcq->q; + + if (!ionic_q_has_space(q, 1)) { + err = -ENOSPC; + goto err_out; + } + + err = ionic_heartbeat_check(lif->ionic); + if (err) + goto err_out; + + desc_info = &q->info[q->head_idx]; + memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd)); + + dev_dbg(&lif->netdev->dev, "post admin queue command:\n"); + dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1, + &ctx->cmd, sizeof(ctx->cmd), true); + + ionic_q_post(q, true, ionic_adminq_cb, ctx); + +err_out: + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + + return err; +} + +int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, + const int err, const bool do_msg) +{ + struct net_device *netdev = lif->netdev; + unsigned long time_limit; + unsigned long time_start; + unsigned long time_done; + unsigned long remaining; + const char *name; + + name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + + if (err) { + if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + netdev_err(netdev, "Posting of %s (%d) failed: %d\n", + name, ctx->cmd.cmd.opcode, err); + + ctx->comp.comp.status = IONIC_RC_ERROR; + return err; + } + + time_start = jiffies; + time_limit = time_start + HZ * (ulong)devcmd_timeout; + do { + remaining = wait_for_completion_timeout(&ctx->work, + IONIC_ADMINQ_TIME_SLICE); + + /* check for done */ + if (remaining) + break; + + /* force a check of FW status and break out if FW reset */ + (void) ionic_heartbeat_check(lif->ionic); + if ((test_bit(IONIC_LIF_F_FW_RESET, lif->state) && + !lif->ionic->idev.fw_status_ready) || + test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { + if (do_msg) + netdev_warn(netdev, "%s (%d) interrupted, FW in reset\n", + name, ctx->cmd.cmd.opcode); + ctx->comp.comp.status = IONIC_RC_ERROR; + return -ENXIO; + } + + } while (time_before(jiffies, time_limit)); + time_done = jiffies; + + dev_dbg(lif->ionic->dev, "%s: 
elapsed %d msecs\n", + __func__, jiffies_to_msecs(time_done - time_start)); + + return ionic_adminq_check_err(lif, ctx, + time_after_eq(time_done, time_limit), + do_msg); +} + +static int __ionic_adminq_post_wait(struct ionic_lif *lif, + struct ionic_admin_ctx *ctx, + const bool do_msg) +{ + int err; + + /* if platform dev is resetting, don't bother with AdminQ, it's not there */ + if (lif->ionic->pfdev && test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) + return 0; + + err = ionic_adminq_post(lif, ctx); + + return ionic_adminq_wait(lif, ctx, err, do_msg); +} + +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + return __ionic_adminq_post_wait(lif, ctx, true); +} + +int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + return __ionic_adminq_post_wait(lif, ctx, false); +} + +static void ionic_dev_cmd_clean(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + + iowrite32(0, &idev->dev_cmd_regs->doorbell); + memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd)); +} + +void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status, + int err) +{ + const char *stat_str; + + stat_str = (err == -ETIMEDOUT) ? "TIMEOUT" : + ionic_error_to_str(status); + + dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n", + ionic_opcode_to_str(opcode), opcode, stat_str, err); +} + +static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds, + const bool do_msg) +{ + struct ionic_dev *idev = &ionic->idev; + unsigned long start_time; + unsigned long max_wait; + unsigned long duration; + int done = 0; + bool fw_up; + int opcode; + int err; + + /* Wait for dev cmd to complete, retrying if we get EAGAIN, + * but don't wait any longer than max_seconds. 
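+	 * A sketch of the retry path below, for orientation: on IONIC_RC_EAGAIN the done flag is cleared and the same command is re-rung rather than re-posted: + *   iowrite32(0, &idev->dev_cmd_regs->done);      (clear the completion flag) + *   msleep(1000);                                 (give the FW a moment) + *   iowrite32(1, &idev->dev_cmd_regs->doorbell);  (ring the same command again) + *   goto try_again;                               (the max_wait deadline still applies)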
+ */ + max_wait = jiffies + (max_seconds * HZ); +try_again: + opcode = ioread8(&idev->dev_cmd_regs->cmd.cmd.opcode); + start_time = jiffies; + for (fw_up = ionic_is_fw_running(idev); + !done && fw_up && time_before(jiffies, max_wait); + fw_up = ionic_is_fw_running(idev)) { + done = ionic_dev_cmd_done(idev); + if (done) + break; + usleep_range(100, 200); + } + duration = jiffies - start_time; + + dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n", + ionic_opcode_to_str(opcode), opcode, + done, duration / HZ, duration); + + if (!done && !fw_up) { + ionic_dev_cmd_clean(ionic); + dev_warn(ionic->dev, "DEVCMD %s (%d) interrupted - FW is down\n", + ionic_opcode_to_str(opcode), opcode); + return -ENXIO; + } + + if (!done && !time_before(jiffies, max_wait)) { + ionic_dev_cmd_clean(ionic); + dev_warn(ionic->dev, "DEVCMD %s (%d) timeout after %ld secs\n", + ionic_opcode_to_str(opcode), opcode, max_seconds); + return -ETIMEDOUT; + } + + err = ionic_dev_cmd_status(&ionic->idev); + if (err) { + if (err == IONIC_RC_EAGAIN && !time_after(jiffies, max_wait)) { + dev_dbg(ionic->dev, "DEV_CMD %s (%d), %s (%d) retrying...\n", + ionic_opcode_to_str(opcode), opcode, + ionic_error_to_str(err), err); + + iowrite32(0, &idev->dev_cmd_regs->done); + msleep(1000); + iowrite32(1, &idev->dev_cmd_regs->doorbell); + goto try_again; + } + + if (do_msg) + ionic_dev_cmd_dev_err_print(ionic, opcode, err, + ionic_error_to_errno(err)); + + return ionic_error_to_errno(err); + } + + ionic_dev_cmd_clean(ionic); + + return 0; +} + +int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds) +{ + return __ionic_dev_cmd_wait(ionic, max_seconds, true); +} + +int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_seconds) +{ + return __ionic_dev_cmd_wait(ionic, max_seconds, false); +} + +int ionic_set_dma_mask(struct ionic *ionic) +{ + struct device *dev = ionic->dev; + int err; + + /* Query system for DMA addressing limitation for the device. */ +#ifdef CONFIG_PPC64 + ionic->pdev->no_64bit_msi = 1; +#endif + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN)); + if (err) + dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. 
err=%d\n", + err); + + return err; +} + +int ionic_setup(struct ionic *ionic) +{ + int err; + + err = ionic_dev_setup(ionic); + if (err) + return err; + + ionic_debugfs_add_dev_cmd(ionic); + ionic_reset(ionic); + + return 0; +} + +int ionic_identify(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + memset(ident, 0, sizeof(*ident)); + + ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX); + ident->drv.os_dist = 0; + strncpy(ident->drv.os_dist_str, utsname()->release, + sizeof(ident->drv.os_dist_str) - 1); + ident->drv.kernel_ver = cpu_to_le32(LINUX_VERSION_CODE); + strncpy(ident->drv.kernel_ver_str, utsname()->version, + sizeof(ident->drv.kernel_ver_str) - 1); + strncpy(ident->drv.driver_ver_str, IONIC_DRV_VERSION, + sizeof(ident->drv.driver_ver_str) - 1); + + mutex_lock(&ionic->dev_cmd_lock); + + sz = min(sizeof(ident->drv), sizeof(idev->dev_cmd_regs->data)); + memcpy_toio(&idev->dev_cmd_regs->data, &ident->drv, sz); + +#if defined(IONIC_DEV_IDENTITY_VERSION_2) + ionic_dev_cmd_identify(idev, IONIC_DEV_IDENTITY_VERSION_2); +#else + ionic_dev_cmd_identify(idev, IONIC_IDENTITY_VERSION_1); +#endif + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + if (!err) { + sz = min(sizeof(ident->dev), sizeof(idev->dev_cmd_regs->data)); + memcpy_fromio(&ident->dev, &idev->dev_cmd_regs->data, sz); + } + mutex_unlock(&ionic->dev_cmd_lock); + + if (err) { + dev_err(ionic->dev, "Cannot identify ionic: %d\n", err); + goto err_out; + } + + if (isprint(idev->dev_info.fw_version[0]) && + isascii(idev->dev_info.fw_version[0])) + dev_info(ionic->dev, "FW: %.*s\n", + (int)(sizeof(idev->dev_info.fw_version) - 1), + idev->dev_info.fw_version); + else + dev_info(ionic->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n", + (u8)idev->dev_info.fw_version[0], + (u8)idev->dev_info.fw_version[1], + (u8)idev->dev_info.fw_version[2], + (u8)idev->dev_info.fw_version[3]); + + err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC, + &ionic->ident.lif); + if (err) { + dev_err(ionic->dev, "Cannot identify LIFs: %d\n", err); + goto err_out; + } + + return 0; + +err_out: + return err; +} + +int ionic_init(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + int err; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_init(idev); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +int ionic_reset(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + int err; + + if (!ionic_is_fw_running(idev)) + return 0; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_reset(idev); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +int ionic_port_identify(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + struct ionic_dev *idev = &ionic->idev; + struct device *dev = ionic->dev; + size_t sz; + int err; + + mutex_lock(&ionic->dev_cmd_lock); + + ionic_dev_cmd_port_identify(idev); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + if (!err) { + sz = min(sizeof(ident->port), sizeof(idev->dev_cmd_regs->data)); + memcpy_fromio(&ident->port, &idev->dev_cmd_regs->data, sz); + } + + mutex_unlock(&ionic->dev_cmd_lock); + + dev_dbg(dev, "type %d\n", ident->port.type); + dev_dbg(dev, "speed %d\n", ident->port.config.speed); + dev_dbg(dev, "mtu %d\n", ident->port.config.mtu); + dev_dbg(dev, "state %d\n", ident->port.config.state); + dev_dbg(dev, "an_enable %d\n", 
ident->port.config.an_enable); + dev_dbg(dev, "fec_type %d\n", ident->port.config.fec_type); + dev_dbg(dev, "pause_type %d\n", ident->port.config.pause_type); + dev_dbg(dev, "loopback_mode %d\n", ident->port.config.loopback_mode); + + return err; +} + +int ionic_port_init(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + if (!idev->port_info) { + idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE); + idev->port_info = dma_alloc_coherent(ionic->dev, + idev->port_info_sz, + &idev->port_info_pa, + GFP_KERNEL); + if (!idev->port_info) + return -ENOMEM; + } + + sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data)); + + mutex_lock(&ionic->dev_cmd_lock); + + memcpy_toio(&idev->dev_cmd_regs->data, &ident->port.config, sz); + ionic_dev_cmd_port_init(idev); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + + if (port_init_up) { + ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP); + (void)ionic_dev_cmd_wait(ionic, devcmd_timeout); + } + + mutex_unlock(&ionic->dev_cmd_lock); + if (err) { + dev_err(ionic->dev, "Failed to init port\n"); + dma_free_coherent(ionic->dev, idev->port_info_sz, + idev->port_info, idev->port_info_pa); + idev->port_info = NULL; + idev->port_info_pa = 0; + } + + return err; +} + +int ionic_port_reset(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + int err = 0; + + if (!idev->port_info) + return 0; + + if (ionic_is_fw_running(idev)) { + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_reset(idev); + err = ionic_dev_cmd_wait(ionic, devcmd_timeout); + mutex_unlock(&ionic->dev_cmd_lock); + } + + dma_free_coherent(ionic->dev, idev->port_info_sz, + idev->port_info, idev->port_info_pa); + + idev->port_info = NULL; + idev->port_info_pa = 0; + + return err; +} + +static int __init ionic_init_module(void) +{ + unsigned long max_affinity = GENMASK_ULL((min(num_present_cpus(), + (unsigned int)(sizeof(unsigned long)*BITS_PER_BYTE))-1), 0); + + pr_info("%s %s, ver %s\n", + IONIC_DRV_NAME, IONIC_DRV_DESCRIPTION, IONIC_DRV_VERSION); + + ionic_debugfs_create(); + + if (affinity_mask_override) { + /* limit affinity mask override to the available CPUs */ + if (affinity_mask_override > max_affinity) { + affinity_mask_override = (affinity_mask_override & max_affinity); + pr_info("limiting affinity mask to: 0x%lx\n", + affinity_mask_override); + } else { + pr_info("affinity_mask_override: %lx\n", + affinity_mask_override); + } + } + + return ionic_bus_register_driver(); +} + +static void __exit ionic_cleanup_module(void) +{ + /* If there's a long devcmd_timeout set, don't let + * hung FW slow us down when exiting + */ + devcmd_timeout = min_t(int, devcmd_timeout, SHORT_TIMEOUT); + + ionic_bus_unregister_driver(); + ionic_debugfs_destroy(); + + pr_info("%s removed\n", IONIC_DRV_NAME); +} + +module_init(ionic_init_module); +module_exit(ionic_cleanup_module); diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_phc.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_phc.c new file mode 100644 index 0000000000..1e21a57251 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_phc.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_ethtool.h" + +/* XXX not for upstream: kernel config is changed in kcompat */ +/* 
normally this file will not be compiled if ptp is not enabled */ +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) + +static int ionic_hwstamp_tx_mode(int config_tx_type) +{ + switch (config_tx_type) { + case HWTSTAMP_TX_OFF: + return IONIC_TXSTAMP_OFF; + case HWTSTAMP_TX_ON: + return IONIC_TXSTAMP_ON; + case HWTSTAMP_TX_ONESTEP_SYNC: + return IONIC_TXSTAMP_ONESTEP_SYNC; +#ifdef HAVE_HWSTAMP_TX_ONESTEP_P2P + case HWTSTAMP_TX_ONESTEP_P2P: + return IONIC_TXSTAMP_ONESTEP_P2P; +#endif + default: + return -ERANGE; + } +} + +static u64 ionic_hwstamp_rx_filt(int config_rx_filter) +{ + switch (config_rx_filter) { + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + return IONIC_PKT_CLS_PTP1_ALL; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + return IONIC_PKT_CLS_PTP1_SYNC; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + return IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP1_DREQ; + + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + return IONIC_PKT_CLS_PTP2_L4_ALL; + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + return IONIC_PKT_CLS_PTP2_L4_SYNC; + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + return IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L4_DREQ; + + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + return IONIC_PKT_CLS_PTP2_L2_ALL; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + return IONIC_PKT_CLS_PTP2_L2_SYNC; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + return IONIC_PKT_CLS_PTP2_L2_SYNC | IONIC_PKT_CLS_PTP2_L2_DREQ; + + case HWTSTAMP_FILTER_PTP_V2_EVENT: + return IONIC_PKT_CLS_PTP2_ALL; + case HWTSTAMP_FILTER_PTP_V2_SYNC: + return IONIC_PKT_CLS_PTP2_SYNC; + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + return IONIC_PKT_CLS_PTP2_SYNC | IONIC_PKT_CLS_PTP2_DREQ; +#ifdef HAVE_HWTSTAMP_FILTER_NTP_ALL + case HWTSTAMP_FILTER_NTP_ALL: + return IONIC_PKT_CLS_NTP_ALL; +#endif + default: + return 0; + } +} + +static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif, + struct hwtstamp_config *new_ts) +{ + struct ionic *ionic = lif->ionic; + struct hwtstamp_config *config; + struct hwtstamp_config ts; + int tx_mode = 0; + u64 rx_filt = 0; + int err, err2; + bool rx_all; + __le64 mask; + + if (!lif->phc || !lif->phc->ptp) + return -EOPNOTSUPP; + + mutex_lock(&lif->phc->config_lock); + + if (new_ts) { + config = new_ts; + } else { + /* If called with new_ts == NULL, replay the previous request + * primarily for recovery after a FW_RESET. + * We saved the previous configuration request info, so copy + * the previous request for reference, clear the current state + * to match the device's reset state, and run with it. 
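+	 * Note: clearing ts_config_tx_mode and ts_config_rx_filt here matters, + * since the code below only calls the set_hwstamp_txmode/rxfilt helpers + * when the requested value differs from the cached one; after a FW reset + * the device has forgotten both, so the cache must read as zero for the + * replay to actually reprogram the hardware.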
+ */ + config = &ts; + memcpy(config, &lif->phc->ts_config, sizeof(*config)); + memset(&lif->phc->ts_config, 0, sizeof(lif->phc->ts_config)); + lif->phc->ts_config_tx_mode = 0; + lif->phc->ts_config_rx_filt = 0; + } + + tx_mode = ionic_hwstamp_tx_mode(config->tx_type); + if (tx_mode < 0) { + err = tx_mode; + goto err_queues; + } + + mask = cpu_to_le64(BIT_ULL(tx_mode)); + if ((ionic->ident.lif.eth.hwstamp_tx_modes & mask) != mask) { + err = -ERANGE; + goto err_queues; + } + + rx_filt = ionic_hwstamp_rx_filt(config->rx_filter); + rx_all = config->rx_filter != HWTSTAMP_FILTER_NONE && !rx_filt; + + mask = cpu_to_le64(rx_filt); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) != mask) { + rx_filt = 0; + rx_all = true; + config->rx_filter = HWTSTAMP_FILTER_ALL; + } + + dev_dbg(ionic->dev, "%s: config_rx_filter %d rx_filt %#llx rx_all %d\n", + __func__, config->rx_filter, rx_filt, rx_all); + + if (tx_mode) { + err = ionic_lif_create_hwstamp_txq(lif); + if (err) + goto err_queues; + } + + if (rx_filt) { + err = ionic_lif_create_hwstamp_rxq(lif); + if (err) + goto err_queues; + } + + if (tx_mode != lif->phc->ts_config_tx_mode) { + err = ionic_lif_set_hwstamp_txmode(lif, tx_mode); + if (err) + goto err_txmode; + } + + if (rx_filt != lif->phc->ts_config_rx_filt) { + err = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt); + if (err) + goto err_rxfilt; + } + + if (rx_all != (lif->phc->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)) { + err = ionic_lif_config_hwstamp_rxq_all(lif, rx_all); + if (err) + goto err_rxall; + } + + memcpy(&lif->phc->ts_config, config, sizeof(*config)); + lif->phc->ts_config_rx_filt = rx_filt; + lif->phc->ts_config_tx_mode = tx_mode; + + mutex_unlock(&lif->phc->config_lock); + + return 0; + +err_rxall: + if (rx_filt != lif->phc->ts_config_rx_filt) { + rx_filt = lif->phc->ts_config_rx_filt; + err2 = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt); + if (err2) + dev_err(ionic->dev, + "Failed to revert rx timestamp filter: %d\n", err2); + } +err_rxfilt: + if (tx_mode != lif->phc->ts_config_tx_mode) { + tx_mode = lif->phc->ts_config_tx_mode; + err2 = ionic_lif_set_hwstamp_txmode(lif, tx_mode); + if (err2) + dev_err(ionic->dev, + "Failed to revert tx timestamp mode: %d\n", err2); + } +err_txmode: + /* special queues remain allocated, just unused */ +err_queues: + mutex_unlock(&lif->phc->config_lock); + return err; +} + +int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (!lif->phc || !lif->phc->ptp) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + mutex_lock(&lif->queue_lock); + err = ionic_lif_hwstamp_set_ts_config(lif, &config); + mutex_unlock(&lif->queue_lock); + if (err) { + netdev_info(lif->netdev, "hwstamp set failed: %d\n", err); + return err; + } + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +void ionic_lif_hwstamp_replay(struct ionic_lif *lif) +{ + int err; + + if (!lif->phc || !lif->phc->ptp) + return; + + mutex_lock(&lif->queue_lock); + err = ionic_lif_hwstamp_set_ts_config(lif, NULL); + mutex_unlock(&lif->queue_lock); + if (err) + netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err); +} + +void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) +{ + int err; + + if (!lif->phc || !lif->phc->ptp) + return; + + mutex_lock(&lif->phc->config_lock); + + if (lif->phc->ts_config_tx_mode) { + err = ionic_lif_create_hwstamp_txq(lif); + if (err) + netdev_info(lif->netdev, "hwstamp recreate txq 
failed: %d\n", err); + } + + if (lif->phc->ts_config_rx_filt) { + err = ionic_lif_create_hwstamp_rxq(lif); + if (err) + netdev_info(lif->netdev, "hwstamp recreate rxq failed: %d\n", err); + } + + mutex_unlock(&lif->phc->config_lock); +} + +int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) +{ + struct hwtstamp_config config; + + if (!lif->phc || !lif->phc->ptp) + return -EOPNOTSUPP; + + mutex_lock(&lif->phc->config_lock); + memcpy(&config, &lif->phc->ts_config, sizeof(config)); + mutex_unlock(&lif->phc->config_lock); + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + return 0; +} + +#ifdef HAVE_PHC_GETTIMEX64 +static u64 ionic_hwstamp_read(struct ionic *ionic, + struct ptp_system_timestamp *sts) +#else +static u64 ionic_hwstamp_read(struct ionic *ionic) +#endif +{ + u32 tick_high_before, tick_high, tick_low; + + /* read and discard low part to defeat hw staging of high part */ + (void)ioread32(&ionic->idev.hwstamp_regs->tick_low); + + tick_high_before = ioread32(&ionic->idev.hwstamp_regs->tick_high); + +#ifdef HAVE_PHC_GETTIMEX64 + ptp_read_system_prets(sts); +#endif + tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low); +#ifdef HAVE_PHC_GETTIMEX64 + ptp_read_system_postts(sts); +#endif + + tick_high = ioread32(&ionic->idev.hwstamp_regs->tick_high); + + /* If tick_high changed, re-read tick_low once more. Assume tick_high + * cannot change again so soon as in the span of re-reading tick_low. + */ + if (tick_high != tick_high_before) { +#ifdef HAVE_PHC_GETTIMEX64 + ptp_read_system_prets(sts); +#endif + tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low); +#ifdef HAVE_PHC_GETTIMEX64 + ptp_read_system_postts(sts); +#endif + } + + return (u64)tick_low | ((u64)tick_high << 32); +} + +static u64 ionic_cc_read(const struct cyclecounter *cc) +{ + struct ionic_phc *phc = container_of(cc, struct ionic_phc, cc); + struct ionic *ionic = phc->lif->ionic; + +#ifdef HAVE_PHC_GETTIMEX64 + return ionic_hwstamp_read(ionic, NULL); +#else + return ionic_hwstamp_read(ionic); +#endif +} + +static int ionic_setphc_cmd(struct ionic_phc *phc, struct ionic_admin_ctx *ctx) +{ + ctx->work = COMPLETION_INITIALIZER_ONSTACK(ctx->work); + + ctx->cmd.lif_setphc.opcode = IONIC_CMD_LIF_SETPHC; + ctx->cmd.lif_setphc.lif_index = cpu_to_le16(phc->lif->index); + + ctx->cmd.lif_setphc.tick = cpu_to_le64(phc->tc.cycle_last); + ctx->cmd.lif_setphc.nsec = cpu_to_le64(phc->tc.nsec); + ctx->cmd.lif_setphc.frac = cpu_to_le64(phc->tc.frac); + ctx->cmd.lif_setphc.mult = cpu_to_le32(phc->cc.mult); + ctx->cmd.lif_setphc.shift = cpu_to_le32(phc->cc.shift); + + return ionic_adminq_post(phc->lif, ctx); +} + +#ifdef HAVE_PTP_ADJFINE +static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm) +{ + struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info); + struct ionic_admin_ctx ctx = {}; + unsigned long irqflags; + s64 adj; + int err; + + /* Reject phc adjustments during device upgrade */ + if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state)) + return -EBUSY; + + /* Adjustment value scaled by 2^16 million */ + adj = (s64)scaled_ppm * phc->init_cc_mult; + + /* Adjustment value to scale */ + adj /= (s64)SCALED_PPM; + + /* Final adjusted multiplier */ + adj += phc->init_cc_mult; + + spin_lock_irqsave(&phc->lock, irqflags); + + /* update the point-in-time basis to now, before adjusting the rate */ + timecounter_read(&phc->tc); + phc->cc.mult = adj; + + /* Setphc commands are posted in-order, sequenced by phc->lock. 
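+	 * (Posting under the lock means a later adjustment can never land on + * the AdminQ ahead of an earlier one, so the device converges on the + * newest mult and time basis.)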
We + * need to drop the lock before waiting for the command to complete. + */ + err = ionic_setphc_cmd(phc, &ctx); + + spin_unlock_irqrestore(&phc->lock, irqflags); + + return ionic_adminq_wait(phc->lif, &ctx, err, true); +} +#endif + +static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info); + struct ionic_admin_ctx ctx = {}; + unsigned long irqflags; + int err; + + /* Reject phc adjustments during device upgrade */ + if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state)) + return -EBUSY; + + spin_lock_irqsave(&phc->lock, irqflags); + + timecounter_adjtime(&phc->tc, delta); + + /* Setphc commands are posted in-order, sequenced by phc->lock. We + * need to drop the lock before waiting for the command to complete. + */ + err = ionic_setphc_cmd(phc, &ctx); + + spin_unlock_irqrestore(&phc->lock, irqflags); + + return ionic_adminq_wait(phc->lif, &ctx, err, true); +} + +static int ionic_phc_settime64(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info); + struct ionic_admin_ctx ctx = {}; + unsigned long irqflags; + int err; + u64 ns; + + /* Reject phc adjustments during device upgrade */ + if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state)) + return -EBUSY; + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&phc->lock, irqflags); + + timecounter_init(&phc->tc, &phc->cc, ns); + + /* Setphc commands are posted in-order, sequenced by phc->lock. We + * need to drop the lock before waiting for the command to complete. + */ + err = ionic_setphc_cmd(phc, &ctx); + + spin_unlock_irqrestore(&phc->lock, irqflags); + + return ionic_adminq_wait(phc->lif, &ctx, err, true); +} + +#ifdef HAVE_PHC_GETTIMEX64 +static int ionic_phc_gettimex64(struct ptp_clock_info *info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +#else +static int ionic_phc_gettime64(struct ptp_clock_info *info, + struct timespec64 *ts) +#endif +{ + struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info); + struct ionic *ionic = phc->lif->ionic; + unsigned long irqflags; + u64 tick, ns; + + /* Do not attempt to read device time during upgrade */ + if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state)) + return -EBUSY; + + spin_lock_irqsave(&phc->lock, irqflags); + +#ifdef HAVE_PHC_GETTIMEX64 + tick = ionic_hwstamp_read(ionic, sts); +#else + tick = ionic_hwstamp_read(ionic); +#endif + + ns = timecounter_cyc2time(&phc->tc, tick); + + spin_unlock_irqrestore(&phc->lock, irqflags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static long ionic_phc_aux_work(struct ptp_clock_info *info) +{ + struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info); + struct ionic_admin_ctx ctx = {}; + unsigned long irqflags; + int err; + + /* Do not update phc during device upgrade, but keep polling to resume + * after upgrade. Since we don't update the point in time basis, there + * is no expectation that we are maintaining the phc time during the + * upgrade. After upgrade, it will need to be readjusted back to the + * correct time by the ptp daemon. + */ + if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state)) + return phc->aux_work_delay; + + spin_lock_irqsave(&phc->lock, irqflags); + + /* update point-in-time basis to now */ + timecounter_read(&phc->tc); + + /* Setphc commands are posted in-order, sequenced by phc->lock. We + * need to drop the lock before waiting for the command to complete. 
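+	 * This periodic refresh also keeps tc.cycle_last recent: as noted in + * the sizing comment in ionic_lif_alloc_phc(), timecounter_cyc2time() + * only tolerates tick deltas within +/- half the mask, so the basis must + * be advanced well before the counter wraps.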
+ */ + err = ionic_setphc_cmd(phc, &ctx); + + spin_unlock_irqrestore(&phc->lock, irqflags); + + ionic_adminq_wait(phc->lif, &ctx, err, true); + + return phc->aux_work_delay; +} + +#ifndef HAVE_PTP_CLOCK_DO_AUX_WORK +void ionic_phc_aux_work_helper(struct work_struct *work) +{ + struct ionic_phc *phc = container_of(work, struct ionic_phc, dwork.work); + long delay; + + delay = ionic_phc_aux_work(&phc->ptp_info); + schedule_delayed_work(&phc->dwork, delay); +} +#endif + +ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 tick) +{ + unsigned long irqflags; + u64 ns; + + if (!lif->phc) + return ktime_set(0, 0); + + spin_lock_irqsave(&lif->phc->lock, irqflags); + ns = timecounter_cyc2time(&lif->phc->tc, tick); + spin_unlock_irqrestore(&lif->phc->lock, irqflags); + + return ns_to_ktime(ns); +} + +static const struct ptp_clock_info ionic_ptp_info = { + .owner = THIS_MODULE, + .name = "ionic_ptp", +#ifdef HAVE_PTP_ADJFINE + .adjfine = ionic_phc_adjfine, +#endif + .adjtime = ionic_phc_adjtime, +#ifdef HAVE_PHC_GETTIMEX64 + .gettimex64 = ionic_phc_gettimex64, +#else + .gettime64 = ionic_phc_gettime64, +#endif + .settime64 = ionic_phc_settime64, +#ifdef HAVE_PTP_CLOCK_DO_AUX_WORK + .do_aux_work = ionic_phc_aux_work, +#endif +}; + +void ionic_lif_register_phc(struct ionic_lif *lif) +{ + if (!lif->phc || !(lif->hw_features & IONIC_ETH_HW_TIMESTAMP)) + return; + + lif->phc->ptp = ptp_clock_register(&lif->phc->ptp_info, lif->ionic->dev); + + if (IS_ERR(lif->phc->ptp)) { + dev_warn(lif->ionic->dev, "Cannot register phc device: %ld\n", + PTR_ERR(lif->phc->ptp)); + + lif->phc->ptp = NULL; + } + + if (lif->phc->ptp) +#ifndef HAVE_PTP_CLOCK_DO_AUX_WORK + schedule_delayed_work(&lif->phc->dwork, lif->phc->aux_work_delay); +#else + ptp_schedule_worker(lif->phc->ptp, lif->phc->aux_work_delay); +#endif +} + +void ionic_lif_unregister_phc(struct ionic_lif *lif) +{ + if (!lif->phc || !lif->phc->ptp) + return; + +#ifndef HAVE_PTP_CLOCK_DO_AUX_WORK + cancel_delayed_work_sync(&lif->phc->dwork); +#endif + ptp_clock_unregister(lif->phc->ptp); + + lif->phc->ptp = NULL; +} + +void ionic_lif_alloc_phc(struct ionic_lif *lif) +{ + struct ionic *ionic = lif->ionic; + struct ionic_phc *phc; + u64 delay, diff, mult; + u64 frac = 0; + u64 features; + u32 shift; + + if (!ionic->idev.hwstamp_regs) + return; + + features = le64_to_cpu(ionic->ident.lif.eth.config.features); + if (!(features & IONIC_ETH_HW_TIMESTAMP)) + return; + + phc = devm_kzalloc(ionic->dev, sizeof(*phc), GFP_KERNEL); + if (!phc) + return; + + phc->lif = lif; + +#ifndef HAVE_PTP_CLOCK_DO_AUX_WORK + INIT_DELAYED_WORK(&phc->dwork, ionic_phc_aux_work_helper); +#endif + phc->cc.read = ionic_cc_read; + phc->cc.mask = le64_to_cpu(ionic->ident.dev.hwstamp_mask); + phc->cc.mult = le32_to_cpu(ionic->ident.dev.hwstamp_mult); + phc->cc.shift = le32_to_cpu(ionic->ident.dev.hwstamp_shift); + + if (!phc->cc.mult) { + dev_err(lif->ionic->dev, + "Invalid device PHC mask multiplier %u, disabling HW timestamp support\n", + phc->cc.mult); + devm_kfree(lif->ionic->dev, phc); + lif->phc = NULL; + return; + } + + dev_dbg(lif->ionic->dev, "Device PHC mask %#llx mult %u shift %u\n", + phc->cc.mask, phc->cc.mult, phc->cc.shift); + + spin_lock_init(&phc->lock); + mutex_init(&phc->config_lock); + + /* max ticks is limited by the multiplier, or by the update period. 
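+	 * Restating the two branches below, for orientation: + *   overflow bound:  diff = U64_MAX / cc.mult / 2 + *   period bound:    diff = (IONIC_PHC_UPDATE_NS << (cc.shift + 2)) / cc.mult + * i.e. roughly the ticks spanning four update periods, since + * ns = (ticks * cc.mult) >> cc.shift.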
*/ + if (phc->cc.shift + 2 + ilog2(IONIC_PHC_UPDATE_NS) >= 64) { + /* max ticks that do not overflow when multiplied by max + * adjusted multiplier (twice the initial multiplier) + */ + diff = U64_MAX / phc->cc.mult / 2; + } else { + /* approx ticks at four times the update period */ + diff = (u64)IONIC_PHC_UPDATE_NS << (phc->cc.shift + 2); + diff = DIV_ROUND_UP(diff, phc->cc.mult); + } + + /* transform to bitmask */ + diff |= diff >> 1; + diff |= diff >> 2; + diff |= diff >> 4; + diff |= diff >> 8; + diff |= diff >> 16; + diff |= diff >> 32; + + /* constrain to the hardware bitmask, and use this as the bitmask */ + diff &= phc->cc.mask; + phc->cc.mask = diff; + + /* the wrap period is now defined by diff (or phc->cc.mask) + * + * we will update the time basis at about 1/4 the wrap period, so + * should not see a difference of more than +/- diff/4. + * + * this is sufficient to not see a difference of more than +/- diff/2, as + * required by timecounter_cyc2time, to detect an old time stamp. + * + * adjust the initial multiplier, being careful to avoid overflow: + * - do not overflow 63 bits: init_cc_mult * SCALED_PPM + * - do not overflow 64 bits: max_mult * (diff / 2) + * + * we want to increase the initial multiplier as much as possible, to + * allow for more precise adjustment in ionic_phc_adjfine. + * + * only adjust the multiplier if we can double it or more. + */ + mult = U64_MAX / 2 / max(diff / 2, SCALED_PPM); + shift = mult / phc->cc.mult; + if (shift >= 2) { + /* initial multiplier will be 2^n of hardware cc.mult */ + shift = fls(shift); + /* increase cc.mult and cc.shift by the same 2^n and n. */ + phc->cc.mult <<= shift; + phc->cc.shift += shift; + } + + dev_dbg(lif->ionic->dev, "Initial PHC mask %#llx mult %u shift %u\n", + phc->cc.mask, phc->cc.mult, phc->cc.shift); + + /* frequency adjustments are relative to the initial multiplier */ + phc->init_cc_mult = phc->cc.mult; + + timecounter_init(&phc->tc, &phc->cc, ktime_get_real_ns()); + + /* Update cycle_last at 1/4 the wrap period, or IONIC_PHC_UPDATE_NS */ + delay = min_t(u64, IONIC_PHC_UPDATE_NS, + cyclecounter_cyc2ns(&phc->cc, diff / 4, 0, &frac)); + dev_dbg(lif->ionic->dev, "Work delay %llu ms\n", delay / NSEC_PER_MSEC); + + phc->aux_work_delay = nsecs_to_jiffies(delay); + + phc->ptp_info = ionic_ptp_info; + + /* We have allowed adjusting the multiplier up to +/- 1 part per 1. + * Here expressed as NORMAL_PPB (1 billion parts per billion).
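+	 * For example (hypothetical numbers): with scaled_ppm at its extremes, + * adjfine's adj = init_cc_mult + (scaled_ppm * init_cc_mult) / SCALED_PPM + * swings cc.mult across [0, 2 * init_cc_mult], which is exactly the + * range the overflow checks above were sized for.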
+ */ + phc->ptp_info.max_adj = NORMAL_PPB; + + lif->phc = phc; +} + +void ionic_lif_free_phc(struct ionic_lif *lif) +{ + if (!lif->phc) + return; + + mutex_destroy(&lif->phc->config_lock); + + devm_kfree(lif->ionic->dev, lif->phc); + lif->phc = NULL; +} + +#endif diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_phc_weak.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_phc_weak.c new file mode 100644 index 0000000000..8bdcdfa3a6 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_phc_weak.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 Pensando Systems, Inc */ + +#include +#include + +struct device; +struct ptp_clock; +struct ptp_clock_event; +struct ptp_clock_info; +enum ptp_pin_function { PTP_PIN_DUMMY }; + +__weak struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, struct device *parent) { return NULL; } +__weak int ptp_clock_unregister(struct ptp_clock *ptp) { return 0; } +__weak void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event) { } +__weak int ptp_clock_index(struct ptp_clock *ptp) { return -1; } +__weak int ptp_find_pin(struct ptp_clock *ptp, enum ptp_pin_function func, unsigned int chan) { return -1; } +__weak int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) { return -EOPNOTSUPP; } +__weak void ptp_cancel_worker_sync(struct ptp_clock *ptp) { } diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_rx_filter.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_rx_filter.c new file mode 100644 index 0000000000..e4d6d386e1 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_rx_filter.c @@ -0,0 +1,619 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include +#include +#include + +#include "ionic.h" +#include "ionic_lif.h" +#include "ionic_rx_filter.h" + +void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f) +{ + struct device *dev = lif->ionic->dev; + + hlist_del(&f->by_id); + hlist_del(&f->by_hash); + devm_kfree(dev, f); +} + +void ionic_rx_filter_replay(struct ionic_lif *lif) +{ + struct ionic_rx_filter_add_cmd *ac; + struct hlist_head new_id_list; + struct ionic_admin_ctx ctx; + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + unsigned int key; + unsigned int i; + int err; + + INIT_HLIST_HEAD(&new_id_list); + ac = &ctx.cmd.rx_filter_add; + + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) { + ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work); + memcpy(ac, &f->cmd, sizeof(f->cmd)); + dev_dbg(&lif->netdev->dev, "replay filter command:\n"); + dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1, + &ctx.cmd, sizeof(ctx.cmd), true); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) { + switch (le16_to_cpu(ac->match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n", + err, + le16_to_cpu(ac->vlan.vlan)); + break; + case IONIC_RX_FILTER_MATCH_MAC: + netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n", + err, ac->mac.addr); + break; + case IONIC_RX_FILTER_MATCH_MAC_VLAN: + netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n", + err, + le16_to_cpu(ac->vlan.vlan), + ac->mac.addr); + break; + } + spin_lock_bh(&lif->rx_filters.lock); + ionic_rx_filter_free(lif, f); + 
spin_unlock_bh(&lif->rx_filters.lock); + + continue; + } + + /* remove from old id list, save new id in tmp list */ + spin_lock_bh(&lif->rx_filters.lock); + hlist_del(&f->by_id); + spin_unlock_bh(&lif->rx_filters.lock); + f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id); + hlist_add_head(&f->by_id, &new_id_list); + } + } + + /* rebuild the by_id hash lists with the new filter ids */ + spin_lock_bh(&lif->rx_filters.lock); + hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) { + key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK; + head = &lif->rx_filters.by_id[key]; + hlist_add_head(&f->by_id, head); + } + spin_unlock_bh(&lif->rx_filters.lock); +} + +int ionic_rx_filters_init(struct ionic_lif *lif) +{ + unsigned int i; + + spin_lock_init(&lif->rx_filters.lock); + + spin_lock_bh(&lif->rx_filters.lock); + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]); + INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]); + } + spin_unlock_bh(&lif->rx_filters.lock); + + return 0; +} + +void ionic_rx_filters_deinit(struct ionic_lif *lif) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + unsigned int i; + + spin_lock_bh(&lif->rx_filters.lock); + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) + ionic_rx_filter_free(lif, f); + } + spin_unlock_bh(&lif->rx_filters.lock); +} + +int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, + u32 hash, struct ionic_admin_ctx *ctx, + enum ionic_filter_state state) +{ + struct device *dev = lif->ionic->dev; + struct ionic_rx_filter_add_cmd *ac; + struct ionic_rx_filter *f = NULL; + struct hlist_head *head; + unsigned int key; + + ac = &ctx->cmd.rx_filter_add; + + switch (le16_to_cpu(ac->match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + key = le16_to_cpu(ac->vlan.vlan); + f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan)); + break; + case IONIC_RX_FILTER_MATCH_MAC: + key = *(u32 *)ac->mac.addr; + f = ionic_rx_filter_by_addr(lif, ac->mac.addr); + break; + case IONIC_RX_FILTER_MATCH_MAC_VLAN: + key = le16_to_cpu(ac->mac_vlan.vlan); + break; + case IONIC_RX_FILTER_STEER_PKTCLASS: + key = 0; + break; + default: + return -EINVAL; + } + + if (f) { + /* remove from current linking so we can refresh it */ + hlist_del(&f->by_id); + hlist_del(&f->by_hash); + } else { + f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC); + if (!f) + return -ENOMEM; + } + + f->flow_id = flow_id; + f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); + f->state = state; + f->rxq_index = rxq_index; + memcpy(&f->cmd, ac, sizeof(f->cmd)); + netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id); + + INIT_HLIST_NODE(&f->by_hash); + INIT_HLIST_NODE(&f->by_id); + + key = hash_32(key, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + hlist_add_head(&f->by_hash, head); + + key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK; + head = &lif->rx_filters.by_id[key]; + hlist_add_head(&f->by_id, head); + + return 0; +} + +struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + unsigned int key; + + key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + + hlist_for_each_entry(f, head, by_hash) { + if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_VLAN) + continue; + if (le16_to_cpu(f->cmd.vlan.vlan) == vid) + return f; + } + + return NULL; +} + +struct 
ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, + const u8 *addr) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + unsigned int key; + + key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + + hlist_for_each_entry(f, head, by_hash) { + if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_MAC) + continue; + if (memcmp(addr, f->cmd.mac.addr, ETH_ALEN) == 0) + return f; + } + + return NULL; +} + +struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + unsigned int key; + + key = hash_32(0, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + + hlist_for_each_entry(f, head, by_hash) { + if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_STEER_PKTCLASS) + continue; + return f; + } + + return NULL; +} + +static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif, + struct ionic_rx_filter_add_cmd *ac) +{ + switch (le16_to_cpu(ac->match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan)); + case IONIC_RX_FILTER_MATCH_MAC: + return ionic_rx_filter_by_addr(lif, ac->mac.addr); + default: + netdev_err(lif->netdev, "unsupported filter match %d", + le16_to_cpu(ac->match)); + return NULL; + } +} + +int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode) +{ + struct ionic_rx_filter *f; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + + f = ionic_rx_filter_by_addr(lif, addr); + if (mode == ADD_ADDR && !f) { + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .lif_index = cpu_to_le16(lif->index), + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }, + }; + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_NEW); + if (err) { + spin_unlock_bh(&lif->rx_filters.lock); + return err; + } + + } else if (mode == ADD_ADDR && f) { + if (f->state == IONIC_FILTER_STATE_OLD) + f->state = IONIC_FILTER_STATE_SYNCED; + + } else if (mode == DEL_ADDR && f) { + if (f->state == IONIC_FILTER_STATE_NEW) + ionic_rx_filter_free(lif, f); + else if (f->state == IONIC_FILTER_STATE_SYNCED) + f->state = IONIC_FILTER_STATE_OLD; + } else if (mode == DEL_ADDR && !f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + spin_unlock_bh(&lif->rx_filters.lock); + + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + return 0; +} + +static int ionic_lif_filter_add(struct ionic_lif *lif, + struct ionic_rx_filter_add_cmd *ac) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + }; + struct ionic_rx_filter *f; + int nfilters; + int err = 0; + + ctx.cmd.rx_filter_add = *ac; + ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD, + ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index), + + spin_lock_bh(&lif->rx_filters.lock); + f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add); + if (f) { + /* don't bother if we already have it and it is sync'd */ + if (f->state == IONIC_FILTER_STATE_SYNCED) { + spin_unlock_bh(&lif->rx_filters.lock); + return 0; + } + + /* mark preemptively as sync'd to block any parallel attempts */ + f->state = IONIC_FILTER_STATE_SYNCED; + } else { + /* save as SYNCED to catch any DEL requests while processing */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } + 
spin_unlock_bh(&lif->rx_filters.lock); + if (err) + return err; + + /* Don't bother with the write to FW if we know there's no room, + * we can try again on the next sync attempt. + * Since the FW doesn't have a way to tell us the vlan limit, + * we start max_vlans at 0 until we hit the ENOSPC error. + */ + switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + netdev_dbg(lif->netdev, "%s: rx_filter add VLAN %d\n", + __func__, ctx.cmd.rx_filter_add.vlan.vlan); + if (lif->max_vlans && lif->nvlans >= lif->max_vlans) + err = -ENOSPC; + break; + case IONIC_RX_FILTER_MATCH_MAC: + netdev_dbg(lif->netdev, "%s: rx_filter add ADDR %pM\n", + __func__, ctx.cmd.rx_filter_add.mac.addr); + nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); + if ((lif->nucast + lif->nmcast) >= nfilters) + err = -ENOSPC; + break; + } + + if (err != -ENOSPC) + err = ionic_adminq_post_wait_nomsg(lif, &ctx); + + spin_lock_bh(&lif->rx_filters.lock); + + if (err && err != -EEXIST) { + /* set the state back to NEW so we can try again later */ + f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add); + if (f && f->state == IONIC_FILTER_STATE_SYNCED) { + f->state = IONIC_FILTER_STATE_NEW; + + /* If -ENOSPC we won't waste time trying to sync again + * until there is a delete that might make room + */ + if (err != -ENOSPC) + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + } + + spin_unlock_bh(&lif->rx_filters.lock); + + /* store the max_vlans limit that we found */ + if (err == -ENOSPC && + le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN) + lif->max_vlans = lif->nvlans; + + /* Prevent unnecessary error messages on recoverable + * errors as the filter will get retried on the next + * sync attempt. + */ + switch (err) { + case -ENOSPC: + case -ENXIO: + case -ETIMEDOUT: + case -EAGAIN: + case -EBUSY: + return 0; + default: + break; + } + + ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode, + ctx.comp.comp.status, err); + switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + netdev_info(lif->netdev, "rx_filter add failed: VLAN %d\n", + ctx.cmd.rx_filter_add.vlan.vlan); + break; + case IONIC_RX_FILTER_MATCH_MAC: + netdev_info(lif->netdev, "rx_filter add failed: ADDR %pM\n", + ctx.cmd.rx_filter_add.mac.addr); + break; + } + + return err; + } + + switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + lif->nvlans++; + break; + case IONIC_RX_FILTER_MATCH_MAC: + if (is_multicast_ether_addr(ctx.cmd.rx_filter_add.mac.addr)) + lif->nmcast++; + else + lif->nucast++; + break; + } + + f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add); + if (f && f->state == IONIC_FILTER_STATE_OLD) { + /* Someone requested a delete while we were adding + * so update the filter info with the results from the add + * and the data will be there for the delete on the next + * sync cycle. 
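+	 * i.e. the filter_id just returned by the device is saved in state + * OLD, so the delete pass in ionic_rx_filter_sync() will remove from + * the device exactly what this add installed.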
+ */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_OLD); + } else { + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } + + spin_unlock_bh(&lif->rx_filters.lock); + + return err; +} + +int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }; + + memcpy(&ac.mac.addr, addr, ETH_ALEN); + + return ionic_lif_filter_add(lif, &ac); +} + +int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), + .vlan.vlan = cpu_to_le16(vid), + }; + + return ionic_lif_filter_add(lif, &ac); +} + +static int ionic_lif_filter_del(struct ionic_lif *lif, + struct ionic_rx_filter_add_cmd *ac) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .lif_index = cpu_to_le16(lif->index), + }, + }; + struct ionic_rx_filter *f; + int state; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + f = ionic_rx_filter_find(lif, ac); + if (!f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + switch (le16_to_cpu(ac->match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + netdev_dbg(lif->netdev, "%s: rx_filter del VLAN %d id %d\n", + __func__, ac->vlan.vlan, f->filter_id); + lif->nvlans--; + break; + case IONIC_RX_FILTER_MATCH_MAC: + netdev_dbg(lif->netdev, "%s: rx_filter del ADDR %pM id %d\n", + __func__, ac->mac.addr, f->filter_id); + if (is_multicast_ether_addr(ac->mac.addr) && lif->nmcast) + lif->nmcast--; + else if (!is_multicast_ether_addr(ac->mac.addr) && lif->nucast) + lif->nucast--; + break; + } + + state = f->state; + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); + ionic_rx_filter_free(lif, f); + + spin_unlock_bh(&lif->rx_filters.lock); + + if (state != IONIC_FILTER_STATE_NEW) { + err = ionic_adminq_post_wait_nomsg(lif, &ctx); + + switch (err) { + /* ignore these errors */ + case -EEXIST: + case -ENXIO: + case -ETIMEDOUT: + case -EAGAIN: + case -EBUSY: + case 0: + break; + default: + ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode, + ctx.comp.comp.status, err); + return err; + } + } + + return 0; +} + +int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }; + + memcpy(&ac.mac.addr, addr, ETH_ALEN); + + return ionic_lif_filter_del(lif, &ac); +} + +int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), + .vlan.vlan = cpu_to_le16(vid), + }; + + return ionic_lif_filter_del(lif, &ac); +} + +struct sync_item { + struct list_head list; + struct ionic_rx_filter f; +}; + +void ionic_rx_filter_sync(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + struct list_head sync_add_list; + struct list_head sync_del_list; + struct sync_item *sync_item; + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + struct sync_item *spos; + unsigned int i; + + INIT_LIST_HEAD(&sync_add_list); + INIT_LIST_HEAD(&sync_del_list); + + clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + /* Copy the filters to be added and deleted + * into a separate local list that needs no locking. 
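+	 * Each sync_item embeds a full copy of the ionic_rx_filter rather + * than a pointer, so the originals can be freed or rehashed under the + * lock while the add/delete work below runs on the copies without it.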
+ */ + spin_lock_bh(&lif->rx_filters.lock); + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) { + if (f->state == IONIC_FILTER_STATE_NEW || + f->state == IONIC_FILTER_STATE_OLD) { + sync_item = devm_kzalloc(dev, sizeof(*sync_item), + GFP_ATOMIC); + if (!sync_item) + goto loop_out; + + sync_item->f = *f; + + if (f->state == IONIC_FILTER_STATE_NEW) + list_add(&sync_item->list, &sync_add_list); + else + list_add(&sync_item->list, &sync_del_list); + } + } + } +loop_out: + spin_unlock_bh(&lif->rx_filters.lock); + + /* If the add or delete fails, it won't get marked as sync'd + * and will be tried again in the next sync action. + * Do the deletes first in case we're in an overflow state and + * they can clear room for some new filters + */ + list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) { + (void)ionic_lif_filter_del(lif, &sync_item->f.cmd); + + list_del(&sync_item->list); + devm_kfree(dev, sync_item); + } + + list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) { + (void)ionic_lif_filter_add(lif, &sync_item->f.cmd); + + list_del(&sync_item->list); + devm_kfree(dev, sync_item); + } +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_rx_filter.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_rx_filter.h new file mode 100644 index 0000000000..b089fbedb6 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_rx_filter.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#ifndef _IONIC_RX_FILTER_H_ +#define _IONIC_RX_FILTER_H_ + +#define IONIC_RXQ_INDEX_ANY (0xFFFF) + +enum ionic_filter_state { + IONIC_FILTER_STATE_SYNCED, + IONIC_FILTER_STATE_NEW, + IONIC_FILTER_STATE_OLD, +}; + +struct ionic_rx_filter { + u32 flow_id; + u32 filter_id; + u16 rxq_index; + enum ionic_filter_state state; + struct ionic_rx_filter_add_cmd cmd; + struct hlist_node by_hash; + struct hlist_node by_id; +}; + +#define IONIC_RX_FILTER_HASH_BITS 10 +#define IONIC_RX_FILTER_HLISTS BIT(IONIC_RX_FILTER_HASH_BITS) +#define IONIC_RX_FILTER_HLISTS_MASK (IONIC_RX_FILTER_HLISTS - 1) +struct ionic_rx_filters { + spinlock_t lock; /* filter list lock */ + struct hlist_head by_hash[IONIC_RX_FILTER_HLISTS]; /* by skb hash */ + struct hlist_head by_id[IONIC_RX_FILTER_HLISTS]; /* by filter_id */ +}; + +void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f); +void ionic_rx_filter_replay(struct ionic_lif *lif); +int ionic_rx_filters_init(struct ionic_lif *lif); +void ionic_rx_filters_deinit(struct ionic_lif *lif); +int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, + u32 hash, struct ionic_admin_ctx *ctx, + enum ionic_filter_state state); +struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid); +struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 *addr); +struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif); +void ionic_rx_filter_sync(struct ionic_lif *lif); +int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode); +int ionic_rx_filters_need_sync(struct ionic_lif *lif); +int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid); +int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid); + +#endif /* _IONIC_RX_FILTER_H_ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_stats.c 
b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_stats.c new file mode 100644 index 0000000000..705397f21e --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_stats.c @@ -0,0 +1,573 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include +#include "ionic.h" +#include "ionic_lif.h" +#include "ionic_stats.h" + +static const struct ionic_stat_desc ionic_lif_stats_desc[] = { + IONIC_LIF_STAT_DESC(tx_packets), + IONIC_LIF_STAT_DESC(tx_bytes), + IONIC_LIF_STAT_DESC(rx_packets), + IONIC_LIF_STAT_DESC(rx_bytes), + IONIC_LIF_STAT_DESC(tx_tso), + IONIC_LIF_STAT_DESC(tx_tso_bytes), + IONIC_LIF_STAT_DESC(tx_csum_none), + IONIC_LIF_STAT_DESC(tx_csum), + IONIC_LIF_STAT_DESC(rx_csum_none), + IONIC_LIF_STAT_DESC(rx_csum_complete), + IONIC_LIF_STAT_DESC(rx_csum_error), + IONIC_LIF_STAT_DESC(hw_tx_dropped), + IONIC_LIF_STAT_DESC(hw_rx_dropped), + IONIC_LIF_STAT_DESC(hw_rx_over_errors), + IONIC_LIF_STAT_DESC(hw_rx_missed_errors), + IONIC_LIF_STAT_DESC(hw_tx_aborted_errors), +}; + +static const struct ionic_stat_desc ionic_port_stats_desc[] = { + IONIC_PORT_STAT_DESC(frames_rx_ok), + IONIC_PORT_STAT_DESC(frames_rx_all), + IONIC_PORT_STAT_DESC(frames_rx_bad_fcs), + IONIC_PORT_STAT_DESC(frames_rx_bad_all), + IONIC_PORT_STAT_DESC(octets_rx_ok), + IONIC_PORT_STAT_DESC(octets_rx_all), + IONIC_PORT_STAT_DESC(frames_rx_unicast), + IONIC_PORT_STAT_DESC(frames_rx_multicast), + IONIC_PORT_STAT_DESC(frames_rx_broadcast), + IONIC_PORT_STAT_DESC(frames_rx_pause), + IONIC_PORT_STAT_DESC(frames_rx_bad_length), + IONIC_PORT_STAT_DESC(frames_rx_undersized), + IONIC_PORT_STAT_DESC(frames_rx_oversized), + IONIC_PORT_STAT_DESC(frames_rx_fragments), + IONIC_PORT_STAT_DESC(frames_rx_jabber), + IONIC_PORT_STAT_DESC(frames_rx_pripause), + IONIC_PORT_STAT_DESC(frames_rx_stomped_crc), + IONIC_PORT_STAT_DESC(frames_rx_too_long), + IONIC_PORT_STAT_DESC(frames_rx_vlan_good), + IONIC_PORT_STAT_DESC(frames_rx_dropped), + IONIC_PORT_STAT_DESC(frames_rx_less_than_64b), + IONIC_PORT_STAT_DESC(frames_rx_64b), + IONIC_PORT_STAT_DESC(frames_rx_65b_127b), + IONIC_PORT_STAT_DESC(frames_rx_128b_255b), + IONIC_PORT_STAT_DESC(frames_rx_256b_511b), + IONIC_PORT_STAT_DESC(frames_rx_512b_1023b), + IONIC_PORT_STAT_DESC(frames_rx_1024b_1518b), + IONIC_PORT_STAT_DESC(frames_rx_1519b_2047b), + IONIC_PORT_STAT_DESC(frames_rx_2048b_4095b), + IONIC_PORT_STAT_DESC(frames_rx_4096b_8191b), + IONIC_PORT_STAT_DESC(frames_rx_8192b_9215b), + IONIC_PORT_STAT_DESC(frames_rx_other), + IONIC_PORT_STAT_DESC(frames_tx_ok), + IONIC_PORT_STAT_DESC(frames_tx_all), + IONIC_PORT_STAT_DESC(frames_tx_bad), + IONIC_PORT_STAT_DESC(octets_tx_ok), + IONIC_PORT_STAT_DESC(octets_tx_total), + IONIC_PORT_STAT_DESC(frames_tx_unicast), + IONIC_PORT_STAT_DESC(frames_tx_multicast), + IONIC_PORT_STAT_DESC(frames_tx_broadcast), + IONIC_PORT_STAT_DESC(frames_tx_pause), + IONIC_PORT_STAT_DESC(frames_tx_pripause), + IONIC_PORT_STAT_DESC(frames_tx_vlan), + IONIC_PORT_STAT_DESC(frames_tx_less_than_64b), + IONIC_PORT_STAT_DESC(frames_tx_64b), + IONIC_PORT_STAT_DESC(frames_tx_65b_127b), + IONIC_PORT_STAT_DESC(frames_tx_128b_255b), + IONIC_PORT_STAT_DESC(frames_tx_256b_511b), + IONIC_PORT_STAT_DESC(frames_tx_512b_1023b), + IONIC_PORT_STAT_DESC(frames_tx_1024b_1518b), + IONIC_PORT_STAT_DESC(frames_tx_1519b_2047b), + IONIC_PORT_STAT_DESC(frames_tx_2048b_4095b), + IONIC_PORT_STAT_DESC(frames_tx_4096b_8191b), + IONIC_PORT_STAT_DESC(frames_tx_8192b_9215b), + IONIC_PORT_STAT_DESC(frames_tx_other), + 
IONIC_PORT_STAT_DESC(frames_tx_pri_0), + IONIC_PORT_STAT_DESC(frames_tx_pri_1), + IONIC_PORT_STAT_DESC(frames_tx_pri_2), + IONIC_PORT_STAT_DESC(frames_tx_pri_3), + IONIC_PORT_STAT_DESC(frames_tx_pri_4), + IONIC_PORT_STAT_DESC(frames_tx_pri_5), + IONIC_PORT_STAT_DESC(frames_tx_pri_6), + IONIC_PORT_STAT_DESC(frames_tx_pri_7), + IONIC_PORT_STAT_DESC(frames_rx_pri_0), + IONIC_PORT_STAT_DESC(frames_rx_pri_1), + IONIC_PORT_STAT_DESC(frames_rx_pri_2), + IONIC_PORT_STAT_DESC(frames_rx_pri_3), + IONIC_PORT_STAT_DESC(frames_rx_pri_4), + IONIC_PORT_STAT_DESC(frames_rx_pri_5), + IONIC_PORT_STAT_DESC(frames_rx_pri_6), + IONIC_PORT_STAT_DESC(frames_rx_pri_7), + IONIC_PORT_STAT_DESC(tx_pripause_0_1us_count), + IONIC_PORT_STAT_DESC(tx_pripause_1_1us_count), + IONIC_PORT_STAT_DESC(tx_pripause_2_1us_count), + IONIC_PORT_STAT_DESC(tx_pripause_3_1us_count), + IONIC_PORT_STAT_DESC(tx_pripause_4_1us_count), + IONIC_PORT_STAT_DESC(tx_pripause_5_1us_count), + IONIC_PORT_STAT_DESC(tx_pripause_6_1us_count), + IONIC_PORT_STAT_DESC(tx_pripause_7_1us_count), + IONIC_PORT_STAT_DESC(rx_pripause_0_1us_count), + IONIC_PORT_STAT_DESC(rx_pripause_1_1us_count), + IONIC_PORT_STAT_DESC(rx_pripause_2_1us_count), + IONIC_PORT_STAT_DESC(rx_pripause_3_1us_count), + IONIC_PORT_STAT_DESC(rx_pripause_4_1us_count), + IONIC_PORT_STAT_DESC(rx_pripause_5_1us_count), + IONIC_PORT_STAT_DESC(rx_pripause_6_1us_count), + IONIC_PORT_STAT_DESC(rx_pripause_7_1us_count), + IONIC_PORT_STAT_DESC(rx_pause_1us_count), + IONIC_PORT_STAT_DESC(frames_tx_truncated), +}; + +static const struct ionic_stat_desc ionic_mgmt_port_stats_desc[] = { + IONIC_MGMT_PORT_STAT_DESC(frames_rx_ok), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_all), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_bad_fcs), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_bad_all), + IONIC_MGMT_PORT_STAT_DESC(octets_rx_ok), + IONIC_MGMT_PORT_STAT_DESC(octets_rx_all), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_unicast), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_multicast), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_broadcast), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_pause), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_bad_length), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_undersized), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_oversized), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_fragments), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_jabber), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_64b), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_65b_127b), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_128b_255b), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_256b_511b), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_512b_1023b), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_1024b_1518b), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_gt_1518b), + IONIC_MGMT_PORT_STAT_DESC(frames_rx_fifo_full), + IONIC_MGMT_PORT_STAT_DESC(frames_tx_ok), + IONIC_MGMT_PORT_STAT_DESC(frames_tx_all), + IONIC_MGMT_PORT_STAT_DESC(frames_tx_bad), + IONIC_MGMT_PORT_STAT_DESC(octets_tx_ok), + IONIC_MGMT_PORT_STAT_DESC(octets_tx_total), + IONIC_MGMT_PORT_STAT_DESC(frames_tx_unicast), + IONIC_MGMT_PORT_STAT_DESC(frames_tx_multicast), + IONIC_MGMT_PORT_STAT_DESC(frames_tx_broadcast), + IONIC_MGMT_PORT_STAT_DESC(frames_tx_pause), +}; + +static const struct ionic_stat_desc ionic_tx_stats_desc[] = { + IONIC_TX_STAT_DESC(pkts), + IONIC_TX_STAT_DESC(bytes), + IONIC_TX_STAT_DESC(clean), + IONIC_TX_STAT_DESC(dma_map_err), + IONIC_TX_STAT_DESC(linearize), + IONIC_TX_STAT_DESC(tso), + IONIC_TX_STAT_DESC(tso_bytes), + IONIC_TX_STAT_DESC(hwstamp_valid), + IONIC_TX_STAT_DESC(hwstamp_invalid), +#ifdef IONIC_DEBUG_STATS + IONIC_TX_STAT_DESC(vlan_inserted), + 
IONIC_TX_STAT_DESC(frags), + IONIC_TX_STAT_DESC(csum), + IONIC_TX_STAT_DESC(csum_none), +#endif +}; + +static const struct ionic_stat_desc ionic_rx_stats_desc[] = { + IONIC_RX_STAT_DESC(pkts), + IONIC_RX_STAT_DESC(bytes), + IONIC_RX_STAT_DESC(dma_map_err), + IONIC_RX_STAT_DESC(alloc_err), +#ifdef IONIC_DEBUG_STATS + IONIC_RX_STAT_DESC(vlan_stripped), + IONIC_RX_STAT_DESC(csum_none), + IONIC_RX_STAT_DESC(csum_complete), +#endif + IONIC_RX_STAT_DESC(csum_error), + IONIC_RX_STAT_DESC(hwstamp_valid), + IONIC_RX_STAT_DESC(hwstamp_invalid), + IONIC_RX_STAT_DESC(dropped), + IONIC_RX_STAT_DESC(cache_full), + IONIC_RX_STAT_DESC(cache_empty), + IONIC_RX_STAT_DESC(cache_busy), + IONIC_RX_STAT_DESC(cache_get), + IONIC_RX_STAT_DESC(cache_put), + IONIC_RX_STAT_DESC(buf_exhausted), + IONIC_RX_STAT_DESC(buf_not_reusable), + IONIC_RX_STAT_DESC(buf_reused), +}; + +#ifdef IONIC_DEBUG_STATS +static const struct ionic_stat_desc ionic_txq_stats_desc[] = { + IONIC_TX_Q_STAT_DESC(stop), + IONIC_TX_Q_STAT_DESC(wake), + IONIC_TX_Q_STAT_DESC(drop), + IONIC_TX_Q_STAT_DESC(dbell_count), + IONIC_TX_Q_STAT_DESC(depth), + IONIC_TX_Q_STAT_DESC(depth_max) +}; +#endif + +#ifdef IONIC_DEBUG_STATS +static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = { + IONIC_CQ_STAT_DESC(compl_count), +}; +#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc) + +static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = { + IONIC_INTR_STAT_DESC(rearm_count), +}; +#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc) + +static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = { + IONIC_NAPI_STAT_DESC(poll_count), +}; +#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc) + +#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc) +#endif + +#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc) +#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc) +#define IONIC_NUM_MGMT_PORT_STATS ARRAY_SIZE(ionic_mgmt_port_stats_desc) +#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc) +#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc) + +#define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues) + +static void ionic_add_lif_txq_stats(struct ionic_lif *lif, int q_num, + struct ionic_lif_sw_stats *stats) +{ + struct ionic_tx_stats *txstats = &lif->txqstats[q_num]; + + stats->tx_packets += txstats->pkts; + stats->tx_bytes += txstats->bytes; + stats->tx_tso += txstats->tso; + stats->tx_tso_bytes += txstats->tso_bytes; + stats->tx_csum_none += txstats->csum_none; + stats->tx_csum += txstats->csum; + stats->tx_hwstamp_valid += txstats->hwstamp_valid; + stats->tx_hwstamp_invalid += txstats->hwstamp_invalid; +} + +static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num, + struct ionic_lif_sw_stats *stats) +{ + struct ionic_rx_stats *rxstats = &lif->rxqstats[q_num]; + + stats->rx_packets += rxstats->pkts; + stats->rx_bytes += rxstats->bytes; + stats->rx_csum_none += rxstats->csum_none; + stats->rx_csum_complete += rxstats->csum_complete; + stats->rx_csum_error += rxstats->csum_error; + stats->rx_hwstamp_valid += rxstats->hwstamp_valid; + stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid; +} + +static void ionic_get_lif_stats(struct ionic_lif *lif, + struct ionic_lif_sw_stats *stats) +{ + struct rtnl_link_stats64 ns; + int q_num; + + memset(stats, 0, sizeof(*stats)); + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + ionic_add_lif_txq_stats(lif, q_num, stats); + ionic_add_lif_rxq_stats(lif, q_num, stats); + } + + if (lif->hwstamp_txq) + 
ionic_add_lif_txq_stats(lif, lif->hwstamp_txq->q.index, stats); + + if (lif->hwstamp_rxq) + ionic_add_lif_rxq_stats(lif, lif->hwstamp_rxq->q.index, stats); + + ionic_get_stats64(lif->netdev, &ns); + stats->hw_tx_dropped = ns.tx_dropped; + stats->hw_rx_dropped = ns.rx_dropped; + stats->hw_rx_over_errors = ns.rx_over_errors; + stats->hw_rx_missed_errors = ns.rx_missed_errors; + stats->hw_tx_aborted_errors = ns.tx_aborted_errors; +} + +static u64 ionic_sw_stats_get_count(struct ionic_lif *lif) +{ + u64 total = 0, tx_queues = MAX_Q(lif), rx_queues = MAX_Q(lif); + + total += IONIC_NUM_LIF_STATS; + + if (lif->ionic->is_mgmt_nic) + total += IONIC_NUM_MGMT_PORT_STATS; + else + total += IONIC_NUM_PORT_STATS; + + if (lif->hwstamp_txq) + tx_queues += 1; + + if (lif->hwstamp_rxq) + rx_queues += 1; + + total += tx_queues * IONIC_NUM_TX_STATS; + total += rx_queues * IONIC_NUM_RX_STATS; + +#ifdef IONIC_DEBUG_STATS + if (test_bit(IONIC_LIF_F_UP, lif->state) && + test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) { + /* tx debug stats */ + total += tx_queues * (IONIC_NUM_DBG_CQ_STATS + + IONIC_NUM_TX_Q_STATS + + IONIC_NUM_DBG_INTR_STATS + + IONIC_NUM_DBG_NAPI_STATS + + IONIC_MAX_NUM_NAPI_CNTR + + IONIC_MAX_NUM_SG_CNTR); + + /* rx debug stats */ + total += rx_queues * (IONIC_NUM_DBG_CQ_STATS + + IONIC_NUM_DBG_INTR_STATS + + IONIC_NUM_DBG_NAPI_STATS + + IONIC_MAX_NUM_NAPI_CNTR); + } +#endif + + return total; +} + +static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf, + int q_num) +{ + int i; + + for (i = 0; i < IONIC_NUM_TX_STATS; i++) + ethtool_sprintf(buf, "tx_%d_%s", q_num, + ionic_tx_stats_desc[i].name); + +#ifdef IONIC_DEBUG_STATS + if (!test_bit(IONIC_LIF_F_UP, lif->state) || + !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + return; + + for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) + ethtool_sprintf(buf, "txq_%d_%s", q_num, + ionic_txq_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) + ethtool_sprintf(buf, "txq_%d_cq_%s", q_num, + ionic_dbg_cq_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) + ethtool_sprintf(buf, "txq_%d_intr_%s", q_num, + ionic_dbg_intr_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) + ethtool_sprintf(buf, "txq_%d_napi_%s", q_num, + ionic_dbg_napi_stats_desc[i].name); + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) + ethtool_sprintf(buf, "txq_%d_napi_work_done_%d", q_num, i); + for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) + ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i); +#endif +} + +static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf, + int q_num) +{ + int i; + + for (i = 0; i < IONIC_NUM_RX_STATS; i++) + ethtool_sprintf(buf, "rx_%d_%s", q_num, + ionic_rx_stats_desc[i].name); + +#ifdef IONIC_DEBUG_STATS + if (!test_bit(IONIC_LIF_F_UP, lif->state) || + !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + return; + + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) + ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num, + ionic_dbg_cq_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) + ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num, + ionic_dbg_intr_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) + ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num, + ionic_dbg_napi_stats_desc[i].name); + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) + ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i); +#endif +} + +static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf) +{ + int i, q_num; + + for (i = 0; i < IONIC_NUM_LIF_STATS; i++) + 
ethtool_sprintf(buf, ionic_lif_stats_desc[i].name); + + if (lif->ionic->is_mgmt_nic) { + for (i = 0; i < IONIC_NUM_MGMT_PORT_STATS; i++) + ethtool_sprintf(buf, ionic_mgmt_port_stats_desc[i].name); + } else { + for (i = 0; i < IONIC_NUM_PORT_STATS; i++) + ethtool_sprintf(buf, ionic_port_stats_desc[i].name); + } + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) + ionic_sw_stats_get_tx_strings(lif, buf, q_num); + + if (lif->hwstamp_txq) + ionic_sw_stats_get_tx_strings(lif, buf, lif->hwstamp_txq->q.index); + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) + ionic_sw_stats_get_rx_strings(lif, buf, q_num); + + if (lif->hwstamp_rxq) + ionic_sw_stats_get_rx_strings(lif, buf, lif->hwstamp_rxq->q.index); +} + +static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf, + int q_num) +{ + struct ionic_tx_stats *txstats; +#ifdef IONIC_DEBUG_STATS + struct ionic_qcq *txqcq; +#endif + int i; + + txstats = &lif->txqstats[q_num]; + + for (i = 0; i < IONIC_NUM_TX_STATS; i++) { + **buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]); + (*buf)++; + } + +#ifdef IONIC_DEBUG_STATS + if (!test_bit(IONIC_LIF_F_UP, lif->state) || + !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + return; + + txqcq = lif->txqcqs[q_num]; + for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->q, + &ionic_txq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->cq, + &ionic_dbg_cq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->intr, + &ionic_dbg_intr_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->napi_stats, + &ionic_dbg_napi_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { + **buf = txqcq->napi_stats.work_done_cntr[i]; + (*buf)++; + } + for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { + **buf = txstats->sg_cntr[i]; + (*buf)++; + } +#endif +} + +static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf, + int q_num) +{ + struct ionic_rx_stats *rxstats; +#ifdef IONIC_DEBUG_STATS + struct ionic_qcq *rxqcq; +#endif + int i; + + rxstats = &lif->rxqstats[q_num]; + + for (i = 0; i < IONIC_NUM_RX_STATS; i++) { + **buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]); + (*buf)++; + } + +#ifdef IONIC_DEBUG_STATS + if (!test_bit(IONIC_LIF_F_UP, lif->state) || + !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + return; + + rxqcq = lif->rxqcqs[q_num]; + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->cq, + &ionic_dbg_cq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->intr, + &ionic_dbg_intr_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->napi_stats, + &ionic_dbg_napi_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { + **buf = rxqcq->napi_stats.work_done_cntr[i]; + (*buf)++; + } +#endif +} + +static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) +{ + struct ionic_mgmt_port_stats *mgmt_stats; + struct ionic_port_stats *port_stats; + struct ionic_lif_sw_stats lif_stats; + int i, q_num; + + ionic_get_lif_stats(lif, &lif_stats); + + for (i = 0; i < IONIC_NUM_LIF_STATS; i++) { + **buf = IONIC_READ_STAT64(&lif_stats, &ionic_lif_stats_desc[i]); + (*buf)++; + } + + if (lif->ionic->is_mgmt_nic) { + mgmt_stats = 
&lif->ionic->idev.port_info->mgmt_stats; + for (i = 0; i < IONIC_NUM_MGMT_PORT_STATS; i++) { + **buf = IONIC_READ_STAT_LE64(mgmt_stats, + &ionic_mgmt_port_stats_desc[i]); + (*buf)++; + } + } else { + port_stats = &lif->ionic->idev.port_info->stats; + for (i = 0; i < IONIC_NUM_PORT_STATS; i++) { + **buf = IONIC_READ_STAT_LE64(port_stats, + &ionic_port_stats_desc[i]); + (*buf)++; + } + } + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) + ionic_sw_stats_get_txq_values(lif, buf, q_num); + + if (lif->hwstamp_txq) + ionic_sw_stats_get_txq_values(lif, buf, lif->hwstamp_txq->q.index); + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) + ionic_sw_stats_get_rxq_values(lif, buf, q_num); + + if (lif->hwstamp_rxq) + ionic_sw_stats_get_rxq_values(lif, buf, lif->hwstamp_rxq->q.index); +} + +const struct ionic_stats_group_intf ionic_stats_groups[] = { + /* SW Stats group */ + { + .get_strings = ionic_sw_stats_get_strings, + .get_values = ionic_sw_stats_get_values, + .get_count = ionic_sw_stats_get_count, + }, + /* Add more stat groups here */ +}; + +const int ionic_num_stats_grps = ARRAY_SIZE(ionic_stats_groups); diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_stats.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_stats.h new file mode 100644 index 0000000000..9cf6717ea6 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_stats.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#ifndef _IONIC_STATS_H_ +#define _IONIC_STATS_H_ + +#define IONIC_STAT_TO_OFFSET(type, stat_name) (offsetof(type, stat_name)) + +#define IONIC_STAT_DESC(type, stat_name) { \ + .name = #stat_name, \ + .offset = IONIC_STAT_TO_OFFSET(type, stat_name) \ +} + +#define IONIC_PORT_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_port_stats, stat_name) + +#define IONIC_MGMT_PORT_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_mgmt_port_stats, stat_name) + +#define IONIC_LIF_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_lif_sw_stats, stat_name) + +#define IONIC_TX_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_tx_stats, stat_name) + +#define IONIC_RX_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_rx_stats, stat_name) + +#ifdef IONIC_DEBUG_STATS +#define IONIC_TX_Q_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_queue, stat_name) + +#define IONIC_CQ_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_cq, stat_name) + +#define IONIC_INTR_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_intr_info, stat_name) + +#define IONIC_NAPI_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_napi_stats, stat_name) +#endif + +/* Interface structure for a particular stats group */ +struct ionic_stats_group_intf { + void (*get_strings)(struct ionic_lif *lif, u8 **buf); + void (*get_values)(struct ionic_lif *lif, u64 **buf); + u64 (*get_count)(struct ionic_lif *lif); +}; + +extern const struct ionic_stats_group_intf ionic_stats_groups[]; +extern const int ionic_num_stats_grps; + +#define IONIC_READ_STAT64(base_ptr, desc_ptr) \ + (*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset))) + +#define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \ + __le64_to_cpu(*((__le64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset))) + +struct ionic_stat_desc { + char name[ETH_GSTRING_LEN]; + u64 offset; +}; + +#endif /* _IONIC_STATS_H_ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_txrx.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_txrx.c new
file mode 100644 index 0000000000..de5cc1a1f3 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_txrx.c @@ -0,0 +1,1595 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#include +#include +#include +#include +#include + +#include "ionic.h" +#include "ionic_lif.h" +#include "ionic_txrx.h" + +static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, + ionic_desc_cb cb_func, void *cb_arg) +{ + DEBUG_STATS_TXQ_POST(q, ring_dbell); + + ionic_q_post(q, ring_dbell, cb_func, cb_arg); +} + +static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell, + ionic_desc_cb cb_func, void *cb_arg) +{ + ionic_q_post(q, ring_dbell, cb_func, cb_arg); + + DEBUG_STATS_RX_BUFF_CNT(q); +} + +bool ionic_txq_poke_doorbell(struct ionic_queue *q) +{ + unsigned long now, then, dif; + struct netdev_queue *netdev_txq; + struct net_device *netdev; + + netdev = q->lif->netdev; + netdev_txq = netdev_get_tx_queue(netdev, q->index); + + HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id()); + + if (q->tail_idx == q->head_idx) { + HARD_TX_UNLOCK(netdev, netdev_txq); + return false; + } + + now = READ_ONCE(jiffies); + then = q->dbell_jiffies; + dif = now - then; + + if (dif > q->dbell_deadline) { + ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, + q->dbval | q->head_idx); + + q->dbell_jiffies = now; + } + + HARD_TX_UNLOCK(netdev, netdev_txq); + + return true; +} + +bool ionic_rxq_poke_doorbell(struct ionic_queue *q) +{ + unsigned long now, then, dif; + + /* no lock, called from rx napi or txrx napi, nothing else can fill */ + + if (q->tail_idx == q->head_idx) + return false; + + now = READ_ONCE(jiffies); + then = q->dbell_jiffies; + dif = now - then; + + if (dif > q->dbell_deadline) { + ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, + q->dbval | q->head_idx); + + q->dbell_jiffies = now; + + dif = 2 * q->dbell_deadline; + if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE) + dif = IONIC_RX_MAX_DOORBELL_DEADLINE; + + q->dbell_deadline = dif; + } + + return true; +} + +static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) +{ + return netdev_get_tx_queue(q->lif->netdev, q->index); +} + +static inline void *ionic_rx_buf_va(struct ionic_buf_info *buf_info) +{ + return page_address(buf_info->page) + buf_info->page_offset; +} + +static inline dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info) +{ + return buf_info->dma_addr + buf_info->page_offset; +} + +static inline unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info) +{ + return IONIC_PAGE_SIZE - buf_info->page_offset; +} + +static bool ionic_rx_cache_put(struct ionic_queue *q, + struct ionic_buf_info *buf_info) +{ + struct ionic_page_cache *cache = &q->page_cache; + struct ionic_rx_stats *stats = q_to_rx_stats(q); + u32 tail_next; + + tail_next = (cache->tail + 1) & (IONIC_PAGE_CACHE_SIZE - 1); + if (tail_next == cache->head) { + stats->cache_full++; + return false; + } + + get_page(buf_info->page); + + cache->ring[cache->tail] = *buf_info; + cache->tail = tail_next; + stats->cache_put++; + + return true; +} + +static bool ionic_rx_cache_get(struct ionic_queue *q, + struct ionic_buf_info *buf_info) +{ + struct ionic_page_cache *cache = &q->page_cache; + struct ionic_rx_stats *stats = q_to_rx_stats(q); + + if (unlikely(cache->head == cache->tail)) { + stats->cache_empty++; + return false; + } + + if (page_ref_count(cache->ring[cache->head].page) != 1) { + stats->cache_busy++; + return false; + } + + *buf_info = cache->ring[cache->head]; + 
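/* advance head with a mask; assumes IONIC_PAGE_CACHE_SIZE is a power of two */ +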
cache->head = (cache->head + 1) & (IONIC_PAGE_CACHE_SIZE - 1); + stats->cache_get++; + + dma_sync_single_for_device(q->dev, buf_info->dma_addr, + IONIC_PAGE_SIZE, + DMA_FROM_DEVICE); + + return true; +} + +static void ionic_rx_cache_drain(struct ionic_queue *q) +{ + struct ionic_page_cache *cache = &q->page_cache; + struct ionic_rx_stats *stats = q_to_rx_stats(q); + struct ionic_buf_info *buf_info; + + while (cache->head != cache->tail) { + buf_info = &cache->ring[cache->head]; + dma_unmap_page(q->dev, buf_info->dma_addr, IONIC_PAGE_SIZE, + DMA_FROM_DEVICE); + put_page(buf_info->page); + cache->head = (cache->head + 1) & (IONIC_PAGE_CACHE_SIZE - 1); + } + + cache->head = 0; + cache->tail = 0; + stats->cache_empty = 0; + stats->cache_busy = 0; + stats->cache_get = 0; + stats->cache_put = 0; + stats->cache_full = 0; +} + +static bool ionic_rx_buf_reuse(struct ionic_queue *q, + struct ionic_buf_info *buf_info, u32 used) +{ + struct ionic_rx_stats *stats = q_to_rx_stats(q); + u32 size; + + if (!dev_page_is_reusable(buf_info->page)) { + stats->buf_not_reusable++; + return false; + } + + size = ALIGN(used, IONIC_PAGE_SPLIT_SZ); + buf_info->page_offset += size; + if (buf_info->page_offset >= IONIC_PAGE_SIZE) { + buf_info->page_offset = 0; + stats->buf_exhausted++; + return false; + } + + stats->buf_reused++; + + get_page(buf_info->page); + + return true; +} + +static void ionic_rx_buf_complete(struct ionic_queue *q, + struct ionic_buf_info *buf_info, u32 used) +{ + if (ionic_rx_buf_reuse(q, buf_info, used)) + return; + + if (!ionic_rx_cache_put(q, buf_info)) { +#ifndef HAVE_STRUCT_DMA_ATTRS + dma_unmap_page_attrs(q->dev, buf_info->dma_addr, IONIC_PAGE_SIZE, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); +#else + dma_unmap_page(q->dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); +#endif + } + + buf_info->page = NULL; +} + +static inline int ionic_rx_page_alloc(struct ionic_queue *q, + struct ionic_buf_info *buf_info) +{ + struct net_device *netdev = q->lif->netdev; + struct ionic_rx_stats *stats; + struct device *dev; + struct page *page; + + if (ionic_rx_cache_get(q, buf_info)) + return 0; + + dev = q->dev; + stats = q_to_rx_stats(q); + + if (unlikely(!buf_info)) { + net_err_ratelimited("%s: %s invalid buf_info in alloc\n", + netdev->name, q->name); + return -EINVAL; + } + + page = alloc_pages_node(dev_to_node(dev), IONIC_PAGE_GFP_MASK, IONIC_PAGE_ORDER); + if (unlikely(!page)) { + net_err_ratelimited("%s: %s page alloc failed\n", + netdev->name, q->name); + stats->alloc_err++; + return -ENOMEM; + } + + buf_info->dma_addr = dma_map_page(dev, page, 0, + IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) { + __free_pages(page, IONIC_PAGE_ORDER); + net_err_ratelimited("%s: %s dma map failed\n", + netdev->name, q->name); + stats->dma_map_err++; + return -EIO; + } + + buf_info->page = page; + buf_info->page_offset = 0; + + return 0; +} + +static inline void ionic_rx_page_free(struct ionic_queue *q, + struct ionic_buf_info *buf_info) +{ + struct net_device *netdev = q->lif->netdev; + struct device *dev = q->dev; + + if (unlikely(!buf_info)) { + net_err_ratelimited("%s: %s invalid buf_info in free\n", + netdev->name, q->name); + return; + } + + if (!buf_info->page) + return; + + dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + __free_pages(buf_info->page, IONIC_PAGE_ORDER); + buf_info->page = NULL; +} + +static void ionic_rx_add_skb_frag(struct ionic_queue *q, + struct sk_buff *skb, + struct ionic_buf_info *buf_info, + u32 
off, u32 len) +{ + dma_sync_single_for_cpu(q->dev, + ionic_rx_buf_pa(buf_info) + off, + len, DMA_FROM_DEVICE); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + buf_info->page, buf_info->page_offset + off, + len, + IONIC_PAGE_SIZE); + + ionic_rx_buf_complete(q, buf_info, off + len); +} + +static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct ionic_rxq_comp *comp) +{ + struct net_device *netdev = q->lif->netdev; + struct ionic_buf_info *buf_info; + struct ionic_rx_stats *stats; + struct device *dev = q->dev; + struct sk_buff *skb; + unsigned int i; + u16 head_len; + u16 frag_len; + u16 copy_len; + u16 len; + + stats = q_to_rx_stats(q); + + buf_info = &desc_info->bufs[0]; + if (unlikely(!buf_info->page)) + return NULL; + + prefetchw(buf_info->page); + + len = le16_to_cpu(comp->len); + head_len = min_t(u16, q->lif->rx_copybreak, len); + + skb = napi_alloc_skb(&q_to_qcq(q)->napi, head_len); + if (unlikely(!skb)) { + net_warn_ratelimited("%s: SKB alloc failed on %s!\n", + netdev->name, q->name); + stats->alloc_err++; + return NULL; + } + + copy_len = ALIGN(head_len, sizeof(long)); /* for better memcpy performance */ + dma_sync_single_for_cpu(dev, ionic_rx_buf_pa(buf_info), copy_len, DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info), copy_len); + skb_put(skb, head_len); + + if (len > head_len) { + len -= head_len; + frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info) - head_len); + len -= frag_len; + ionic_rx_add_skb_frag(q, skb, buf_info, head_len, frag_len); + buf_info++; + for (i = 0; i < comp->num_sg_elems; i++) { + if (len == 0) + goto err_out; + if (unlikely(!buf_info->page)) + goto err_out; + frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info)); + len -= frag_len; + ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len); + buf_info++; + } + } else { + dma_sync_single_for_device(dev, + ionic_rx_buf_pa(buf_info), + len, DMA_FROM_DEVICE); + } + + skb->protocol = eth_type_trans(skb, q->lif->netdev); + + return skb; + +err_out: + if (skb) + dev_kfree_skb(skb); + return NULL; +} + +static void ionic_rx_clean(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, + void *cb_arg) +{ + struct net_device *netdev = q->lif->netdev; + struct ionic_qcq *qcq = q_to_qcq(q); + struct ionic_rx_stats *stats; + struct ionic_rxq_comp *comp; + struct sk_buff *skb; +#ifdef CSUM_DEBUG + __sum16 csum; +#endif + + comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp); + + stats = q_to_rx_stats(q); + + if (comp->status) { + stats->dropped++; + return; + } + + if (le16_to_cpu(comp->len) > netdev->mtu + ETH_HLEN + VLAN_HLEN) { + stats->dropped++; + net_warn_ratelimited("%s: RX PKT TOO LARGE! 
comp->len %d\n", + netdev->name, + le16_to_cpu(comp->len)); + return; + } + + stats->pkts++; + stats->bytes += le16_to_cpu(comp->len); + + skb = ionic_rx_build_skb(q, desc_info, comp); + if (unlikely(!skb)) { + stats->dropped++; + return; + } + +#ifdef CSUM_DEBUG + csum = ip_compute_csum(skb->data, skb->len); +#endif + + skb_record_rx_queue(skb, q->index); + + if (likely(netdev->features & NETIF_F_RXHASH)) { + switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) { + case IONIC_PKT_TYPE_IPV4: + case IONIC_PKT_TYPE_IPV6: + skb_set_hash(skb, le32_to_cpu(comp->rss_hash), + PKT_HASH_TYPE_L3); + break; + case IONIC_PKT_TYPE_IPV4_TCP: + case IONIC_PKT_TYPE_IPV6_TCP: + case IONIC_PKT_TYPE_IPV4_UDP: + case IONIC_PKT_TYPE_IPV6_UDP: + skb_set_hash(skb, le32_to_cpu(comp->rss_hash), + PKT_HASH_TYPE_L4); + break; + } + } + + if (likely(netdev->features & NETIF_F_RXCSUM) && + (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = (__force __wsum)le16_to_cpu(comp->csum); +#ifdef IONIC_DEBUG_STATS + stats->csum_complete++; +#endif +#ifdef CSUM_DEBUG + if (skb->csum != (u16)~csum) + netdev_warn(netdev, "Rx CSUM incorrect. Want 0x%04x got 0x%04x, protocol 0x%04x\n", + (u16)~csum, skb->csum, + htons(skb->protocol)); +#endif + } else { +#ifdef IONIC_DEBUG_STATS + stats->csum_none++; +#endif + } + + if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) || + (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) || + (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD))) + stats->csum_error++; + + if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(comp->vlan_tci)); +#ifdef IONIC_DEBUG_STATS + stats->vlan_stripped++; +#endif + } + + if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) { + __le64 *cq_desc_hwstamp; + u64 hwstamp; + + cq_desc_hwstamp = + cq_info->cq_desc + + qcq->cq.desc_size - + sizeof(struct ionic_rxq_comp) - + IONIC_HWSTAMP_CQ_NEGOFFSET; + + hwstamp = le64_to_cpu(*cq_desc_hwstamp); + + if (hwstamp != IONIC_HWSTAMP_INVALID) { + skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp); + stats->hwstamp_valid++; + } else { + stats->hwstamp_invalid++; + } + } + + napi_gro_receive(&qcq->napi, skb); +} + +bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) +{ + struct ionic_queue *q = cq->bound_q; + struct ionic_desc_info *desc_info; + struct ionic_rxq_comp *comp; + + comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp); + + if (!color_match(comp->pkt_type_color, cq->done_color)) + return false; + + /* check for empty queue */ + if (q->tail_idx == q->head_idx) + return false; + + if (q->tail_idx != le16_to_cpu(comp->comp_index)) + return false; + + desc_info = &q->info[q->tail_idx]; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + + /* clean the related q entry, only one per qc completion */ + ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg); + + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + + return true; +} + +void ionic_rx_fill(struct ionic_queue *q) +{ + struct net_device *netdev = q->lif->netdev; + struct ionic_desc_info *desc_info; + struct ionic_rxq_sg_desc *sg_desc; + struct ionic_rxq_sg_elem *sg_elem; + struct ionic_buf_info *buf_info; + unsigned int fill_threshold; + struct ionic_rxq_desc *desc; + unsigned int remain_len; + unsigned int frag_len; + unsigned int nfrags; + unsigned int n_fill; + unsigned int len; + unsigned int i; + unsigned int j; + + n_fill = 
ionic_q_space_avail(q); + + fill_threshold = min_t(unsigned int, rx_fill_threshold, + q->num_descs / IONIC_RX_FILL_DIV); + if (n_fill < fill_threshold) + return; + + len = netdev->mtu + ETH_HLEN + VLAN_HLEN; + + for (i = n_fill; i; i--) { + nfrags = 0; + remain_len = len; + desc_info = &q->info[q->head_idx]; + desc = desc_info->desc; + buf_info = &desc_info->bufs[0]; + + if (!buf_info->page) { /* alloc a new buffer? */ + if (unlikely(ionic_rx_page_alloc(q, buf_info))) { + desc->addr = 0; + desc->len = 0; + return; + } + } + + /* fill main descriptor - buf[0] */ + desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info)); + frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info)); + desc->len = cpu_to_le16(frag_len); + remain_len -= frag_len; + buf_info++; + nfrags++; + + /* fill sg descriptors - buf[1..n] */ + sg_desc = desc_info->sg_desc; + for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) { + sg_elem = &sg_desc->elems[j]; + if (!buf_info->page) { /* alloc a new sg buffer? */ + if (unlikely(ionic_rx_page_alloc(q, buf_info))) { + sg_elem->addr = 0; + sg_elem->len = 0; + return; + } + } + + sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info)); + frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info)); + sg_elem->len = cpu_to_le16(frag_len); + remain_len -= frag_len; + buf_info++; + nfrags++; + } + + /* clear end sg element as a sentinel */ + if (j < q->max_sg_elems) { + sg_elem = &sg_desc->elems[j]; + memset(sg_elem, 0, sizeof(*sg_elem)); + } + + desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG : + IONIC_RXQ_DESC_OPCODE_SIMPLE; + desc_info->nbufs = nfrags; + + /* commit descriptor contents in one shot */ + if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS) + memcpy_toio(desc_info->cmb_desc, desc, q->desc_size); + + ionic_rxq_post(q, false, ionic_rx_clean, NULL); + } + + ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, + q->dbval | q->head_idx); + + q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE; + q->dbell_jiffies = jiffies; + + mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline, + jiffies + IONIC_NAPI_DEADLINE); +} + +void ionic_rx_empty(struct ionic_queue *q) +{ + struct ionic_desc_info *desc_info; + struct ionic_buf_info *buf_info; + unsigned int i, j; + + for (i = 0; i < q->num_descs; i++) { + desc_info = &q->info[i]; + for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) { + buf_info = &desc_info->bufs[j]; + if (buf_info->page) + ionic_rx_page_free(q, buf_info); + } + + desc_info->nbufs = 0; + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + } + + q->head_idx = 0; + q->tail_idx = 0; + + ionic_rx_cache_drain(q); +} + +static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode) +{ + struct dim_sample dim_sample; + struct ionic_lif *lif; + unsigned int qi; + u64 pkts, bytes; + + if (!qcq->intr.dim_coal_hw) + return; + + lif = qcq->q.lif; + qi = qcq->cq.bound_q->index; + + switch (napi_mode) { + case IONIC_LIF_F_TX_DIM_INTR: + pkts = lif->txqstats[qi].pkts; + bytes = lif->txqstats[qi].bytes; + break; + case IONIC_LIF_F_RX_DIM_INTR: + pkts = lif->rxqstats[qi].pkts; + bytes = lif->rxqstats[qi].bytes; + break; + default: + pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts; + bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes; + break; + } + + dim_update_sample_with_comps(qcq->cq.bound_intr->rearm_count, + pkts, bytes, 0, &dim_sample); + + net_dim(&qcq->dim, dim_sample); +} + +int ionic_tx_napi(struct napi_struct *napi, int budget) +{ + struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_cq *cq = napi_to_cq(napi); + struct ionic_dev *idev; + struct ionic_lif 
*lif; + u32 work_done = 0; + u32 flags = 0; + u64 dbr; + + lif = cq->bound_q->lif; + idev = &lif->ionic->idev; + + work_done = ionic_cq_service(cq, budget, + ionic_tx_service, NULL, NULL); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + cq->bound_intr->rearm_count++; + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + if (!lif->ionic->neth_eqs) { + if (flags & IONIC_INTR_CRED_UNMASK) + ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR); + ionic_intr_credits(idev->intr_ctrl, + cq->bound_intr->index, + work_done, flags); + } else { + if (!qcq->armed) { + qcq->armed = true; + dbr = IONIC_DBELL_RING_1 | + IONIC_DBELL_QID(qcq->q.hw_index); + ionic_dbell_ring(lif->kern_dbpage, + qcq->q.hw_type, + dbr | qcq->cq.tail_idx); + } + } + } + + if (!work_done && ionic_txq_poke_doorbell(&qcq->q)) + mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + + DEBUG_STATS_NAPI_POLL(qcq, work_done); + + return work_done; +} + +int ionic_rx_napi(struct napi_struct *napi, int budget) +{ + struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_cq *cq = napi_to_cq(napi); + struct ionic_dev *idev; + struct ionic_lif *lif; + u32 work_done = 0; + u32 flags = 0; + u64 dbr; + + lif = cq->bound_q->lif; + idev = &lif->ionic->idev; + + work_done = ionic_cq_service(cq, budget, + ionic_rx_service, NULL, NULL); + + ionic_rx_fill(cq->bound_q); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + cq->bound_intr->rearm_count++; + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + if (!lif->ionic->neth_eqs) { + if (flags & IONIC_INTR_CRED_UNMASK) + ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR); + ionic_intr_credits(idev->intr_ctrl, + cq->bound_intr->index, + work_done, flags); + } else { + if (!qcq->armed) { + qcq->armed = true; + dbr = IONIC_DBELL_RING_1 | + IONIC_DBELL_QID(qcq->q.hw_index); + ionic_dbell_ring(lif->kern_dbpage, + qcq->q.hw_type, + dbr | qcq->cq.tail_idx); + } + } + } + + if (!work_done && ionic_rxq_poke_doorbell(&qcq->q)) + mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + + DEBUG_STATS_NAPI_POLL(qcq, work_done); + + return work_done; +} + +int ionic_txrx_napi(struct napi_struct *napi, int budget) +{ + struct ionic_qcq *rxqcq = napi_to_qcq(napi); + struct ionic_cq *rxcq = napi_to_cq(napi); + unsigned int qi = rxcq->bound_q->index; + struct ionic_dev *idev; + struct ionic_lif *lif; + struct ionic_qcq *txqcq; + struct ionic_cq *txcq; + bool resched = false; + u32 tx_work_done = 0; + u32 rx_work_done = 0; + u32 flags = 0; + + lif = rxcq->bound_q->lif; + idev = &lif->ionic->idev; + txqcq = lif->txqcqs[qi]; + txcq = &lif->txqcqs[qi]->cq; + + tx_work_done = ionic_cq_service(txcq, tx_budget, + ionic_tx_service, NULL, NULL); + + rx_work_done = ionic_cq_service(rxcq, budget, + ionic_rx_service, NULL, NULL); + + ionic_rx_fill(rxcq->bound_q); + + if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + rxcq->bound_intr->rearm_count++; + } + + if (rx_work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + if (!lif->ionic->neth_eqs) { + if (flags & IONIC_INTR_CRED_UNMASK) + ionic_dim_update(rxqcq, 0); + ionic_intr_credits(idev->intr_ctrl, + rxcq->bound_intr->index, + tx_work_done + rx_work_done, flags); + } else { + u64 dbr; + + if (!rxqcq->armed) { + rxqcq->armed = true; + dbr = IONIC_DBELL_RING_1 | + IONIC_DBELL_QID(rxqcq->q.hw_index); + ionic_dbell_ring(lif->kern_dbpage, + 
rxqcq->q.hw_type, + dbr | rxqcq->cq.tail_idx); + } + if (!txqcq->armed) { + txqcq->armed = true; + dbr = IONIC_DBELL_RING_1 | + IONIC_DBELL_QID(txqcq->q.hw_index); + ionic_dbell_ring(lif->kern_dbpage, + txqcq->q.hw_type, + dbr | txqcq->cq.tail_idx); + } + } + } + + DEBUG_STATS_NAPI_POLL(rxqcq, rx_work_done); + DEBUG_STATS_NAPI_POLL(txqcq, tx_work_done); + + if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q)) + resched = true; + if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q)) + resched = true; + if (resched) + mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + + return rx_work_done; +} + +static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, + void *data, size_t len) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct device *dev = q->dev; + dma_addr_t dma_addr; + + dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + net_warn_ratelimited("%s: DMA single map failed on %s!\n", + q->lif->netdev->name, q->name); + stats->dma_map_err++; + return 0; + } + return dma_addr; +} + +static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, + const skb_frag_t *frag, + size_t offset, size_t len) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct device *dev = q->dev; + dma_addr_t dma_addr; + + dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + net_warn_ratelimited("%s: DMA frag map failed on %s!\n", + q->lif->netdev->name, q->name); + stats->dma_map_err++; + } + return dma_addr; +} + +static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) +{ + struct ionic_buf_info *buf_info = desc_info->bufs; + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct device *dev = q->dev; + dma_addr_t dma_addr; + unsigned int nfrags; + skb_frag_t *frag; + int frag_idx; + + dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); + if (dma_mapping_error(dev, dma_addr)) { + stats->dma_map_err++; + return -EIO; + } + buf_info->dma_addr = dma_addr; + buf_info->len = skb_headlen(skb); + buf_info++; + + frag = skb_shinfo(skb)->frags; + nfrags = skb_shinfo(skb)->nr_frags; + for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) { + dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); + if (dma_mapping_error(dev, dma_addr)) { + stats->dma_map_err++; + goto dma_fail; + } + buf_info->dma_addr = dma_addr; + buf_info->len = skb_frag_size(frag); + buf_info++; + } + + desc_info->nbufs = 1 + nfrags; + + return 0; + +dma_fail: + /* unwind the frag mappings and the head mapping */ + while (frag_idx > 0) { + frag_idx--; + buf_info--; + dma_unmap_page(dev, buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + } + dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE); + return -EIO; +} + +static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, + struct ionic_desc_info *desc_info) +{ + struct ionic_buf_info *buf_info = desc_info->bufs; + struct device *dev = q->dev; + unsigned int i; + + if (!desc_info->nbufs) + return; + + dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + buf_info++; + for (i = 1; i < desc_info->nbufs; i++, buf_info++) + dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + + desc_info->nbufs = 0; +} + +static void ionic_tx_clean(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, + void *cb_arg) +{ + struct ionic_tx_stats *stats = 
q_to_tx_stats(q); + struct ionic_qcq *qcq = q_to_qcq(q); + struct sk_buff *skb = cb_arg; + u16 qi; + + ionic_tx_desc_unmap_bufs(q, desc_info); + + if (!skb) + return; + + qi = skb_get_queue_mapping(skb); + + if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) { + if (cq_info) { + struct skb_shared_hwtstamps hwts = {}; + __le64 *cq_desc_hwstamp; + u64 hwstamp; + + cq_desc_hwstamp = + cq_info->cq_desc + + qcq->cq.desc_size - + sizeof(struct ionic_txq_comp) - + IONIC_HWSTAMP_CQ_NEGOFFSET; + + hwstamp = le64_to_cpu(*cq_desc_hwstamp); + + if (hwstamp != IONIC_HWSTAMP_INVALID) { + hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp); + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_tstamp_tx(skb, &hwts); + + stats->hwstamp_valid++; + } else { + stats->hwstamp_invalid++; + } + } + + } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) { + netif_wake_subqueue(q->lif->netdev, qi); + q->wake++; + } + + desc_info->bytes = skb->len; + stats->clean++; + + dev_consume_skb_any(skb); +} + +bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) +{ + struct ionic_queue *q = cq->bound_q; + struct ionic_desc_info *desc_info; + struct ionic_txq_comp *comp; + int bytes = 0; + int pkts = 0; + u16 index; + + comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp); + + if (!color_match(comp->color, cq->done_color)) + return false; + + /* clean the related q entries, there could be + * several q entries completed for each cq completion + */ + do { + desc_info = &q->info[q->tail_idx]; + desc_info->bytes = 0; + index = q->tail_idx; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg); + if (desc_info->cb_arg) { + pkts++; + bytes += desc_info->bytes; + } + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + } while (index != le16_to_cpu(comp->comp_index)); + +#ifdef IONIC_SUPPORTS_BQL + if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) + netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes); +#endif + + return true; +} + +void ionic_tx_flush(struct ionic_cq *cq) +{ + struct ionic_dev *idev = &cq->lif->ionic->idev; + u32 work_done; + + work_done = ionic_cq_service(cq, cq->num_descs, + ionic_tx_service, NULL, NULL); + + if (work_done && !cq->lif->ionic->neth_eqs) + ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, + work_done, IONIC_INTR_CRED_RESET_COALESCE); +} + +void ionic_tx_empty(struct ionic_queue *q) +{ + struct ionic_desc_info *desc_info; + int bytes = 0; + int pkts = 0; + + /* walk the not completed tx entries, if any */ + while (q->head_idx != q->tail_idx) { + desc_info = &q->info[q->tail_idx]; + desc_info->bytes = 0; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg); + if (desc_info->cb_arg) { + pkts++; + bytes += desc_info->bytes; + } + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + } + +#ifdef IONIC_SUPPORTS_BQL + if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) + netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes); +#endif +} + +static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb) +{ + int err; + + err = skb_cow_head(skb, 0); + if (err) + return err; + + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + inner_ip_hdr(skb)->check = 0; + inner_tcp_hdr(skb)->check = + ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr, + inner_ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { + inner_tcp_hdr(skb)->check = + ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr, + 
&inner_ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + return 0; +} + +static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb) +{ + int err; + + err = skb_cow_head(skb, 0); + if (err) + return err; + + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + return 0; +} + +static void ionic_tx_tso_post(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct sk_buff *skb, + dma_addr_t addr, u8 nsge, u16 len, + unsigned int hdrlen, unsigned int mss, + bool outer_csum, + u16 vlan_tci, bool has_vlan, + bool start, bool done) +{ + struct ionic_txq_desc *desc = desc_info->desc; + u8 flags = 0; + u64 cmd; + + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; + flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0; + + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr); + desc->cmd = cpu_to_le64(cmd); + desc->len = cpu_to_le16(len); + desc->vlan_tci = cpu_to_le16(vlan_tci); + desc->hdr_len = cpu_to_le16(hdrlen); + desc->mss = cpu_to_le16(mss); + + /* commit descriptor contents in one shot */ + if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS) + memcpy_toio(desc_info->cmb_desc, desc, q->desc_size); + + if (start) { + skb_tx_timestamp(skb); +#ifdef IONIC_SUPPORTS_BQL + if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) + netdev_tx_sent_queue(q_to_ndq(q), skb->len); +#endif + ionic_txq_post(q, false, ionic_tx_clean, skb); + } else { + ionic_txq_post(q, done, NULL, NULL); + } +} + +static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct ionic_desc_info *desc_info; + struct ionic_buf_info *buf_info; + struct ionic_txq_sg_elem *elem; + struct ionic_txq_desc *desc; + unsigned int chunk_len; + unsigned int frag_rem; + unsigned int tso_rem; + unsigned int seg_rem; + dma_addr_t desc_addr; + dma_addr_t frag_addr; + unsigned int hdrlen; + unsigned int len; + unsigned int mss; + bool start, done; + bool outer_csum; + bool has_vlan; + u16 desc_len; + u8 desc_nsge; + u16 vlan_tci; + bool encap; + int err; + + desc_info = &q->info[q->head_idx]; + buf_info = desc_info->bufs; + + if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) + return -EIO; + + len = skb->len; + mss = skb_shinfo(skb)->gso_size; + outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | +#ifdef NETIF_F_GSO_IPXIP4 + SKB_GSO_IPXIP4 | +#endif +#ifdef NETIF_F_GSO_IPXIP6 + SKB_GSO_IPXIP6 | +#endif +#ifdef NETIF_F_GSO_IPIP + SKB_GSO_IPIP | +#endif +#ifdef NETIF_F_GSO_SIT + SKB_GSO_SIT | +#endif + SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM)); + has_vlan = !!skb_vlan_tag_present(skb); + vlan_tci = skb_vlan_tag_get(skb); + encap = skb->encapsulation; + + /* Preload inner-most TCP csum field with IP pseudo hdr + * calculated with IP length set to zero. HW will later + * add in length to each TCP segment resulting from the TSO. 
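+ * (i.e. tcp_hdr(skb)->check = ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0), as computed above)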
+ */ + + if (encap) + err = ionic_tx_tcp_inner_pseudo_csum(skb); + else + err = ionic_tx_tcp_pseudo_csum(skb); + if (err) { + /* clean up mapping from ionic_tx_map_skb */ + ionic_tx_desc_unmap_bufs(q, desc_info); + return err; + } + + if (encap) + hdrlen = skb_inner_tcp_all_headers(skb); + else + hdrlen = skb_tcp_all_headers(skb); + + tso_rem = len; + seg_rem = min(tso_rem, hdrlen + mss); + + frag_addr = 0; + frag_rem = 0; + + start = true; + + while (tso_rem > 0) { + desc = NULL; + elem = NULL; + desc_addr = 0; + desc_len = 0; + desc_nsge = 0; + /* use fragments until we have enough to post a single descriptor */ + while (seg_rem > 0) { + /* if the fragment is exhausted then move to the next one */ + if (frag_rem == 0) { + /* grab the next fragment */ + frag_addr = buf_info->dma_addr; + frag_rem = buf_info->len; + buf_info++; + } + chunk_len = min(frag_rem, seg_rem); + if (!desc) { + /* fill main descriptor */ + desc = desc_info->txq_desc; + elem = desc_info->txq_sg_desc->elems; + desc_addr = frag_addr; + desc_len = chunk_len; + } else { + /* fill sg descriptor */ + elem->addr = cpu_to_le64(frag_addr); + elem->len = cpu_to_le16(chunk_len); + elem++; + desc_nsge++; + } + frag_addr += chunk_len; + frag_rem -= chunk_len; + tso_rem -= chunk_len; + seg_rem -= chunk_len; + } + seg_rem = min(tso_rem, mss); + done = (tso_rem == 0); + /* post descriptor */ + ionic_tx_tso_post(q, desc_info, skb, + desc_addr, desc_nsge, desc_len, + hdrlen, mss, outer_csum, vlan_tci, has_vlan, + start, done); + start = false; + /* Buffer information is stored with the first tso descriptor */ + desc_info = &q->info[q->head_idx]; + desc_info->nbufs = 0; + } + + stats->pkts += DIV_ROUND_UP(len - hdrlen, mss); + stats->bytes += len; + stats->tso++; + stats->tso_bytes = len; + + return 0; +} + +static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) +{ + struct ionic_buf_info *buf_info = desc_info->bufs; +#ifdef IONIC_DEBUG_STATS + struct ionic_tx_stats *stats = q_to_tx_stats(q); +#endif + struct ionic_txq_desc *desc = desc_info->desc; + bool has_vlan; + u8 flags = 0; + bool encap; + u64 cmd; + + has_vlan = !!skb_vlan_tag_present(skb); + encap = skb->encapsulation; + + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL, + flags, skb_shinfo(skb)->nr_frags, + buf_info->dma_addr); + desc->cmd = cpu_to_le64(cmd); + desc->len = cpu_to_le16(buf_info->len); + if (has_vlan) { + desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); +#ifdef IONIC_DEBUG_STATS + stats->vlan_inserted++; +#endif + } + desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb)); + desc->csum_offset = cpu_to_le16(skb->csum_offset); + + /* commit descriptor contents in one shot */ + if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS) + memcpy_toio(desc_info->cmb_desc, desc, q->desc_size); + +#ifdef IONIC_DEBUG_STATS +#ifdef HAVE_CSUM_NOT_INET + if (skb->csum_not_inet) + stats->crc32_csum++; + else +#endif + stats->csum++; +#endif +} + +static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) +{ + struct ionic_buf_info *buf_info = desc_info->bufs; + struct ionic_txq_desc *desc = desc_info->desc; +#ifdef IONIC_DEBUG_STATS + struct ionic_tx_stats *stats = q_to_tx_stats(q); +#endif + bool has_vlan; + u8 flags = 0; + bool encap; + u64 cmd; + + has_vlan = !!skb_vlan_tag_present(skb); + encap = skb->encapsulation; + + flags |= has_vlan ? 
IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE, + flags, skb_shinfo(skb)->nr_frags, + buf_info->dma_addr); + desc->cmd = cpu_to_le64(cmd); + desc->len = cpu_to_le16(buf_info->len); + if (has_vlan) { + desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); +#ifdef IONIC_DEBUG_STATS + stats->vlan_inserted++; +#endif + } + + /* commit descriptor contents in one shot */ + if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS) + memcpy_toio(desc_info->cmb_desc, desc, q->desc_size); + +#ifdef IONIC_DEBUG_STATS + stats->csum_none++; +#endif +} + +static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) +{ + struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc; + struct ionic_buf_info *buf_info = &desc_info->bufs[1]; + struct ionic_txq_sg_elem *elem = sg_desc->elems; +#ifdef IONIC_DEBUG_STATS + struct ionic_tx_stats *stats = q_to_tx_stats(q); +#endif + unsigned int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) { + elem->addr = cpu_to_le64(buf_info->dma_addr); + elem->len = cpu_to_le16(buf_info->len); + } + +#ifdef IONIC_DEBUG_STATS + stats->frags += skb_shinfo(skb)->nr_frags; +#endif +} + +static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_desc_info *desc_info = &q->info[q->head_idx]; + struct ionic_tx_stats *stats = q_to_tx_stats(q); + + if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) + return -EIO; + + /* set up the initial descriptor */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + ionic_tx_calc_csum(q, skb, desc_info); + else + ionic_tx_calc_no_csum(q, skb, desc_info); + + /* add frags */ + ionic_tx_skb_frags(q, skb, desc_info); + + skb_tx_timestamp(skb); + stats->pkts++; + stats->bytes += skb->len; + +#ifdef IONIC_SUPPORTS_BQL + if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) + netdev_tx_sent_queue(q_to_ndq(q), skb->len); +#endif +#ifdef HAVE_SKB_XMIT_MORE + ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); +#else + ionic_txq_post(q, true, ionic_tx_clean, skb); +#endif + + return 0; +} + +static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + int ndescs; + int err; + + /* Each desc is mss long max, so a descriptor for each gso_seg */ + if (skb_is_gso(skb)) + ndescs = skb_shinfo(skb)->gso_segs; + else + ndescs = 1; + + /* If non-TSO, just need 1 desc and nr_frags sg elems */ + if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems) + return ndescs; + + /* Too many frags, so linearize */ + err = skb_linearize(skb); + if (err) + return err; + + stats->linearize++; + + return ndescs; +} + +static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) +{ + int stopped = 0; + + if (unlikely(!ionic_q_has_space(q, ndescs))) { + netif_stop_subqueue(q->lif->netdev, q->index); + q->stop++; + stopped = 1; + + /* Might race with ionic_tx_clean, check again */ + smp_rmb(); + if (ionic_q_has_space(q, ndescs)) { + netif_wake_subqueue(q->lif->netdev, q->index); + stopped = 0; + } + } + + return stopped; +} + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_queue *q = &lif->hwstamp_txq->q; + int err, ndescs; + + /* Does not stop/start txq, because we post to a separate tx queue + * for timestamping, and if a packet can't be posted immediately to + * the timestamping 
queue, it is dropped. + */ + + ndescs = ionic_tx_descs_needed(q, skb); + if (unlikely(ndescs < 0)) + goto err_out_drop; + + if (unlikely(!ionic_q_has_space(q, ndescs))) + goto err_out_drop; + + skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP; + if (skb_is_gso(skb)) + err = ionic_tx_tso(q, skb); + else + err = ionic_tx(q, skb); + + if (err) + goto err_out_drop; + + return NETDEV_TX_OK; + +err_out_drop: + q->drop++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} +#endif + +netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + u16 queue_index = skb_get_queue_mapping(skb); + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_queue *q; + int ndescs; + int err; + + if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode) + return ionic_start_hwstamp_xmit(skb, netdev); +#endif + + if (unlikely(queue_index >= lif->nxqs)) + queue_index = 0; + q = &lif->txqcqs[queue_index]->q; + + ndescs = ionic_tx_descs_needed(q, skb); + if (ndescs < 0) + goto err_out_drop; + + if (unlikely(ionic_maybe_stop_tx(q, ndescs))) + return NETDEV_TX_BUSY; + + if (skb_is_gso(skb)) + err = ionic_tx_tso(q, skb); + else + err = ionic_tx(q, skb); + + if (err) + goto err_out_drop; + + /* Stop the queue if there aren't descriptors for the next packet. + * Since our SG lists per descriptor take care of most of the possible + * fragmentation, we don't need to have many descriptors available. + */ + ionic_maybe_stop_tx(q, 4); + + return NETDEV_TX_OK; + +err_out_drop: + q->stop++; + q->drop++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_txrx.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_txrx.h new file mode 100644 index 0000000000..0d9bf7ac74 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/ionic_txrx.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */ + +#ifndef _IONIC_TXRX_H_ +#define _IONIC_TXRX_H_ + +void ionic_rx_flush(struct ionic_cq *cq); +void ionic_tx_flush(struct ionic_cq *cq); + +void ionic_rx_fill(struct ionic_queue *q); +void ionic_rx_empty(struct ionic_queue *q); +void ionic_tx_empty(struct ionic_queue *q); +int ionic_rx_napi(struct napi_struct *napi, int budget); +int ionic_tx_napi(struct napi_struct *napi, int budget); +int ionic_txrx_napi(struct napi_struct *napi, int budget); +netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev); + +bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); +bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); + +#endif /* _IONIC_TXRX_H_ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/kcompat.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/kcompat.c new file mode 100644 index 0000000000..3357b7d575 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/kcompat.c @@ -0,0 +1,2609 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2019 Intel Corporation. 
*/ + +#include "kcompat.h" + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__ +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i=0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? '0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while(size-->0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base==8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base==16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. 
*/ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt+1)) { + str = get_mac(str, end, va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? 
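+ * (For reference: C99 7.19.6.1 defines %n as storing the number of
+ * characters written so far and says nothing special about the
+ * truncated-output case; mainline vsnprintf() later dropped %n
+ * support altogether for security reasons, so this path is historical.)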
*/ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + 
return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + * @return: The next set bit in the memory region + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? 
size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? (size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. 
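+ * In that case the pskb_expand_head() call below must grow the
+ * tailroom (and __pskb_pull_tail() linearize the data) before the
+ * pad bytes can be zeroed.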
*/ + if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + 
size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 
1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) +{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_master(struct pci_dev *dev) +{ + __kc_pci_set_master(dev, false); +} +#endif /* < 2.6.29 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to + pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + u16 qoffset = 0; + u16 qcount = num_tx_queues; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = 
(__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + hash = jhash_1word(hash, _kc_hashrnd); + + return (u16) (((u64) hash * qcount) >> 32) + qoffset; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +u8 _kc_netdev_get_num_tc(struct net_device *dev) +{ + return 0; +} + +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + return -EINVAL; +} + +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up) +{ + return 0; +} + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec
r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) +#ifdef CONFIG_XPS +#if NR_CPUS < 64 +#define _KC_MAX_XPS_CPUS NR_CPUS +#else +#define _KC_MAX_XPS_CPUS 64 +#endif + +/* + * netdev_queue sysfs structures and functions. 
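+ * This is effectively a backport of netif_set_xps_queue(): older
+ * kernels expose no in-kernel API for XPS, so the CPU mask is
+ * rendered as a bitmap string and written through the Tx queue's
+ * "xps_cpus" sysfs attribute below.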
+ */ +struct _kc_netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, char *buf); + ssize_t (*store)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); +}; + +#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ + struct _kc_netdev_queue_attribute, attr) + +int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, index); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + struct netdev_tx_queue_extended *txq_ext = + netdev_extended(dev)->_tx_ext + index; + struct kobj_type *ktype = txq_ext->kobj.ktype; +#else + struct kobj_type *ktype = txq->kobj.ktype; +#endif + struct _kc_netdev_queue_attribute *xps_attr; + struct attribute *attr = NULL; + int i, len, err; +#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) + char buf[_KC_XPS_BUFLEN]; + + if (!ktype) + return -ENOMEM; + + /* attempt to locate the XPS attribute in the Tx queue */ + for (i = 0; (attr = ktype->default_attrs[i]); i++) { + if (!strcmp("xps_cpus", attr->name)) + break; + } + + /* if we did not find it return an error */ + if (!attr) + return -EINVAL; + + /* copy the mask into a string */ + len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, + cpumask_bits(mask), _KC_MAX_XPS_CPUS); + if (!len) + return -ENOMEM; + + xps_attr = to_kc_netdev_queue_attr(attr); + + /* Store the XPS value using the SYSFS store call */ + err = xps_attr->store(txq, xps_attr, buf, len); + + /* we only had an error on err < 0 */ + return (err < 0) ? err : 0; +} +#endif /* CONFIG_XPS */ +#ifdef HAVE_NETDEV_SELECT_QUEUE +static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + struct xps_dev_maps *dev_maps; + struct xps_map *map; + int queue_index = -1; + + rcu_read_lock(); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); +#else + dev_maps = rcu_dereference(dev->xps_maps); +#endif + if (dev_maps) { + map = rcu_dereference( + dev_maps->cpu_map[raw_smp_processor_id()]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else { + u32 hash; + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16) skb->protocol ^ + skb->rxhash; + hash = jhash_1word(hash, _kc_hashrnd); + queue_index = map->queues[ + ((u64)hash * map->len) >> 32]; + } + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + } + rcu_read_unlock(); + + return queue_index; +#else + return -1; +#endif +} + +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index; + + if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { +#ifdef CONFIG_XPS + if (!skb->ooo_okay) +#endif + return queue_index; + } + + new_index = kc_get_xps_queue(dev, skb); + if (new_index < 0) + new_index = skb_tx_hash(dev, skb); + + if (queue_index != new_index && sk) { + struct dst_entry *dst = + rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, new_index); + + } + + return new_index; +} + +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if ( 
LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +static const unsigned char __maybe_unused pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + 
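+/* Illustrative sketch of how the pcie_link_speed[] table above is
+ * indexed: PCI_EXP_LNKSTA_CLS, PCI_EXP_LNKSTA_NLW and
+ * PCI_EXP_LNKSTA_NLW_SHIFT are the standard <uapi/linux/pci_regs.h>
+ * definitions; the helper itself is hypothetical and only for
+ * documentation.
+ */
+static inline enum pci_bus_speed
+__example_lnksta_to_speed(u16 lnksta, enum pcie_link_width *width)
+{
+	/* bits [3:0] select the speed, bits [9:4] carry the lane width */
+	*width = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+	return pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+}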
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + int ret; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + + ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} + +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int err = dma_set_mask(dev, mask); + + if (!err) + /* coherent mask for the same size will always succeed if + * dma_set_mask does. However we store the error anyways, due + * to some kernels which use gcc's warn_unused_result on their + * definition of dma_set_coherent_mask. + */ + err = dma_set_coherent_mask(dev, mask); + return err; +} +#endif /* 3.13.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) +/****************************************************************************** + * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright, + * inferred copyright from kernel + */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags) +{ + unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); + u8 nexthdr = ipv6_hdr(skb)->nexthdr; + unsigned int len; + bool found; + +#define __KC_IP6_FH_F_FRAG BIT(0) +#define __KC_IP6_FH_F_AUTH BIT(1) +#define __KC_IP6_FH_F_SKIP_RH BIT(2) + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), + &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) 
<< 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + len -= hdrlen; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} + +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const 
unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include +#endif +#ifdef HAVE_SCTP +#include +#endif + +u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev, + unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* 
handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, + int nmaskbits) +{ + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + int n = 0; + + if (len > 1) { + n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : + bitmap_scnprintf(buf, len, maskp, nmaskbits); + buf[n++] = '\n'; + buf[n] = '\0'; + } + return n; +} +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. 
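+ * The modulo below keeps the index valid even when callers pass
+ * i >= num_online_cpus().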
*/ + i %= num_online_cpus(); + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + /* Kernels prior to 2.6.28 do not have for_each_cpu or + * cpumask_of_node, so just use for_each_online_cpu() + */ + for_each_online_cpu(cpu) + if (i-- == 0) + return cpu; + + return 0; +#else + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. */ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } +#endif /* KERNEL_VERSION >= 2.6.28 */ + BUG(); +} +#endif +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#ifdef CONFIG_SPARC +#include +#include +#endif +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused) +{ +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \ + !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ + !defined(CONFIG_SPARC)) + return -ENODEV; +#else + const unsigned char *addr; + struct device_node *dp; + + if (dev_is_pci(dev)) + dp = pci_device_to_OF_node(to_pci_dev(dev)); + else +#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) + dp = dev->of_node; +#else + dp = NULL; +#endif + + addr = NULL; + if (dp) + addr = of_get_mac_address(dp); +#ifdef CONFIG_SPARC + /* Kernel hasn't implemented arch_get_platform_mac_address, but we + * should handle the SPARC case here since it was supported + * originally. This is replaced by arch_get_platform_mac_address() + * upstream. 
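+ * (On SPARC the IDPROM carries the machine's station address, so
+ * idprom->id_ethaddr is a reasonable last-resort source here.)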
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif /* < 4.5.0 */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +const char *_kc_phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; +#ifdef SPEED_100000 + case SPEED_100000: + return "100Gbps"; +#endif + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ) +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ + (speed) == PCIE_SPEED_2_5GT ? 
2500*8/10 : \ + 0) + +static u32 +_kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. + */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) { + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + limiting_dev ? 
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,13,0))
+void _kc_ethtool_sprintf(u8 **data, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
+	va_end(args);
+
+	*data += ETH_GSTRING_LEN;
+}
+#endif /* 5.13.0 */
diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/kcompat.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/kcompat.h
new file mode 100644
index 0000000000..1f8161f5a9
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/kcompat.h
@@ -0,0 +1,6850 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 1999 - 2019 Intel Corporation. */
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#ifndef KERNEL_VERSION
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#endif
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+#include <net/devlink.h>
+#endif
+
+#ifndef GCC_VERSION
+#define GCC_VERSION (__GNUC__ * 10000 \
+		     + __GNUC_MINOR__ * 100 \
+		     + __GNUC_PATCHLEVEL__)
+#endif /* GCC_VERSION */
+
+/* Backport macros for controlling GCC diagnostics */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) )
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(GCC diagnostic s))
+#else
+#define __diag(s)
+#endif /* GCC_VERSION >= 4.6 */
+#define __diag_push() __diag(push)
+#define __diag_pop() __diag(pop)
+#endif /* LINUX_VERSION < 4.18.0 */
+
+#ifndef NSEC_PER_MSEC
+#define NSEC_PER_MSEC 1000000L
+#endif
+#include
+/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
+#ifndef UTS_RELEASE
+/* utsrelease.h changed locations in 2.6.33 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#include <linux/utsrelease.h>
+#else
+#include <generated/utsrelease.h>
+#endif
+#endif
+
+/* NAPI enable/disable flags here */
+#define NAPI
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+	u16 vector; /* kernel uses to write allocated vector */
+	u16 entry;  /* driver uses to specify entry, OS writes */
+};
+#endif
+#undef pci_enable_msi
+#define pci_enable_msi(a) -ENOTSUPP
+#undef pci_disable_msi
+#define pci_disable_msi(a) do {} while (0)
+#undef pci_enable_msix
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#undef pci_disable_msix
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
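Because the stubs above collapse pci_enable_msi() to a constant -ENOTSUPP when MSI support is absent, IRQ setup can be written once and degrade to legacy INTx automatically. A hedged sketch of that pattern (the function name and message are illustrative, not taken from the ionic driver):

```c
/* Illustrative: try MSI first, fall back to legacy INTx */
static int example_setup_irq(struct pci_dev *pdev)
{
	int err = pci_enable_msi(pdev);	/* constant -ENOTSUPP when stubbed */

	if (err)
		dev_info(&pdev->dev, "MSI unavailable (%d), using legacy IRQ\n",
			 err);
	return 0;			/* pdev->irq is usable either way */
}
```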
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#else
+#define _Bool char
+#endif
+
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#undef __always_unused
+#define __always_unused __attribute__((__unused__))
+
+#undef __maybe_unused
+#define __maybe_unused __attribute__((__unused__))
+
+/* kernels less than 2.4.14 don't have this */
+#ifndef ETH_P_8021Q
+#define ETH_P_8021Q 0x8100
+#endif
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef uninitialized_var
+#define uninitialized_var(x) x = x
+#endif
+
+#ifndef PCIE_LINK_STATE_L0S
+#define PCIE_LINK_STATE_L0S 1
+#endif
+#ifndef PCIE_LINK_STATE_L1
+#define PCIE_LINK_STATE_L1 2
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#define free_netdev(x) kfree(x)
+#endif
+
+#ifndef dynamic_hex_dump
+#define dynamic_hex_dump(...)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* if we do not have the infrastructure to detect if skb_header is cloned
+   just return false in all cases */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef NETIF_F_GRO
+#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
+	vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
+#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
+#endif
+
+#ifndef NETIF_F_SCTP_CSUM
+#define NETIF_F_SCTP_CSUM 0
+#endif
+
+#ifndef NETIF_F_LRO
+#define NETIF_F_LRO BIT(15)
+#endif
+
+#ifndef NETIF_F_NTUPLE
+#define NETIF_F_NTUPLE BIT(27)
+#endif
+
+#ifndef NETIF_F_ALL_FCOE
+#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+			  NETIF_F_FSO)
+#endif
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
+
+#ifndef IPPROTO_UDPLITE
+#define IPPROTO_UDPLITE 136
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef MII_RESV1
+#define MII_RESV1 0x17 /* Reserved... */
+#endif
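The NETIF_F_GRO fallback above lets receive paths written against the GRO API compile on pre-GRO kernels, where napi_gro_receive() quietly becomes plain netif_receive_skb(). A minimal illustrative RX completion (the queue struct is hypothetical):

```c
struct example_queue {			/* hypothetical, for illustration */
	struct net_device *netdev;
	struct napi_struct napi;
};

static void example_rx_clean(struct example_queue *q, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, q->netdev);
	/* expands to netif_receive_skb(skb) on kernels without NETIF_F_GRO */
	napi_gro_receive(&q->napi, skb);
}
```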
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+	.vendor = (vend), .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef node_online
+#define node_online(node) ((node) == 0)
+#endif
+
+#ifndef cpu_online
+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if defined(__i386__) || defined(__x86_64__)
+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+#endif
+
+/* taken from 2.6.24 definition in linux/kernel.h */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
+#endif
+
+#ifdef IS_ENABLED
+#undef IS_ENABLED
+#undef __ARG_PLACEHOLDER_1
+#undef config_enabled
+#undef _config_enabled
+#undef __config_enabled
+#undef ___config_enabled
+#endif
+
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) val
+
+#define IS_ENABLED(option) \
+	(config_enabled(option) || config_enabled(option##_MODULE))
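The placeholder trick above merits a worked expansion: config_enabled() pastes the option's value onto __ARG_PLACEHOLDER_, and only a value of exactly 1 produces __ARG_PLACEHOLDER_1, whose expansion to "0," shifts a 1 into the val slot of ___config_enabled():

```c
/* Expansion trace, shown as comments (CONFIG_FOO is hypothetical):
 *
 * Defined as 1:
 *   config_enabled(CONFIG_FOO)
 *     -> __config_enabled(__ARG_PLACEHOLDER_1)
 *     -> ___config_enabled(0, 1, 0)    // "0," expands, shifting 1 into val
 *     -> 1
 *
 * Undefined (or defined to anything other than 1):
 *   config_enabled(CONFIG_FOO)
 *     -> __config_enabled(__ARG_PLACEHOLDER_CONFIG_FOO)  // stays one token
 *     -> ___config_enabled(__ARG_PLACEHOLDER_CONFIG_FOO 1, 0)
 *     -> 0                             // val is the trailing 0
 */
```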
+
+#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
+struct _kc_vlan_ethhdr {
+	unsigned char	h_dest[ETH_ALEN];
+	unsigned char	h_source[ETH_ALEN];
+	__be16		h_vlan_proto;
+	__be16		h_vlan_TCI;
+	__be16		h_vlan_encapsulated_proto;
+};
+#define vlan_ethhdr _kc_vlan_ethhdr
+struct _kc_vlan_hdr {
+	__be16		h_vlan_TCI;
+	__be16		h_vlan_encapsulated_proto;
+};
+#define vlan_hdr _kc_vlan_hdr
+#define vlan_tx_tag_present(_skb) 0
+#define vlan_tx_tag_get(_skb) 0
+#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
+
+#ifndef VLAN_PRIO_SHIFT
+#define VLAN_PRIO_SHIFT 13
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_2_5GB
+#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_5_0GB
+#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_8_0GB
+#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X1
+#define PCI_EXP_LNKSTA_NLW_X1 0x0010
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X2
+#define PCI_EXP_LNKSTA_NLW_X2 0x0020
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X4
+#define PCI_EXP_LNKSTA_NLW_X4 0x0040
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X8
+#define PCI_EXP_LNKSTA_NLW_X8 0x0080
+#endif
+
+#ifndef __GFP_COLD
+#define __GFP_COLD 0
+#endif
+
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
+#ifndef IP_OFFSET
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+#endif
+
+/*****************************************************************************/
+/* Installations with ethtool version without eeprom, adapter id, or statistics
+ * support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+	u32 cmd;
+	char driver[32];
+	char version[32];
+	char fw_version[32];
+	char bus_info[32];
+	char reserved1[32];
+	char reserved2[16];
+	u32 n_stats;
+	u32 testinfo_len;
+	u32 eedump_len;
+	u32 regdump_len;
+};
+
+struct ethtool_stats {
+	u32 cmd;
+	u32 n_stats;
+	u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+	ETH_SS_TEST = 0,
+	ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+	u32 cmd;		/* ETHTOOL_GSTRINGS */
+	u32 string_set;		/* string set id e.g. ETH_SS_TEST, etc. */
+	u32 len;		/* number of strings in the string set */
+	u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+	ETH_TEST_FL_OFFLINE = BIT(0),
+	ETH_TEST_FL_FAILED = BIT(1),
+};
+struct ethtool_test {
+	u32 cmd;
+	u32 flags;
+	u32 reserved;
+	u32 len;
+	u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+	u32 cmd;
+	u32 magic;
+	u32 offset;
+	u32 len;
+	u8 data[0];
+};
+
+struct ethtool_value {
+	u32 cmd;
+	u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GWOL
+#define ETHTOOL_GWOL 0x5
+#define ETHTOOL_SWOL 0x6
+#define SOPASS_MAX 6
+struct ethtool_wolinfo {
+	u32 cmd;
+	u32 supported;
+	u32 wolopts;
+	u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+};
+#endif /* ETHTOOL_GWOL */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+	u32 cmd;
+	u32 version; /* driver-specific, indicates different chips/revs */
+	u32 len; /* bytes */
+	u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+	u32 cmd;	/* ETHTOOL_{G,S}COALESCE */
+
+	/* How many usecs to delay an RX interrupt after
+	 * a packet arrives. If 0, only rx_max_coalesced_frames
+	 * is used.
+	 */
+	u32 rx_coalesce_usecs;
+
+	/* How many packets to delay an RX interrupt after
+	 * a packet arrives. If 0, only rx_coalesce_usecs is
+	 * used. It is illegal to set both usecs and max frames
+	 * to zero as this would cause RX interrupts to never be
+	 * generated.
+	 */
+	u32 rx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host. Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32 rx_coalesce_usecs_irq;
+	u32 rx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay a TX interrupt after
+	 * a packet is sent. If 0, only tx_max_coalesced_frames
+	 * is used.
+	 */
+	u32 tx_coalesce_usecs;
+
+	/* How many packets to delay a TX interrupt after
+	 * a packet is sent. If 0, only tx_coalesce_usecs is
+	 * used. It is illegal to set both usecs and max frames
+	 * to zero as this would cause TX interrupts to never be
+	 * generated.
+	 */
+	u32 tx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host. Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32 tx_coalesce_usecs_irq;
+	u32 tx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay in-memory statistics
+	 * block updates. Some drivers do not have an in-memory
+	 * statistic block, and in such cases this value is ignored.
+	 * This value must not be zero.
+	 */
+	u32 stats_block_coalesce_usecs;
+
+	/* Adaptive RX/TX coalescing is an algorithm implemented by
+	 * some drivers to improve latency under low packet rates and
+	 * improve throughput under high packet rates. Some drivers
+	 * only implement one of RX or TX adaptive coalescing. Anything
+	 * not implemented by the driver causes these values to be
+	 * silently ignored.
+	 */
+	u32 use_adaptive_rx_coalesce;
+	u32 use_adaptive_tx_coalesce;
+
+	/* When the packet rate (measured in packets per second)
+	 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+	 * used.
+	 */
+	u32 pkt_rate_low;
+	u32 rx_coalesce_usecs_low;
+	u32 rx_max_coalesced_frames_low;
+	u32 tx_coalesce_usecs_low;
+	u32 tx_max_coalesced_frames_low;
+
+	/* When the packet rate is below pkt_rate_high but above
+	 * pkt_rate_low (both measured in packets per second) the
+	 * normal {rx,tx}_* coalescing parameters are used.
+	 */
+
+	/* When the packet rate (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+	 * used.
+	 */
+	u32 pkt_rate_high;
+	u32 rx_coalesce_usecs_high;
+	u32 rx_max_coalesced_frames_high;
+	u32 tx_coalesce_usecs_high;
+	u32 tx_max_coalesced_frames_high;
+
+	/* How often to do adaptive coalescing packet rate sampling,
+	 * measured in seconds. Must not be zero.
+	 */
+	u32 rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+	u32 cmd;	/* ETHTOOL_{G,S}RINGPARAM */
+
+	/* Read only attributes. These indicate the maximum number
+	 * of pending RX/TX ring entries the driver will allow the
+	 * user to set.
+	 */
+	u32 rx_max_pending;
+	u32 rx_mini_max_pending;
+	u32 rx_jumbo_max_pending;
+	u32 tx_max_pending;
+
+	/* Values changeable by the user. The valid values are
+	 * in the range 1 to the "*_max_pending" counterpart above.
+	 */
+	u32 rx_pending;
+	u32 rx_mini_pending;
+	u32 rx_jumbo_pending;
+	u32 tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+	u32 cmd;	/* ETHTOOL_{G,S}PAUSEPARAM */
+
+	/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+	 * being true) the user may set 'autoneg' here non-zero to have the
+	 * pause parameters be auto-negotiated too. In such a case, the
+	 * {rx,tx}_pause values below determine what capabilities are
+	 * advertised.
+	 *
+	 * If 'autoneg' is zero or the link is not being auto-negotiated,
+	 * then {rx,tx}_pause force the driver to use/not-use pause
+	 * flow control.
+	 */
+	u32 autoneg;
+	u32 rx_pause;
+	u32 tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
+				 * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
+				 * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN 32
+#endif
+
+#ifndef WAKE_FILTER
+#define WAKE_FILTER BIT(7)
+#endif
+
+#ifndef SPEED_2500
+#define SPEED_2500 2500
+#endif
+#ifndef SPEED_5000
+#define SPEED_5000 5000
+#endif
+#ifndef SPEED_14000
+#define SPEED_14000 14000
+#endif
+#ifndef SPEED_25000
+#define SPEED_25000 25000
+#endif
+#ifndef SPEED_50000
+#define SPEED_50000 50000
+#endif
+#ifndef SPEED_56000
+#define SPEED_56000 56000
+#endif
+#ifndef SPEED_100000
+#define SPEED_100000 100000
+#endif
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+
+#ifndef AX_RELEASE_CODE
+#define AX_RELEASE_CODE 0
+#endif
+
+#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
+#endif
+
+#ifndef RHEL_RELEASE_CODE
+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
+#define RHEL_RELEASE_CODE 0
+#endif
+
+/* RHEL 7 didn't backport the parameter change in
+ * create_singlethread_workqueue.
+ * If/when RH corrects this we will want to tighten up the version check.
+ */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))
+#undef create_singlethread_workqueue
+#define create_singlethread_workqueue(name)	\
+	alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+#endif
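The release macros pack major.minor into one comparable integer, major in the high byte and minor in the low byte, which is what makes range checks such as `RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)` meaningful. A quick standalone check of the encoding (macro name prefixed to mark it as illustrative):

```c
/* Illustrative verification of the (major << 8) + minor packing */
#define EX_RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))

_Static_assert(EX_RHEL_RELEASE_VERSION(7, 5) == 0x0705, "7.5 packs to 1797");
_Static_assert(EX_RHEL_RELEASE_VERSION(7, 5) < EX_RHEL_RELEASE_VERSION(8, 0),
	       "byte packing preserves version ordering");
```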
+
+/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find
+ * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new
+ * enough versions of Ubuntu. Otherwise you can simply see it in the output of
+ * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in
+ * the linux-source package, but in the linux-headers package. It begins to
+ * appear in later releases of 14.04 and 14.10.
+ *
+ * Ex:
+ *
+ * $ uname -r
+ * 3.13.0-45-generic
+ * ABI is 45
+ *
+ * $ uname -r
+ * 3.16.0-23-generic
+ * ABI is 23
+ */
+#ifndef UTS_UBUNTU_RELEASE_ABI
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#else
+/* Ubuntu does not provide actual release version macro, so we use the kernel
+ * version plus the ABI to generate a unique version code specific to Ubuntu.
+ * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to
+ * ignore differences in sublevel which are not important since we have the
+ * ABI value. Otherwise, it becomes impossible to correlate ABI to version for
+ * ordering checks.
+ */
+
+/* The UTS_UBUNTU_RELEASE_ABI value for upstream kernels built as Debian
+ * packages comes out to things like 050807, which looks like an octal because
+ * it starts with a '0', but has an '8' which is an invalid octal digit, and
+ * the preprocessor complains and quits. Until we need to care about
+ * differences in that number, we can simply replace it with a fake value.
+ */
+#undef UTS_UBUNTU_RELEASE_ABI
+#define UTS_UBUNTU_RELEASE_ABI 0
+
+#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \
+			     UTS_UBUNTU_RELEASE_ABI)
+
+#if UTS_UBUNTU_RELEASE_ABI > 255
+#error UTS_UBUNTU_RELEASE_ABI is too large...
+#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */
+
+#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) )
+/* Our version code scheme does not make sense for non 3.x or newer kernels,
+ * and we have no support in kcompat for this scenario. Thus, treat this as a
+ * non-Ubuntu kernel. Possibly might be better to error here.
+ */
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#endif
+
+#endif
+
+/* Note that the 3rd digit is always zero, and will be ignored. This is
+ * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux
+ * version codes are 3 digit, this 3rd digit is superseded by the ABI value.
+ */
+#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d))
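A worked example of the packing above: Ubuntu's 3.13.0-45 kernel has LINUX_VERSION_CODE 0x030d00; masking the sublevel byte, shifting left by 8, and adding the ABI yields 0x030d002d, and UBUNTU_VERSION(3,13,0,45) produces the same code, so both sides of an ordering check agree:

```c
/* Illustrative check of the Ubuntu version-code packing */
#define EX_KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
#define EX_UBUNTU_VERSION(a,b,c,d) ((EX_KERNEL_VERSION(a,b,0) << 8) + (d))

/* 3.13.0 -> 0x030d00; << 8 -> 0x030d0000; + ABI 45 (0x2d) -> 0x030d002d */
_Static_assert(EX_UBUNTU_VERSION(3, 13, 0, 45) == 0x030d002d, "packing");
_Static_assert(EX_UBUNTU_VERSION(3, 13, 0, 45) < EX_UBUNTU_VERSION(3, 16, 0, 23),
	       "kernel version dominates the ABI in ordering");
```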
+
+/* SuSE version macros are the same as Linux kernel version macro */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a,b,c)	KERNEL_VERSION(a,b,c)
+#endif
+#define SLE_LOCALVERSION(a,b,c)	KERNEL_VERSION(a,b,c)
+#ifdef CONFIG_SUSE_KERNEL
+#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
+/* SLES11 GA is 2.6.27 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) )
+/* SLES11 SP2 GA is 3.0.13-0.27 */
+#define SLE_VERSION_CODE SLE_VERSION(11,2,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76)))
+/* SLES11 SP3 GA is 3.0.76-0.11 */
+#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101))
+  #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0))
+  /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */
+  #define SLE_VERSION_CODE SLE_VERSION(11,2,0)
+  #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0))
+  /* most SLES11sp3 update kernels */
+  #define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+  #else
+  /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */
+  #define SLE_VERSION_CODE SLE_VERSION(11,4,0)
+  #endif
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28))
+/* SLES12 GA is 3.12.28-4
+ * kernel updates 3.12.xx-<33 through 52>[.yy] */
+#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49))
+/* SLES12 SP1 GA is 3.12.49-11
+ * updates 3.12.xx-60.yy where xx={51..} */
+#define SLE_VERSION_CODE SLE_VERSION(12,1,0)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \
+       (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \
+        LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \
+        SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \
+        SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0)))
+/* SLES12 SP2 GA is 4.4.21-69.
+ * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59}
+ * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120}
+ * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */
+#define SLE_VERSION_CODE SLE_VERSION(12,2,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \
+       LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \
+       LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \
+       (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \
+       (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \
+        SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \
+        LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \
+        SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \
+        SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) )
+/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3.
+ * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92}
+ * SLES12 SP3 updates conflicting with SP2 are:
+ *   - 4.4.103-6.33.1, 4.4.103-6.38.1
+ *   - 4.4.{114,120}-94.nn.y */
+#define SLE_VERSION_CODE SLE_VERSION(12,3,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \
+       (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \
+       (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \
+        SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0))))
+/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. */
+#define SLE_VERSION_CODE SLE_VERSION(12,4,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \
+       (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \
+        SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \
+        SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \
+        (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \
+         SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \
+        (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \
+         SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0))))
+/* SLES15 Beta1 is 4.12.14-2
+ * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136},
+ * and 4.12.14-150.14.
+ */
+#define SLE_VERSION_CODE SLE_VERSION(15,0,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \
+       SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0))
+/* SLES15 SP1 Beta1 is 4.12.14-25.23 */
+#define SLE_VERSION_CODE SLE_VERSION(15,1,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(5,3,13))
+/* SLES15 SP2 Beta1 is 5.3.13 */
+#define SLE_VERSION_CODE SLE_VERSION(15,2,0)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,18))
+#if (SLE_LOCALVERSION_CODE < KERNEL_VERSION(47,0,0))
+/* SLES15 SP2 Beta1 is 5.3.18 */
+#define SLE_VERSION_CODE SLE_VERSION(15,2,0)
+#else /* local version >= 47 */
+/* SLES15 SP3 Beta1 is 5.3.18 */
+#define SLE_VERSION_CODE SLE_VERSION(15,3,0)
+#endif
+
+/* new SLES kernels must be added here with >= based on kernel
+ * the idea is to order from newest to oldest and just catch all
+ * of them using the >=
+ */
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
+#endif /* CONFIG_SUSE_KERNEL */
+#ifndef SLE_VERSION_CODE
+#define SLE_VERSION_CODE 0
+#endif /* SLE_VERSION_CODE */
+#ifndef SLE_LOCALVERSION_CODE
+#define SLE_LOCALVERSION_CODE 0
+#endif /* SLE_LOCALVERSION_CODE */
+
+#ifdef __KLOCWORK__
+/* The following are not compiled into the binary driver; they are here
+ * only to tune Klocwork scans to workaround false-positive issues.
+ */
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#define memcpy(dest, src, len)	memcpy_s(dest, len, src, len)
+#define memset(dest, ch, len)	memset_s(dest, len, ch, len)
+
+static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags = 0;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old & ~mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr)
+
+static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags = 0;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old | mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr)
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#undef dev_dbg
+#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg)
+#undef pr_debug
+#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg)
+#endif /* CONFIG_DYNAMIC_DEBUG */
+
+#undef hlist_for_each_entry_safe
+#define hlist_for_each_entry_safe(pos, n, head, member)			     \
+	for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \
+					      member);			     \
+	     pos;							     \
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#ifdef uninitialized_var
+#undef uninitialized_var
+#define uninitialized_var(x) x = *(&(x))
+#endif
+#endif /* __KLOCWORK__ */
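test_and_set_bit()/test_and_clear_bit() are the usual way drivers implement one-shot state flags, and the redefinitions above only swap in a Klocwork-friendly spelling of the same atomic semantics. Typical usage, with illustrative names:

```c
#include <linux/bitops.h>
#include <linux/errno.h>

enum { EXAMPLE_F_RESETTING };		/* bit number, illustrative */

static int example_begin_reset(unsigned long *state)
{
	/* returns the previous bit value: non-zero means a reset already runs */
	if (test_and_set_bit(EXAMPLE_F_RESETTING, state))
		return -EBUSY;
	return 0;
}

static void example_end_reset(unsigned long *state)
{
	clear_bit(EXAMPLE_F_RESETTING, state);	/* release the flag */
}
```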
+
+/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const
+ * char * strings. Unfortunately, the implementation of do_trace_printk does
+ * this, in order to add a storage attribute to the memory. This was fixed in
+ * GCC 5.1, but we still use older distributions built with GCC 4.x.
+ *
+ * The string pointer is only passed as a const char * to the __trace_bprintk
+ * function. Since that function has the __printf attribute, it will trigger
+ * the warnings. We can't remove the attribute, so instead we'll use the
+ * __diag macro to disable -Wformat-nonliteral around the call to
+ * __trace_bprintk.
+ */
+#if GCC_VERSION < 50100
+#define __trace_bprintk(ip, fmt, args...) ({		\
+	int err;					\
+	__diag_push();					\
+	__diag(ignored "-Wformat-nonliteral");		\
+	err = __trace_bprintk(ip, fmt, ##args);		\
+	__diag_pop();					\
+	err;						\
+})
+#endif /* GCC_VERSION < 5.1.0 */
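The same push/ignore/pop pattern works for any diagnostic that must be silenced for a single statement rather than a whole translation unit. A minimal illustration using the __diag helpers defined earlier in this header (the function is hypothetical):

```c
static int example_call_nonliteral(const char *fmt)
{
	int n;

	__diag_push();
	__diag(ignored "-Wformat-nonliteral");	/* silence just this call */
	n = printk(fmt);			/* format is not a string literal */
	__diag_pop();				/* restore the warning state */

	return n;
}
```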
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data)		\
+	do {						\
+		INIT_LIST_HEAD(&(_tq)->list);		\
+		(_tq)->sync = 0;			\
+		(_tq)->routine = _routine;		\
+		(_tq)->data = _data;			\
+	} while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR	0x00	/* Basic mode control register */
+#define MII_BMSR	0x01	/* Basic mode status register  */
+#define MII_PHYSID1	0x02	/* PHYS ID 1                   */
+#define MII_PHYSID2	0x03	/* PHYS ID 2                   */
+#define MII_ADVERTISE	0x04	/* Advertisement control reg   */
+#define MII_LPA		0x05	/* Link partner ability reg    */
+#define MII_EXPANSION	0x06	/* Expansion register          */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX	0x0100	/* Full duplex                 */
+#define BMCR_ANENABLE	0x1000	/* Enable auto negotiation     */
+/* Basic mode status register. */
+#define BMSR_ERCAP	0x0001	/* Ext-reg capability          */
+#define BMSR_ANEGCAPABLE 0x0008	/* Able to do auto-negotiation */
+#define BMSR_10HALF	0x0800	/* Can do 10mbps, half-duplex  */
+#define BMSR_10FULL	0x1000	/* Can do 10mbps, full-duplex  */
+#define BMSR_100HALF	0x2000	/* Can do 100mbps, half-duplex */
+#define BMSR_100FULL	0x4000	/* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA		0x0001	/* Only selector supported     */
+#define ADVERTISE_10HALF	0x0020	/* Try for 10mbps half-duplex  */
+#define ADVERTISE_10FULL	0x0040	/* Try for 10mbps full-duplex  */
+#define ADVERTISE_100HALF	0x0080	/* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL	0x0100	/* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+		       ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE	0x0004	/* This enables npage words    */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+			       PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+			       ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.9 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+	#define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
+#undef min
+#define min(x,y) ({ \
+	const typeof(x) _x = (x);	\
+	const typeof(y) _y = (y);	\
+	(void) (&_x == &_y);		\
+	_x < _y ? _x : _y; })
+
+#undef max
+#define max(x,y) ({ \
+	const typeof(x) _x = (x);	\
+	const typeof(y) _y = (y);	\
+	(void) (&_x == &_y);		\
+	_x > _y ? _x : _y; })
+
+#define min_t(type,x,y) ({ \
+	type _x = (x); \
+	type _y = (y); \
+	_x < _y ? _x : _y; })
+
+#define max_t(type,x,y) ({ \
+	type _x = (x); \
+	type _y = (y); \
+	_x > _y ? _x : _y; })
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head) \
+	for (pos = (head)->next, n = pos->next; pos != (head); \
+	     pos = n, n = pos->next)
+#endif
+
+#ifndef ____cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#else
+#define ____cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
+#endif
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
+#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
+#else /* 2.4.8 => 2.4.9 */
+int snprintf(char * buf, size_t size, const char *fmt, ...);
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#endif
+#endif /* 2.4.10 -> 2.4.6 */
+
+
+/*****************************************************************************/
+/* 2.4.12 => 2.4.10 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+	NETIF_MSG_DRV		= 0x0001,
+	NETIF_MSG_PROBE		= 0x0002,
+	NETIF_MSG_LINK		= 0x0004,
+	NETIF_MSG_TIMER		= 0x0008,
+	NETIF_MSG_IFDOWN	= 0x0010,
+	NETIF_MSG_IFUP		= 0x0020,
+	NETIF_MSG_RX_ERR	= 0x0040,
+	NETIF_MSG_TX_ERR	= 0x0080,
+	NETIF_MSG_TX_QUEUED	= 0x0100,
+	NETIF_MSG_INTR		= 0x0200,
+	NETIF_MSG_TX_DONE	= 0x0400,
+	NETIF_MSG_RX_STATUS	= 0x0800,
+	NETIF_MSG_PKTDATA	= 0x1000,
+	NETIF_MSG_HW		= 0x2000,
+	NETIF_MSG_WOL		= 0x4000,
+};
+
+#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
+#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
+#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
+#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
+#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
+#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
+#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
+#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
+#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
+#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
+#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
+#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
+#endif /* !HAVE_NETIF_MSG */
+#endif /* 2.4.12 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+	#define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax() rep_nop()
+#endif
+
+struct vlan_ethhdr {
+	unsigned char h_dest[ETH_ALEN];
+	unsigned char h_source[ETH_ALEN];
+	unsigned short h_vlan_proto;
+	unsigned short h_vlan_TCI;
+	unsigned short h_vlan_encapsulated_proto;
+};
+#endif /* 2.4.13 => 2.4.12 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
+
+#ifndef __devexit_p
+	#define __devexit_p(x) &(x)
+#endif
+
+#endif /* 2.4.17 => 2.4.13 */
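The msg_enable bits above drive per-category log gating: drivers test them before emitting a message, and `ethtool -s <dev> msglvl` flips them at runtime. A typical guard, with an illustrative adapter struct:

```c
/* Illustrative: gate a link-change message on the NETIF_MSG_LINK bit */
struct example_adapter {
	struct net_device *netdev;
	u32 msg_enable;		/* bitmask of NETIF_MSG_* categories */
};

static void example_report_link(struct example_adapter *adapter, bool up)
{
	if (netif_msg_link(adapter))
		netdev_info(adapter->netdev, "link %s\n", up ? "up" : "down");
}
```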
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
+#define NETIF_MSG_HW	0x2000
+#define NETIF_MSG_WOL	0x4000
+
+#ifndef netif_msg_hw
+#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
+#endif
+#ifndef netif_msg_wol
+#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
+#endif
+#endif /* 2.4.18 */
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef NAPI
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#define pci_name(x) ((x)->slot_name)
+
+#ifndef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full	BIT(12)
+#endif
+#ifndef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full	BIT(12)
+#endif
+#endif
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+		/* No hurry */
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(1);
+	}
+}
+#endif
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+	clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+	spin_lock_bh(&dev->xmit_lock);
+	netif_stop_queue(dev);
+	spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#else /* 2.4.23 => 2.4.22 */
+#define HAVE_SCTP
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
+#define ETHTOOL_OPS_COMPAT
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) )
+#define __user
+#endif /* < 2.4.27 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
+#define sk_protocol protocol
+#define pci_get_device pci_find_device
+#endif /* 2.5.70 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
+
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+	/* use default */
+	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+		return default_msg_enable_bits;
+	if (debug_value == 0) /* no output */
+		return 0;
+	/* set low N bits */
+	return (1 << debug_value) - 1;
+}
+#endif
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+    (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+     ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#include
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+/*
+ * Most of the dma compat code is copied/modified from the 2.4.37
+ * /include/linux/libata-compat.h header file
+ */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+	DMA_BIDIRECTIONAL = 0,
+	DMA_TO_DEVICE = 1,
+	DMA_FROM_DEVICE = 2,
+	DMA_NONE = 3,
+};
+
+struct device {
+	struct pci_dev pdev;
+};
+
+static inline struct pci_dev *to_pci_dev (struct device *dev)
+{
+	return (struct pci_dev *) dev;
+}
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+	return (struct device *) pdev;
+}
+#define pdev_printk(lvl, pdev, fmt, args...) \
+	printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
+#define dev_err(dev, fmt, args...) \
+	pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
+#define dev_info(dev, fmt, args...) \
+	pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
+#define dev_warn(dev, fmt, args...) \
+	pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...) \
+	pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
+#define dev_dbg(dev, fmt, args...) \
+	pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
+
+/* NOTE: dangerous! we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+	pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+	pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+	pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+	pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+	pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+	pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_sg(dev, sg, nents, dir) \
+	pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+	pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+	pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+	pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+	pci_set_dma_mask(to_pci_dev(dev),(mask))
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+	struct hlist_node *next = n->next;
+	struct hlist_node **pprev = n->pprev;
+	*pprev = next;
+	if (next)
+		next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+	__hlist_del(n);
+	n->next = NULL;
+	n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+	struct hlist_node *first = h->first;
+	n->next = first;
+	if (first)
+		first->pprev = &n->next;
+	h->first = n;
+	n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+	return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+	h->next = NULL;
+	h->pprev = NULL;
+}
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+	return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.17 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
+#ifndef rcu_head
+struct __kc_callback_head {
+	struct __kc_callback_head *next;
+	void (*func)(struct callback_head *head);
+};
+#define rcu_head __kc_callback_head
+#endif
+
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
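The hlist shim above reproduces the kernel's singly-headed, doubly-linked hash-bucket list; the pprev back-pointer is what lets hlist_del() unlink a node without knowing which bucket holds it. A minimal use, with illustrative names:

```c
/* Illustrative hash-bucket usage of the hlist primitives defined above */
struct example_entry {
	int key;
	struct hlist_node node;
};

static HLIST_HEAD(example_bucket);	/* one hash bucket */

static void example_insert(struct example_entry *e)
{
	INIT_HLIST_NODE(&e->node);
	hlist_add_head(&e->node, &example_bucket);
}

static void example_remove(struct example_entry *e)
{
	hlist_del(&e->node);	/* no bucket pointer needed, thanks to pprev */
}
```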
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
+#endif
+#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT
+#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1
+#endif
+
+#define dma_set_coherent_mask(dev,mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
+#endif
+
+#undef ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+#ifndef page_count
+#define page_count(p) atomic_read(&(p)->count)
+#endif
+
+#ifdef MAX_NUMNODES
+#undef MAX_NUMNODES
+#endif
+#define MAX_NUMNODES 1
+
+/* find_first_bit and find_next_bit are not defined for most
+ * 2.4 kernels (except for the redhat 2.4.21 kernels)
+ */
+#include
+#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size,
+				unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+	if (strchr(dev->name, '%'))
+		return "(unregistered net_device)";
+	return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#ifndef strlcpy
+#define strlcpy _kc_strlcpy
+size_t _kc_strlcpy(char *dest, const char *src, size_t size);
+#endif /* strlcpy */
+
+#ifndef do_div
+#if BITS_PER_LONG == 64
+# define do_div(n,base) ({					\
+	uint32_t __base = (base);				\
+	uint32_t __rem;						\
+	__rem = ((uint64_t)(n)) % __base;			\
+	(n) = ((uint64_t)(n)) / __base;				\
+	__rem;							\
+ })
+#elif BITS_PER_LONG == 32
+uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
+# define do_div(n,base) ({				\
+	uint32_t __base = (base);			\
+	uint32_t __rem;					\
+	if (likely(((n) >> 32) == 0)) {			\
+		__rem = (uint32_t)(n) % __base;		\
+		(n) = (uint32_t)(n) / __base;		\
+	} else						\
+		__rem = _kc__div64_32(&(n), __base);	\
+	__rem;						\
+ })
+#else /* BITS_PER_LONG == ?? */
+# error do_div() does not yet support the C64
+#endif /* BITS_PER_LONG */
+#endif /* do_div */
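do_div() divides a u64 in place and returns the remainder; it exists because 32-bit kernels cannot rely on the compiler to emit 64-bit division. A typical conversion helper using the NSEC_PER_MSEC constant defined earlier in this header (the function name is illustrative):

```c
/* Illustrative: nanoseconds to whole milliseconds via do_div() */
static inline u32 example_ns_to_ms(u64 ns)
{
	u32 rem = do_div(ns, NSEC_PER_MSEC);	/* ns becomes the quotient */

	(void)rem;				/* remainder, if needed */
	return (u32)ns;
}
```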
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC	1000000000L
+#endif
+
+#undef HAVE_I2C_SUPPORT
+#else /* 2.6.0 */
+
+#endif /* 2.6.0 => 2.5.28 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+	pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define dma_sync_single_for_cpu		dma_sync_single
+#define dma_sync_single_for_device	dma_sync_single
+#define dma_sync_single_range_for_cpu		dma_sync_single_range
+#define dma_sync_single_range_for_device	dma_sync_single_range
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+	return dma_addr == 0;
+}
+#endif
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+	if (nbits <= BITS_PER_LONG)
+		*dst = 0UL;
+	else {
+		int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+		memset(dst, 0, len);
+	}
+}
+#define page_to_nid(x) 0
+
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+	return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+
+#ifndef __force
+#define __force
+#endif
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define PCI_EXP_FLAGS		2	/* Capabilities register */
+#define PCI_EXP_FLAGS_VERS	0x000f	/* Capability version */
+#define PCI_EXP_FLAGS_TYPE	0x00f0	/* Device/Port type */
+#define PCI_EXP_TYPE_ENDPOINT	0x0	/* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END	0x1	/* Legacy Endpoint */
+#define PCI_EXP_TYPE_ROOT_PORT	0x4	/* Root Port */
+#define PCI_EXP_TYPE_DOWNSTREAM	0x6	/* Downstream Port */
+#define PCI_EXP_FLAGS_SLOT	0x0100	/* Slot implemented */
+#define PCI_EXP_DEVCAP		4	/* Device capabilities */
+#define PCI_EXP_DEVSTA		10	/* Device Status */
+#define msleep(x)	do { set_current_state(TASK_UNINTERRUPTIBLE); \
+				schedule_timeout((x * HZ)/1000 + 2); \
+			} while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC    1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+	return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+	if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+		return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	return m * (HZ / MSEC_PER_SEC);
+#else
+	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+	unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+	while (timeout && !signal_pending(current)) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		timeout = schedule_timeout(timeout);
+	}
+	return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000	0x0040	/* MSB of Speed (1000) */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#endif
+#ifndef __be32
+#define __be32 u32
+#endif
+#ifndef __be64
+#define __be64 u64
+#endif
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+	return (struct vlan_ethhdr *)skb->mac.raw;
+}
+
+/* Wake-On-Lan options. */
+#define WAKE_PHY		BIT(0)
+#define WAKE_UCAST		BIT(1)
+#define WAKE_MCAST		BIT(2)
+#define WAKE_BCAST		BIT(3)
+#define WAKE_ARP		BIT(4)
+#define WAKE_MAGIC		BIT(5)
+#define WAKE_MAGICSECURE	BIT(6) /* only meaningful if WAKE_MAGIC */
+
+#define skb_header_pointer _kc_skb_header_pointer
+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
+					    int offset, int len, void *buffer)
+{
+	int hlen = skb_headlen(skb);
+
+	if (hlen - offset >= len)
+		return skb->data + offset;
+
+#ifdef MAX_SKB_FRAGS
+	if (skb_copy_bits(skb, offset, buffer, len) < 0)
+		return NULL;
+
+	return buffer;
+#else
+	return NULL;
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+}
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm)	  \
+	static struct kparam_array __param_arr_##name			  \
+	= { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+	    sizeof(array[0]), array };					  \
+	module_param_call(name, param_array_set, param_array_get,	  \
+			  &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+/*
+ * num_online is broken for all < 2.6.10 kernels.
+ */
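skb_header_pointer() is the safe way to read a header that may not sit in the skb's linear area: it returns a direct pointer when the bytes are linear and otherwise copies them into a caller-provided buffer. Typical use when peeking at a TCP header (names are illustrative):

```c
#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Illustrative: fetch the TCP header whether or not it is linear */
static const struct tcphdr *example_get_tcphdr(const struct sk_buff *skb,
					       int thoff, struct tcphdr *buf)
{
	/* returns skb->data + thoff if linear, else copies into *buf */
	return skb_header_pointer(skb, thoff, sizeof(*buf), buf);
}
```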
+ */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ +#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. 
*/ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymmetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +void *_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause BIT(13) +#define SUPPORTED_Asym_Pause BIT(14) +#define ADVERTISED_Pause BIT(13) +#define ADVERTISED_Asym_Pause BIT(14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} while (0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + 
const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) \ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 
0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#endif +typedef irqreturn_t (*new_handler_t)(int, void*); +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void*, struct pt_regs *); +typedef void (*new_handler_t)(int, void*); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t) handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) ) +#ifndef skb_checksum_help +static inline int __kc_skb_checksum_help(struct sk_buff *skb) +{ + return skb_checksum_help(skb, 0); +} +#define skb_checksum_help(skb) __kc_skb_checksum_help((skb)) +#endif +#endif /* < 2.6.19 && >= 2.6.11 */ + +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() do {} while(0) +#undef pci_save_state +int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include +#include + +#define NEW_SKB_CSUM_HELP +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) +#undef INIT_WORK +#define INIT_WORK(_work, _func) \ +do { \ + 
INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + 
memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include +void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full BIT(15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full BIT(15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#define __must_be_array(a) 0 + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) +#undef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do { } while (0) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ + +static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + int delta = 0; + + if (headroom > (skb->data - skb->head)) + delta = headroom - (skb->data - skb->head); + + if (delta || skb_header_cloned(skb)) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} +#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef ACCESS_ONCE +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used to look up the real NAPI polling routine */ + 
int (*poll)(struct napi_struct *, int); + struct net_device *dev; + int weight; +}; +#endif + +#ifdef NAPI +int __kc_adapter_clean(struct net_device *, int *); +/* The following definitions are multi-queue aware, and thus we have a driver + * define list which determines which drivers support multiple queues, and + * thus need these stronger defines. If a driver does not support multi-queue + * functionality, you don't need to add it to this list. + */ +struct net_device *napi_to_poll_dev(const struct napi_struct *napi); + +static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + poll_dev->poll = __kc_adapter_clean; + poll_dev->priv = napi; + poll_dev->weight = weight; + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); + set_bit(__LINK_STATE_START, &poll_dev->state); + dev_hold(poll_dev); + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_add __kc_mq_netif_napi_add + +static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); + dev_put(poll_dev); + memset(poll_dev, 0, sizeof(struct net_device)); +} + +#define netif_napi_del __kc_mq_netif_napi_del + +static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) +{ + return netif_running(napi->dev) && + netif_rx_schedule_prep(napi_to_poll_dev(napi)); +} +#define napi_schedule_prep __kc_mq_napi_schedule_prep + +static inline void __kc_mq_napi_schedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) + __netif_rx_schedule(napi_to_poll_dev(napi)); +} +#define napi_schedule __kc_mq_napi_schedule + +#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) +#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) +#ifdef CONFIG_SMP +static inline void napi_synchronize(const struct napi_struct *n) +{ + struct net_device *dev = napi_to_poll_dev(n); + + while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { + /* No hurry. */ + msleep(1); + } +} +#else +#define napi_synchronize(n) barrier() +#endif /* CONFIG_SMP */ +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) +static inline void _kc_napi_complete(struct napi_struct *napi) +{ +#ifdef NETIF_F_GRO + napi_gro_flush(napi); +#endif + netif_rx_complete(napi_to_poll_dev(napi)); +} +#define napi_complete _kc_napi_complete +#else /* NAPI */ + +/* The following definitions are only used if we don't support NAPI at all. */ + +static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + dev->poll = poll; + dev->weight = weight; + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_del(_a) do {} while (0) +#endif /* NAPI */ + +#undef dev_get_by_name +#define dev_get_by_name(_a, _b) dev_get_by_name(_b) +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) +#endif + +#ifdef NETIF_F_TSO6 +#define skb_is_gso_v6 _kc_skb_is_gso_v6 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif /* NETIF_F_TSO6 */ + +#ifndef KERN_CONT +#define KERN_CONT "" +#endif +#ifndef pr_err +#define pr_err(fmt, arg...) 
\ + printk(KERN_ERR fmt, ##arg) +#endif + +#ifndef rounddown_pow_of_two +#define rounddown_pow_of_two(n) \ + __builtin_constant_p(n) ? ( \ + (n == 1) ? 0 : \ + (1UL << ilog2(n))) : \ + (1UL << (fls_long(n) - 1)) +#endif + +#else /* < 2.6.24 */ +#define HAVE_ETHTOOL_GET_SSET_COUNT +#define HAVE_NETDEV_NAPI_LIST +#endif /* < 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#define INCLUDE_PM_QOS_PARAMS_H +#include +#else /* >= 3.2.0 */ +#include +#endif /* else >= 3.2.0 */ +#endif /* > 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) +#define PM_QOS_CPU_DMA_LATENCY 1 + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) +#include +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY +#define pm_qos_add_requirement(pm_qos_class, name, value) \ + set_acceptable_latency(name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) \ + remove_acceptable_latency(name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + modify_acceptable_latency(name, value) +#else +#define PM_QOS_DEFAULT_VALUE -1 +#define pm_qos_add_requirement(pm_qos_class, name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) +#define pm_qos_update_requirement(pm_qos_class, name, value) { \ + if (value != PM_QOS_DEFAULT_VALUE) { \ + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ + pci_name(adapter->pdev)); \ + } \ +} + +#endif /* > 2.6.18 */ + +#define pci_enable_device_mem(pdev) pci_enable_device(pdev) + +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + +#ifndef strict_strtol +#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) +static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) +{ + /* adapted from strict_strtoul() in 2.6.25 */ + char *tail; + long val; + size_t len; + + *res = 0; + len = strlen(buf); + if (!len) + return -EINVAL; + val = simple_strtol(buf, &tail, base); + if (tail == buf) + return -EINVAL; + if ((*tail == '\0') || + ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { + *res = val; + return 0; + } + + return -EINVAL; +} +#endif + +#else /* < 2.6.25 */ + +#endif /* < 2.6.25 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +#ifndef clamp_t +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? 
__max : __val; }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#define NETDEV_CAN_SET_GSO_MAX_SIZE +#define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) do {} while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifndef DMA_ATTR_WEAK_ORDERING +#define DMA_ATTR_WEAK_ORDERING 0 +#endif + +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *); +void _kc_netif_tx_wake_all_queues(struct net_device *); +void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_stop_subqueue((_ndev), (_qi)); \ + else \ + netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) +#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) +#define netif_tx_start_all_queues(a) netif_start_queue(a) +#else +#define 
netif_tx_start_all_queues(a) do {} while (0) +#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#define qdisc_reset_all_tx(a) +#else /* < 2.6.27 */ +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#endif + +#else /* < 2.6.29 */ +#ifndef HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ 
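+
+/* Illustrative only (not part of the original header): on a pre-2.6.29
+ * kernel, the PCI_EXP_LNKSTA_* masks backported above let a driver decode
+ * the negotiated link state; "pdev", "cur_speed" and "cur_width" are
+ * hypothetical driver variables:
+ *
+ *	int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ *	u16 lnksta;
+ *
+ *	pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &lnksta);
+ *	cur_speed = lnksta & PCI_EXP_LNKSTA_CLS;
+ *	cur_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
+ */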
+ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) +#define NO_PTP_SUPPORT +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) \ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#ifdef nr_cpus_node +#undef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)) +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* RHEL >= 5.5 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))) +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} +#endif + +#else /* < 2.6.30 */ +#define HAVE_ASPM_QUIRKS +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full BIT(17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full BIT(18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full BIT(19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full BIT(17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full BIT(18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full BIT(19) +#endif + +static inline unsigned long dev_trans_start(struct net_device *dev) +{ + return dev->trans_start; +} +#else /* < 2.6.31 */ +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS +#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif +#include +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) +#undef netdev_tx_t +#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU BIT(26) +#endif 
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) 
dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + 
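+/* Illustrative only (not part of the original header): the netdev_* and
+ * netif_* logging backports below let driver code written against the
+ * 2.6.34+ API, e.g.
+ *
+ *	netdev_warn(netdev, "reset queue %u\n", qid);
+ *
+ * build on older kernels, expanding to the closest dev_printk()/printk()
+ * form the running kernel supports; "qid" is a hypothetical variable.
+ */
+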
+#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) +#ifdef IGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* IGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */ + return dev; +} +#endif /* ! 
RHEL >= 6.4 */ + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifndef numa_mem_id +#define numa_mem_id numa_node_id +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->egress_subqueue_count = txq; + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused txq) +{ + return 0; +} +#endif /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +struct device_node; +#else /* < 2.6.35 */ +#define HAVE_STRUCT_DEVICE_OF_NODE +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#include +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +enum { + WQ_UNBOUND = 0, + WQ_RESCUER = 0, +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a,b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define msleep(x) do { if (x > 20) \ + msleep(x); \ + else \ + usleep_range(1000 * x, 2000 * x); \ + } while (0) + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define 
device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 +#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define 
IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) +#define ADVERTISED_20000baseKR2_Full BIT(22) 
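As a sketch of how the kstrto* shims defined just above are consumed (the helper name below is hypothetical and not part of this patch): on pre-RHEL 6.4 kernels the macros expand to simple_strtoul() and always evaluate to 0, so driver code can call the modern parsing API unconditionally, at the cost of losing error reporting on old kernels.

```c
/* Illustrative usage only -- not part of this patch. */
#include <linux/kernel.h>

static int example_parse_queue_count(const char *buf, u32 *count)
{
	/* real kstrtou32() on new kernels; on old kernels the shim
	 * assigns simple_strtoul()'s result and always returns 0
	 */
	return kstrtou32(buf, 10, count);
}
```

Because the shim silently "succeeds" on malformed input, callers built against old kernels must not rely on the return value for validation.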
+#endif /* RHEL_RELEASE_CODE */ +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#include <linux/dma-mapping.h> +#endif +#define skb_frag_dma_map(dev,frag,offset,size,dir) \ + _kc_skb_frag_dma_map(dev,frag,offset,size,dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ + (SLE_VERSION_CODE 
&& SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) +{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! 
< 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page, + int off, int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include <linux/kconfig.h> +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ + ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) +#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include <linux/of_net.h> +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define 
__GFP_MEMALLOC 0 +#endif + +#ifndef eth_broadcast_addr +#define eth_broadcast_addr _kc_eth_broadcast_addr +static inline void _kc_eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef eth_random_addr +#define eth_random_addr _kc_eth_random_addr +static inline void _kc_eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; /* set local assignment */ +} +#endif /* eth_random_addr */ + +#ifndef DMA_ATTR_SKIP_CPU_SYNC +#define DMA_ATTR_SKIP_CPU_SYNC 0 +#endif +#else /* < 3.6.0 */ +#define HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#include +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activation on one define + */ +#define SUPPORTED_40000baseKR4_Full BIT(23) +#define SUPPORTED_40000baseCR4_Full BIT(24) +#define SUPPORTED_40000baseSR4_Full BIT(25) +#define SUPPORTED_40000baseLR4_Full BIT(26) +#define ADVERTISED_40000baseKR4_Full BIT(23) +#define ADVERTISED_40000baseCR4_Full BIT(24) +#define ADVERTISED_40000baseSR4_Full BIT(25) +#define ADVERTISED_40000baseLR4_Full BIT(26) +#endif + +#ifndef mmd_eee_cap_to_ethtool_sup_t +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. + */ +static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} +#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ + __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) +#endif /* mmd_eee_cap_to_ethtool_sup_t */ + +#ifndef mmd_eee_adv_to_ethtool_adv_t +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisement (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. 
+ */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. + */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type +#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ + ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) +#define ptp_clock_register(caps, args...) 
ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#include <linux/hashtable.h> +#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags + * parameter on these older kernels. + */ +#define __setup_timer(_timer, _fn, _data, _flags) \ + setup_timer((_timer), (_fn), (_data)) \ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \ + ( ! 
( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) ) + +#ifndef mod_delayed_work +/** + * __mod_delayed_work - modify delay or queue delayed work + * @wq: workqueue to use + * @dwork: delayed work to queue + * @delay: number of jiffies to wait before queueing + * + * Return: %true if @dwork was pending and was rescheduled; + * %false if it wasn't pending + * + * Note: the dwork parameter was declared as a void* + * to avoid compatibility problems with early 2.6 kernels + * where struct delayed_work is not declared. Unlike the original + * implementation flags are not preserved and it shouldn't be + * used in interrupt context. + */ +static inline bool __mod_delayed_work(struct workqueue_struct *wq, + void *dwork, + unsigned long delay) +{ + bool ret = cancel_delayed_work(dwork); + queue_delayed_work(wq, dwork, delay); + return ret; +} +#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay) +#endif /* mod_delayed_work */ + +#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */ +#else /* >= 3.7.0 */ +#include <linux/hashtable.h> +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#ifndef pci_sriov_set_totalvfs +static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) +{ + return 0; +} +#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) +#endif +#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */ +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* Reserved Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { + 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; + +#ifndef is_link_local_ether_addr +static inline bool __kc_is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) +#endif /* is_link_local_ether_addr */ + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif /* FLOW_MAC_EXT */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#define HAVE_SRIOV_CONFIGURE +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_2_5GB +#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_5_0GB +#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ +#endif + +#undef PCI_EXP_LNKCAP2_SLS_2_5GB +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_5_0GB +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_8_0GB +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ + +#else /* >= 3.8.0 */ +#ifndef __devinit +#define __devinit +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devinitconst +#define __devinitconst +#endif + +#ifndef __devexit 
+#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#ifdef HAVE_FDB_OPS +#if defined(HAVE_NDO_FDB_ADD_NLATTR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 flags); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr, u16 flags); +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags); +#endif /* HAVE_NDO_FDB_ADD_NLATTR */ +#if defined(HAVE_FDB_DEL_NLATTR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr); +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr); +#endif /* HAVE_FDB_DEL_NLATTR */ +#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add +#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del +#endif /* HAVE_FDB_OPS */ + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +/* The definitions for these functions when CONFIG_OF_NET is defined are + * pulled in from <linux/of_net.h>. For kernels older than 3.5 we already have + * backports for when CONFIG_OF_NET is true. These are separated and + * duplicated in order to cover all cases so that all kernels get either the + * real definitions (when CONFIG_OF_NET is defined) or the stub definitions + * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real + * definitions). 
+ */ +#ifndef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define USE_DEFAULT_FDB_DEL_DUMP +#define HAVE_SKB_INNER_NETWORK_HEADER + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0))) +#define HAVE_RHEL7_PCI_DRIVER_RH +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_RHEL7_PCI_RESET_NOTIFY +#endif /* RHEL >= 7.2 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define HAVE_GENEVE_RX_OFFLOAD +#endif /* RHEL >=7.3 && RHEL < 7.5 */ +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /* RHEL >= 7.4 */ + +#endif /* RHEL >= 7.0 && RHEL < 8.0 */ + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#endif /* RHEL >= 8.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) +#define netdev_notifier_info_to_dev(ptr) ptr +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#else /* >= 3.11.0 */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES +#endif /* >= 3.11.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); +#ifndef pcie_get_minimum_link +#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) +#endif +#else /* >= 3.12.0 */ +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NDO_GET_PHYS_PORT_ID +#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) +#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while(0) +#endif +#undef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) + +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= 
RHEL_RELEASE_VERSION(7,0))) +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) ) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, (cnt) * (size), flags) +#endif /* > 2.6.20 */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member) +#endif + +#else /* >= 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#endif + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) +#define HAVE_NDO_DFWD_OPS +#endif + +#ifndef pci_enable_msix_range +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range __kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 
*)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags); +#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) + +#ifndef OPTIMIZE_HIDE_VAR +#ifdef __GNUC__ +#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) +#else +#include +#define OPTIMIZE_HIDE_VAR(var) barrier() +#endif +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0))) +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ +#ifdef NETIF_F_RXHASH + return skb->rxhash; +#else + return 0; +#endif /* NETIF_F_RXHASH */ +} +#endif /* !RHEL > 5.9 && !SLES >= 10.4 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define request_firmware_direct request_firmware +#endif /* !RHEL || RHEL < 7.5 */ + +#else /* >= 3.14.0 */ + +/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ +#ifndef HAVE_NDO_DFWD_OPS +#define HAVE_NDO_DFWD_OPS +#endif +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh +#endif + +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) + +#else +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_PTP_1588_CLOCK_PINS +#define HAVE_NETDEV_PORT +#endif /* 3.15.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, + dev, sync, unsync); +#else + return 0; +#endif +} +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void 
__kc_dev_uc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_uc_unsync __kc_dev_uc_unsync + +static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, + dev, sync, unsync); +#else + return 0; +#endif + +} +#define __dev_mc_sync __kc_dev_mc_sync + +static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +/* if someone backports this, hopefully they backport as a #define. + * declare it as zero on older kernels so that if it gets or'd in + * it won't affect anything, therefore preventing core driver changes + */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); +#define devm_kmemdup __kc_devm_kmemdup + +#else +#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \ + ! 
( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) ) +#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY +#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif /* 3.16.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#ifndef timespec64 +#define timespec64 timespec +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} +#define timespec64_equal timespec_equal +#define timespec64_compare timespec_compare +#define set_normalized_timespec64 set_normalized_timespec +#define timespec64_add_safe timespec_add_safe +#define timespec64_add timespec_add +#define timespec64_sub timespec_sub +#define timespec64_valid timespec_valid +#define timespec64_valid_strict timespec_valid_strict +#define timespec64_to_ns timespec_to_ns +#define ns_to_timespec64 ns_to_timespec +#define ktime_to_timespec64 ktime_to_timespec +#define ktime_get_ts64 ktime_get_ts +#define ktime_get_real_ts64 ktime_get_real_ts +#define timespec64_add_ns timespec_add_ns +#endif /* timespec64 */ +#endif /* !(RHEL6.8<RHEL7.0) && !RHEL7.2+ */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3))) +static inline u64 ktime_get_ns(void) +{ + return ktime_to_ns(ktime_get()); +} + +static inline u64 ktime_get_real_ns(void) +{ + return ktime_to_ns(ktime_get_real()); +} + +static inline u64 ktime_get_boot_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +#endif /* RHEL < 7.3 */ + +#else +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#include <linux/time64.h> +#endif /* 3.17.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +#include <linux/errqueue.h> +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); +#define skb_clone_sk __kc_skb_clone_sk +#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp +#endif +u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data, + unsigned int max_len); +#define eth_get_headlen __kc_eth_get_headlen +#ifndef ETH_P_XDSA +#define ETH_P_XDSA 0x00F8 +#endif +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +/* RHEL 7.3 backported xmit_more */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_SKB_XMIT_MORE +#endif /* >= RH 7.3 */ + +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 
3.18.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) ) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE -1 +#endif +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. 
+ */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) +#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#ifndef 
NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include <linux/timecounter.h> +#else +#include <linux/clocksource.h> +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +static inline struct net_device * +of_find_net_device_by_node(struct device_node __always_unused *np) +{ + return NULL; +} + +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#endif +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node); +#define cpumask_local_spread _kc_cpumask_local_spread +#endif +#else /* >= 4,1,0 */ +#define HAVE_NDO_GET_PHYS_PORT_NAME +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#define HAVE_NDO_SET_TX_MAXRATE +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! 
SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_VF_STATS +#endif /* (RHEL7.2+) */ +#endif /* !(RHEL6.8+ || RHEL7.2+) */ +#else +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return false; +} +#endif /* 2.6.27 */ +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#define HAVE_VF_STATS +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* !(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3)) +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) + return skb->head + skb->csum_start; +#else /* < 2.6.22 */ + return skb_transport_header(skb); +#endif +} +#endif + +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,0)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && 
(SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +static inline void napi_consume_skb(struct sk_buff *skb, + int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* RHEL 7.2, SLES12 SP3 */ +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#define HAVE_TC_SETUP_CLSFLOWER +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_DEVLINK_SUPPORT +#endif /* RHEL 7.4, SLES12 SP3 */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#else /* 4.7.0 */ +#define HAVE_DEVLINK_SUPPORT +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) +#define HAVE_TCF_EXTS_TO_LIST +#define HAVE_PCI_IRQ_API +#endif + +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_mem_regions(struct pci_dev *pdev, char *name) +#else 
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+	return pci_request_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+	return pci_release_selected_regions(pdev,
+			     pci_select_bars(pdev, IORESOURCE_MEM));
+}
+#endif /* !SLE_VERSION(12,3,0) */
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\
+    (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* RHEL7.4+ || SLES12sp3+ */
+#else
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#define HAVE_TCF_EXTS_TO_LIST
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#define HAVE_PCI_IRQ_API
+#endif /* 4.8.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))))
+#define HAVE_TC_FLOWER_VLAN_IN_TAGS
+#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || <SLE_VERSION(12,3,0) */
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* RHEL7.4+ */
+#else /* >=4.9 */
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* KERNEL_VERSION(4.9.0) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
+/* SLES 12.3 and RHEL 7.5 backported this interface */
+#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \
+    (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
+static inline bool _kc_napi_complete_done2(struct napi_struct *napi,
+					   int __always_unused work_done)
+{
+	/* it was really hard to get napi_complete_done to be safe to call
+	 * recursively without running into our own kcompat, so just use
+	 * napi_complete
+	 */
+	napi_complete(napi);
+
+	/* true means that the stack is telling the driver to go-ahead and
+	 * re-enable interrupts
+	 */
+	return true;
+}
+
+#ifdef napi_complete_done
+#undef napi_complete_done
+#endif
+#define napi_complete_done _kc_napi_complete_done2
+#endif /* sles and rhel exclusion for < 4.10 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_DEV_WALK_API
+#define HAVE_ETHTOOL_NEW_2500MB_BITS
+#define HAVE_ETHTOOL_5G_BITS
+#endif /* RHEL7.4+ */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12,3,0)))
+#define HAVE_STRUCT_DMA_ATTRS
+#endif /* (SLES == 12.3.0) */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#endif /* (SLES >= 12.3.0) */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#define HAVE_STRUCT_DMA_ATTRS
+#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#endif
+#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \
+    !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))))
+#ifndef dma_map_page_attrs
+#define dma_map_page_attrs __kc_dma_map_page_attrs
+static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev,
+						 struct page *page,
+						 size_t offset, size_t size,
+						 enum dma_data_direction dir,
+						 unsigned long __always_unused attrs)
+{
+	return dma_map_page(dev, page, offset, size, dir);
+}
+#endif
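+
+/* Editor's note: a hedged sketch, not in the original patch, of a driver
+ * mapping an RX page through the shim above; on pre-4.10 kernels the attrs
+ * argument is simply dropped. The function name is hypothetical.
+ */
+static inline dma_addr_t my_map_rx_page(struct device *dev, struct page *page)
+{
+	return dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
+				  DMA_FROM_DEVICE, 0);
+}
+
+#ifndef dma_unmap_page_attrs
+#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs
+static inline void __kc_dma_unmap_page_attrs(struct device *dev,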
dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_PTP_ADJFINE +#endif +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +#define HAVE_PTP_ADJFINE +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#else /* > 4.13 */ +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) +#endif +#endif /* ETHTOOL_GLINKSETTINGS */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if 
(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() do {} while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +struct _kc_bpf_prog { +}; +#define bpf_prog _kc_bpf_prog +#else /* > 4.14 */ +#define HAVE_XDP_SUPPORT +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_TCF_EXTS_HAS_ACTION +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#ifndef ETHTOOL_GLINKSETTINGS + +#define __ETHTOOL_LINK_MODE_MASK_NBITS 32 +#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * struct ethtool_link_ksettings + * @link_modes: supported and advertising, single item arrays + * @link_modes.supported: bitmask of supported link speeds + * @link_modes.advertising: bitmask of currently advertised speeds + * @base: base link details + * @base.speed: current link speed + * @base.port: current port type + * @base.duplex: current duplex mode + * @base.autoneg: current autonegotiation settings + * + * This struct and the following macros provide a way to support the old + * ethtool get/set_settings API on older kernels, but in the style of the new + * GLINKSETTINGS API. In this way, the same code can be used to support both + * APIs as seemlessly as possible. + * + * It should be noted the old API only has support up to the first 32 bits. 
+ */ +struct ethtool_link_ksettings { + struct { + u32 speed; + u8 port; + u8 duplex; + u8 autoneg; + } base; + struct { + unsigned long supported[ETHTOOL_LINK_MASK_SIZE]; + unsigned long advertising[ETHTOOL_LINK_MASK_SIZE]; + } link_modes; +}; + +#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode +#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode +#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name +#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode) + +/** + * ethtool_link_ksettings_zero_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name)\ + (*((ptr)->link_modes.name) = 0x0) + +/** + * ethtool_link_ksettings_add_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_del_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to delete + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_test_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\ + (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode))) + +/** + * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd + * @ks: ethtool_link_ksettings struct + * @cmd: ethtool_cmd struct + * + * Convert an ethtool_link_ksettings structure into the older ethtool_cmd + * structure. We provide this in kcompat.h so that drivers can easily + * implement the older .{get|set}_settings as wrappers around the new api. + * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually + * a real function in the kernel. 
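+ */
+
+/* Editor's note: a hedged sketch, not part of the original patch, of a
+ * driver filling the shimmed struct through the macros above; the function
+ * name and the 10G link mode are hypothetical choices.
+ */
+static inline void my_fill_ksettings(struct ethtool_link_ksettings *ks)
+{
+	ethtool_link_ksettings_zero_link_mode(ks, supported);
+	ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full);
+	ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full);
+	ks->base.speed = SPEED_10000;
+	ks->base.duplex = DUPLEX_FULL;
+	ks->base.autoneg = AUTONEG_DISABLE;
+}
+
+/* The conversion helper below completes the compatibility layer; its
+ * kernel-doc resumes here: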
+ */
+static inline void
+_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks,
+			     struct ethtool_cmd *cmd)
+{
+	cmd->supported = (u32)ks->link_modes.supported[0];
+	cmd->advertising = (u32)ks->link_modes.advertising[0];
+	ethtool_cmd_speed_set(cmd, ks->base.speed);
+	cmd->duplex = ks->base.duplex;
+	cmd->autoneg = ks->base.autoneg;
+	cmd->port = ks->base.port;
+}
+
+#endif /* !ETHTOOL_GLINKSETTINGS */
+
+/*****************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))))
+#define phy_speed_to_str _kc_phy_speed_to_str
+const char *_kc_phy_speed_to_str(int speed);
+#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */
+#include <linux/phy.h>
+#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \
+    !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))))
+#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO
+#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)))
+#define HAVE_TCF_BLOCK
+#endif
+void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
+				      struct ethtool_link_ksettings *src);
+#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks
+#else /* >= 4.15 */
+#define HAVE_NDO_BPF
+#define HAVE_XDP_BUFF_DATA_META
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_TCF_BLOCK
+#endif /* 4.15.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) && \
+    !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0) && \
+      SLE_VERSION_CODE < SLE_VERSION(15,0,0)) && \
+    !(SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))
+/* The return value of the strscpy() and strlcpy() functions is different.
+ * This could potentially be hazardous in the future.
+ * To avoid this, the void result is forced,
+ * so it is not possible to use this function's return value.
+ * A return value is required in kernels 4.3 through 4.15.
+ */
+#define strscpy(...) (void)(strlcpy(__VA_ARGS__))
+#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */
+
+#define pci_printk(level, pdev, fmt, arg...) \
+	dev_printk(level, &(pdev)->dev, fmt, ##arg)
+#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
+#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
+#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
+#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
+#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
+#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
+#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)
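+
+/* Editor's note: a hedged sketch, not in the original patch, of the pci_*
+ * logging wrappers above in use; the function name is hypothetical.
+ */
+static inline void my_report_link_down(struct pci_dev *pdev)
+{
+	pci_warn(pdev, "link down, scheduling reset\n");
+}
+
+#ifndef array_index_nospec
+static inline unsigned long _kc_array_index_mask_nospec(unsigned long index,
+							unsigned long size)
+{
+	/*
+	 * Always calculate and emit the mask even if the compiler
+	 * thinks the mask is not needed. The compiler does not take
+	 * into account the value of @index under speculation.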
+ */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} + +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + (typeof(_i)) (_i & _mask); \ +}) +#endif /* array_index_nospec */ +#ifndef DEFINE_SHOW_ATTRIBUTE +#define DEFINE_SHOW_ATTRIBUTE(name) \ +static int name##_open(struct inode *inode, struct file *f) \ +{ \ + return single_open(f, name##_show, inode->i_private); \ +} \ + \ +static const struct file_operations name##_fops = { \ + .owner = THIS_MODULE, \ + .open = name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} +#endif +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload(dev)) + return false; + if (common->chain_index) + return false; + + return true; +} +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ + +#ifndef netdev_level_once +#define netdev_level_once(level, dev, fmt, ...) \ +do { \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + netdev_printk(level, dev, fmt, ##__VA_ARGS__); \ + } \ +} while (0) + +#define netdev_emerg_once(dev, fmt, ...) \ + netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__) +#define netdev_alert_once(dev, fmt, ...) \ + netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__) +#define netdev_crit_once(dev, fmt, ...) \ + netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__) +#define netdev_err_once(dev, fmt, ...) \ + netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__) +#define netdev_warn_once(dev, fmt, ...) \ + netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__) +#define netdev_notice_once(dev, fmt, ...) \ + netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__) +#define netdev_info_once(dev, fmt, ...) 
\ + netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__) +#endif /* netdev_level_once */ + +#ifndef sizeof_field +#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER))) +#endif /* sizeof_field */ +#else /* >= 4.16 */ +#include +#define HAVE_XDP_BUFF_RXQ +#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK +#define HAVE_TCF_MIRRED_DEV +#define HAVE_VF_STATS_DROPPED +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +#include +#include +#define PCIE_SPEED_16_0GT 0x17 +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +void _kc_pcie_print_link_status(struct pci_dev *dev); +#define pcie_print_link_status _kc_pcie_print_link_status +#else /* >= 4.17.0 */ +#define HAVE_XDP_BUFF_IN_XDP_H +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) || (RHEL_RELEASE_CODE /* not in rhel yet */) +#ifndef atomic_long_try_cmpxchg_relaxed +#ifdef CONFIG_64BIT +#define ATOMIC_LONG_TYPE long int +#ifndef ATOMIC_LONG_PFX +#define ATOMIC_LONG_PFX(x) atomic64 ## x +#endif +#ifndef atomic64_cmpxchg_relaxed +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg +#define atomic64_cmpxchg_acquire atomic64_cmpxchg +#define atomic64_cmpxchg_release atomic64_cmpxchg +#endif +#else +#define ATOMIC_LONG_TYPE int +#ifndef ATOMIC_LONG_PFX +#define ATOMIC_LONG_PFX(x) atomic ## x +#endif +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_relaxed atomic_cmpxchg +#define atomic_cmpxchg_acquire atomic_cmpxchg +#define atomic_cmpxchg_release atomic_cmpxchg +#endif +#endif +#define __al_try_cmpxchg(type, _p, _po, _n) \ +({ \ + ATOMIC_LONG_TYPE *__po = (_po); \ + ATOMIC_LONG_TYPE __r, __o = *(ATOMIC_LONG_TYPE *)(__po); \ + __r = ATOMIC_LONG_PFX(_cmpxchg##type)((_p), __o, (_n)); \ + if (unlikely(__r != __o)) \ + *(ATOMIC_LONG_TYPE *)__po = __r; \ + likely(__r == __o); \ +}) +#define atomic_long_try_cmpxchg(l, old, new) __al_try_cmpxchg(, l, old, new) +#define atomic_long_try_cmpxchg_relaxed(l, old, new) __al_try_cmpxchg(_relaxed, l, old, new) +#define atomic_long_try_cmpxchg_acquire(l, old, new) __al_try_cmpxchg(_acquire, l, old, new) +#define atomic_long_try_cmpxchg_release(l, old, new) __al_try_cmpxchg(_release, l, old, new) +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#ifdef NETIF_F_HW_L2FW_DOFFLOAD +#include +#ifndef macvlan_supports_dest_filter +#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter +static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PRIVATE || + macvlan->mode == MACVLAN_MODE_VEPA || + macvlan->mode == MACVLAN_MODE_BRIDGE; +} +#endif + +#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0))) +#ifndef macvlan_accel_priv +#define macvlan_accel_priv _kc_macvlan_accel_priv +static inline void *_kc_macvlan_accel_priv(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->fwd_priv; +} +#endif + +#ifndef macvlan_release_l2fw_offload +#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload +static inline int _kc_macvlan_release_l2fw_offload(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); 
+
+	macvlan->fwd_priv = NULL;
+	return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
+}
+#endif
+#endif /* !SLES || SLES < 15.1 */
+#endif /* NETIF_F_HW_L2FW_DOFFLOAD */
+#include "kcompat_overflow.h"
+
+#if (SLE_VERSION_CODE < SLE_VERSION(15,1,0))
+#define firmware_request_nowarn	request_firmware_direct
+
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7)))
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+#include <net/devlink.h>
+
+enum devlink_port_flavour {
+	DEVLINK_PORT_FLAVOUR_PHYSICAL,
+	DEVLINK_PORT_FLAVOUR_CPU,
+	DEVLINK_PORT_FLAVOUR_DSA,
+	DEVLINK_PORT_FLAVOUR_PCI_PF,
+	DEVLINK_PORT_FLAVOUR_PCI_VF,
+};
+
+#endif /* CONFIG_NET_DEVLINK */
+#endif /* RHEL < 7.7 */
+#endif /* SLES < 15.1 */
+#else /* >= 4.18.0 */
+#include <net/xdp_sock.h>
+#define HAVE_XDP_FRAME_STRUCT
+#define HAVE_XDP_SOCK
+#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS
+#define NO_NDO_XDP_FLUSH
+#define HAVE_AF_XDP_SUPPORT
+#endif /* 4.18.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0))
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)))
+#define HAVE_DEVLINK_REGIONS
+#endif
+#define bitmap_alloc(nbits, flags) \
+	kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags)
+#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO))
+#define bitmap_free(bitmap) kfree(bitmap)
+#ifdef ETHTOOL_GLINKSETTINGS
+#define ethtool_ks_clear(ptr, name) \
+	ethtool_link_ksettings_zero_link_mode(ptr, name)
+#define ethtool_ks_add_mode(ptr, name, mode) \
+	ethtool_link_ksettings_add_link_mode(ptr, name, mode)
+#define ethtool_ks_del_mode(ptr, name, mode) \
+	ethtool_link_ksettings_del_link_mode(ptr, name, mode)
+#define ethtool_ks_test(ptr, name, mode) \
+	ethtool_link_ksettings_test_link_mode(ptr, name, mode)
+#endif /* ETHTOOL_GLINKSETTINGS */
+#define HAVE_NETPOLL_CONTROLLER
+#ifndef netdev_set_sb_channel
+#define netdev_set_sb_channel(x, y) do { } while (0)
+#endif
+#ifndef netdev_unbind_sb_channel
+#define netdev_unbind_sb_channel(x, y) do { } while (0)
+
+#endif
+#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))
+#define HAVE_TCF_BLOCK
+#define HAVE_TCF_MIRRED_DEV
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#endif
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) ||\
+    (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))
+#define HAVE_TCF_EXTS_FOR_EACH_ACTION
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#undef HAVE_TCF_EXTS_TO_LIST
+#endif /* RHEL8.0+ */
+#else /* >= 4.19.0 */
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#define NO_NETDEV_BPF_PROG_ATTACHED
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#define HAVE_NETDEV_SB_DEV
+#undef HAVE_TCF_EXTS_TO_LIST
+#define HAVE_TCF_EXTS_FOR_EACH_ACTION
+#define HAVE_DEVLINK_REGIONS
+#define HAVE_DEVLINK_PARAMS
+#endif /* 4.19.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0))
+#define HAVE_XDP_UMEM_PROPS
+#ifdef HAVE_AF_XDP_SUPPORT
+#ifndef napi_if_scheduled_mark_missed
+static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n)
+{
+	unsigned long val, new;
+
+	do {
+		val = READ_ONCE(n->state);
+		if (val & NAPIF_STATE_DISABLE)
+			return true;
+
+		if (!(val & NAPIF_STATE_SCHED))
+			return false;
+
+		new = val | NAPIF_STATE_MISSED;
+	} while (cmpxchg(&n->state, val, new) != val);
+
+	return true;
+}
+
+#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed
+#endif /* !napi_if_scheduled_mark_missed */
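+
+/* Editor's note: a hedged sketch, not in the original patch, of an AF_XDP
+ * wakeup path that relies on napi_if_scheduled_mark_missed() above; the
+ * function name is hypothetical.
+ */
+static inline void my_xsk_wakeup(struct napi_struct *napi)
+{
+	if (!napi_if_scheduled_mark_missed(napi))
+		napi_schedule(napi);
+}
+#endif /* 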
HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* RHEL >= 8.0 */ +#else /* >= 4.20.0 */ +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_VXLAN_TYPE +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */ +#else /* >= 5.0.0 */ +#define HAVE_PHC_GETTIMEX64 +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_GENEVE_TYPE +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#define __static_assert(expr, msg, ...) _Static_assert(expr, msg) +#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#endif /* RHEL < 8.1 */ +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#define HAVE_DEVLINK_PORT_PARAMS +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#if (defined HAVE_SKB_XMIT_MORE) && \ +(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif + +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifndef eth_get_headlen +static inline u32 +__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len) +#endif /* !eth_get_headlen */ +#endif /* !RHEL >= 8.2 */ + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif /* mmiowb */ +#if (RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)))) +#if IS_ENABLED(CONFIG_NET_DEVLINK) +#include +#ifndef devlink_port_attrs_set +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + enum devlink_port_flavour flavour, + u32 port_number, bool split, + u32 split_subport_number, + const unsigned char __always_unused *switch_id, + unsigned char __always_unused switch_id_len) +{ + devlink_port_attrs_set(devlink_port, flavour, port_number, split, + split_subport_number); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif /* 
!devlink_port_attrs_set */
+#endif /* CONFIG_NET_DEVLINK */
+#endif /* <8.2 */
+#else /* >= 5.2.0 */
+#define SPIN_UNLOCK_IMPLIES_MMIOWB
+#define HAVE_NETDEV_XMIT_MORE
+#endif /* 5.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0))
+
+#if (!RHEL_RELEASE_CODE || \
+    (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2))))
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+static inline void
+devlink_flash_update_begin_notify(struct devlink __always_unused *devlink)
+{
+}
+
+static inline void
+devlink_flash_update_end_notify(struct devlink __always_unused *devlink)
+{
+}
+
+static inline void
+devlink_flash_update_status_notify(struct devlink __always_unused *devlink,
+				   const char __always_unused *status_msg,
+				   const char __always_unused *component,
+				   unsigned long __always_unused done,
+				   unsigned long __always_unused total)
+{
+}
+#endif /* CONFIG_NET_DEVLINK */
+#endif /* not RH or RH < 8.2 */
+#endif /* 5.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0))
+#define DEVLINK_INFO_VERSION_GENERIC_FW "fw"
+#define DEVLINK_INFO_VERSION_GENERIC_ASIC_ID "asic.id"
+#define DEVLINK_INFO_VERSION_GENERIC_ASIC_REV "asic.rev"
+#endif /* 5.4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0))
+
+#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))))
+#define HAVE_TX_TIMEOUT_TXQUEUE
+#endif
+
+#else /* >= 5.6.0 */
+#define HAVE_TX_TIMEOUT_TXQUEUE
+#define HAVE_HWSTAMP_TX_ONESTEP_P2P
+#endif /* 5.6.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0))
+
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID
+#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id"
+#endif
+
+#ifdef HAVE_DEVLINK_REGIONS
+#define HAVE_DEVLINK_SNAPSHOT_CREATE_DESTRUCTOR
+#endif /* HAVE_DEVLINK_REGIONS */
+#else /* >= 5.7.0 */
+#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT
+#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT
+#endif /* 5.7.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0))
+
+#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))))
+#define HAVE_DEVLINK_UPDATE_PARAMS
+#endif
+
+#else
+#define HAVE_DEVLINK_UPDATE_PARAMS
+#endif /* 5.10.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0))
+
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5)))
+#define HAVE_DEVLINK_PREFETCH_FW
+static inline void devlink_flash_update_begin_notify(struct devlink *dl) { }
+static inline void devlink_flash_update_end_notify(struct devlink *dl) { }
+#endif
+
+#else
+#define HAVE_DEVLINK_PREFETCH_FW
+static inline void devlink_flash_update_begin_notify(struct devlink *dl) { }
+static inline void devlink_flash_update_end_notify(struct devlink *dl) { }
+#endif /* 5.11.0 */
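+
+/* Editor's note: a hedged sketch, not in the original patch, of driver flash
+ * code that can call the notify helper unconditionally thanks to the stubs
+ * above; the forward declaration and function name are hypothetical, and the
+ * sketch assumes CONFIG_NET_DEVLINK is enabled.
+ */
+struct devlink;
+static inline void my_flash_notify_progress(struct devlink *dl,
+					    unsigned long done,
+					    unsigned long total)
+{
+	devlink_flash_update_status_notify(dl, "flash", NULL, done, total);
+}
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0))
+#if (!RHEL_RELEASE_CODE || (RHEL_RELEASE_CODE && \
+    (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,5))))
+static inline bool 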
dev_page_is_reusable(struct page *page) +{ + return likely(page_to_nid(page) == numa_mem_id() && + !page_is_pfmemalloc(page)); +} +#endif +#endif /* 5.12.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,13,0)) + +void _kc_ethtool_sprintf(u8 **data, const char *fmt, ...); +#define ethtool_sprintf _kc_ethtool_sprintf +#endif /* 5.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)) +#if (!RHEL_RELEASE_CODE || (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)))) + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(8, 6) == RHEL_RELEASE_CODE)) +#define HAVE_COALESCE_EXTACK +#endif + +#define ndo_eth_ioctl ndo_do_ioctl + +#if IS_ENABLED(CONFIG_NET_DEVLINK) +static inline struct devlink *_kc_devlink_alloc(const struct devlink_ops *ops, + size_t priv_size, + struct device *dev) +{ + return devlink_alloc(ops, priv_size); +} +#define devlink_alloc _kc_devlink_alloc +#endif /* CONFIG_NET_DEVLINK */ + +#else + +#if IS_ENABLED(CONFIG_NET_DEVLINK) +#define HAVE_VOID_DEVLINK_REGISTER +#endif /* CONFIG_NET_DEVLINK */ + +#endif /* not RH or RH < 9.0 */ + +#else + +#define HAVE_COALESCE_EXTACK + +#if IS_ENABLED(CONFIG_NET_DEVLINK) +#define HAVE_VOID_DEVLINK_REGISTER +#endif /* CONFIG_NET_DEVLINK */ + +#endif /* 5.15.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 17, 0) > LINUX_VERSION_CODE) +#else +#define HAVE_RINGPARAM_EXTACK +#endif /* 5.17 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE) +static inline int skb_tcp_all_headers(const struct sk_buff *skb) +{ + return skb_transport_offset(skb) + tcp_hdrlen(skb); +} + +static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb) +{ + return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); +} + +#else +#endif /* 6.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0) && defined(netif_napi_add)) +#undef netif_napi_add +#define netif_napi_add(a, b, c) _netif_napi_add((a), (b), (c), NAPI_POLL_WEIGHT) +#else +#define netif_napi_add(a, b, c) netif_napi_add((a), (b), (c), NAPI_POLL_WEIGHT) +#endif + +#else +#endif /* 6.1 */ + +/* We don't support PTP on older RHEL kernels (needs more compat work) */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) +#undef CONFIG_PTP_1588_CLOCK +#undef CONFIG_PTP_1588_CLOCK_MODULE +#endif + +/* We don't support PTP on SUSE kernels (needs more compat work) */ +#ifdef CONFIG_SUSE_KERNEL +#undef CONFIG_PTP_1588_CLOCK +#undef CONFIG_PTP_1588_CLOCK_MODULE +#endif + +#endif /* _KCOMPAT_H_ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/kcompat_overflow.h b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/kcompat_overflow.h new file mode 100644 index 0000000000..c6010e9474 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/kcompat_overflow.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2019 Intel Corporation. 
*/ + +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. + * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. [The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. + */ +/* The is_signed_type macro is redefined in a few places in various kernel + * headers. If this header is included at the same time as one of those, we + * will generate compilation warnings. Since we can't fix every old kernel, + * rename is_signed_type for this file to _kc_is_signed_type. This prevents + * the macro name collision, and should be safe since our drivers do not + * directly call the macro. + */ +#define _kc_is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. 
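+ */
+
+/* Editor's note: a hedged illustration, not in the original patch, of the
+ * wrap-around test used by the fallback macros below: for u8 operands,
+ * 200 + 100 stores 44, and 44 < 200 flags the overflow.
+ */
+static inline bool my_u8_add_overflows(u8 a, u8 b, u8 *d)
+{
+	*d = a + b;	/* unsigned wrap is well-defined (mod 256) */
+	return *d < a;	/* true iff the addition wrapped */
+}
+
+/* The fallback implementations follow; as noted above, the unsigned cases
+ * are straightforward: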
*/ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. + */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. 
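+ */
+
+/* Editor's note: a hedged illustration, not in the original patch, of the
+ * division-based signed check described above, written out for s32 so the
+ * three cases are visible; the helper name is hypothetical.
+ */
+static inline bool my_s32_mul_overflows(s32 a, s32 b)
+{
+	if (a > 0)
+		return b > S32_MAX / a || b < S32_MIN / a;
+	if (a < -1)
+		return b > S32_MIN / a || b < S32_MAX / a;
+	return a == -1 && b == S32_MIN;	/* a is 0 or -1 here */
+}
+
+/* The generic fallback macro follows the same structure: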
+ */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** check_shl_overflow() - Calculate a left-shifted value and check overflow + * + * @a: Value to be shifted + * @s: How many bits left to shift + * @d: Pointer to where to store the result + * + * Computes *@d = (@a << @s) + * + * Returns true if '*d' cannot hold the result or when 'a << s' doesn't + * make sense. Example conditions: + * - 'a << s' causes bits to be lost when stored in *d. + * - 's' is garbage (e.g. negative) or so large that the result of + * 'a << s' is guaranteed to be 0. + * - 'a' is negative. + * - 'a << s' sets the sign bit, if any, in '*d'. + * + * '*d' will hold the results of the attempted shift, but is not + * considered "safe for use" if false is returned. + */ +#define check_shl_overflow(a, s, d) ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = \ + _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || *_d < 0 || _a < 0 || \ + (*_d >> _to_shift) != _a); \ +}) + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. 
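+ *
+ * (An editor's usage sketch is interposed below; the kernel-doc resumes
+ * after it.)
+ */
+
+/* Editor's note: a hedged sketch, not in the original patch, of allocating a
+ * 2-D table with array_size() above; it assumes <linux/slab.h> is already
+ * available in the including driver, and the function name is hypothetical.
+ */
+static inline void *my_alloc_table(size_t rows, size_t cols, gfp_t gfp)
+{
+	size_t bytes = array_size(rows, cols);	/* SIZE_MAX on overflow */
+
+	if (bytes == SIZE_MAX)
+		return NULL;
+	return kzalloc(bytes, gfp);
+}
+
+/* struct_size() kernel-doc, continued: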
+ * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/net_dim.c b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/net_dim.c new file mode 100644 index 0000000000..32dcf7278c --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/eth/ionic/net_dim.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* + * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + */ + +#ifndef CONFIG_DIMLIB +#include "dim.h" + +/* + * Net DIM profiles: + * There are different set of profiles for each CQ period mode. + * There are different set of profiles for RX/TX CQs. + * Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES + */ +#define NET_DIM_PARAMS_NUM_PROFILES 5 +#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 +#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128 +#define NET_DIM_DEF_PROFILE_CQE 1 +#define NET_DIM_DEF_PROFILE_EQE 1 + +#define NET_DIM_RX_EQE_PROFILES { \ + {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ +} + +#define NET_DIM_RX_CQE_PROFILES { \ + {2, 256}, \ + {8, 128}, \ + {16, 64}, \ + {32, 64}, \ + {64, 64} \ +} + +#define NET_DIM_TX_EQE_PROFILES { \ + {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \ +} + +#define NET_DIM_TX_CQE_PROFILES { \ + {5, 128}, \ + {8, 64}, \ + {16, 32}, \ + {32, 32}, \ + {64, 32} \ +} + +static const struct dim_cq_moder +rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { + NET_DIM_RX_EQE_PROFILES, + NET_DIM_RX_CQE_PROFILES, +}; + +static const struct dim_cq_moder +tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { + NET_DIM_TX_EQE_PROFILES, + NET_DIM_TX_CQE_PROFILES, +}; + +struct dim_cq_moder +net_dim_get_rx_moderation(u8 cq_period_mode, int ix) +{ + struct dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix]; + + cq_moder.cq_period_mode = cq_period_mode; + return cq_moder; +} +//EXPORT_SYMBOL(net_dim_get_rx_moderation); + +struct dim_cq_moder +net_dim_get_def_rx_moderation(u8 cq_period_mode) +{ + u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ? + NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE; + + return net_dim_get_rx_moderation(cq_period_mode, profile_ix); +} +//EXPORT_SYMBOL(net_dim_get_def_rx_moderation); + +struct dim_cq_moder +net_dim_get_tx_moderation(u8 cq_period_mode, int ix) +{ + struct dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix]; + + cq_moder.cq_period_mode = cq_period_mode; + return cq_moder; +} +//EXPORT_SYMBOL(net_dim_get_tx_moderation); + +struct dim_cq_moder +net_dim_get_def_tx_moderation(u8 cq_period_mode) +{ + u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ? 
+ NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE; + + return net_dim_get_tx_moderation(cq_period_mode, profile_ix); +} +//EXPORT_SYMBOL(net_dim_get_def_tx_moderation); + +static int net_dim_step(struct dim *dim) +{ + if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) + return DIM_TOO_TIRED; + + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + break; + case DIM_GOING_RIGHT: + if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) + return DIM_ON_EDGE; + dim->profile_ix++; + dim->steps_right++; + break; + case DIM_GOING_LEFT: + if (dim->profile_ix == 0) + return DIM_ON_EDGE; + dim->profile_ix--; + dim->steps_left++; + break; + } + + dim->tired++; + return DIM_STEPPED; +} + +static void net_dim_exit_parking(struct dim *dim) +{ + dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT; + net_dim_step(dim); +} + +static int net_dim_stats_compare(struct dim_stats *curr, + struct dim_stats *prev) +{ + if (!prev->bpms) + return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + if (!prev->ppms) + return curr->ppms ? DIM_STATS_BETTER : + DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + if (!prev->epms) + return DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + return DIM_STATS_SAME; +} + +static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim) +{ + int prev_state = dim->tune_state; + int prev_ix = dim->profile_ix; + int stats_res; + int step_res; + + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + stats_res = net_dim_stats_compare(curr_stats, + &dim->prev_stats); + if (stats_res != DIM_STATS_SAME) + net_dim_exit_parking(dim); + break; + + case DIM_PARKING_TIRED: + dim->tired--; + if (!dim->tired) + net_dim_exit_parking(dim); + break; + + case DIM_GOING_RIGHT: + case DIM_GOING_LEFT: + stats_res = net_dim_stats_compare(curr_stats, + &dim->prev_stats); + if (stats_res != DIM_STATS_BETTER) + dim_turn(dim); + + if (dim_on_top(dim)) { + dim_park_on_top(dim); + break; + } + + step_res = net_dim_step(dim); + switch (step_res) { + case DIM_ON_EDGE: + dim_park_on_top(dim); + break; + case DIM_TOO_TIRED: + dim_park_tired(dim); + break; + } + + break; + } + + if (prev_state != DIM_PARKING_ON_TOP || + dim->tune_state != DIM_PARKING_ON_TOP) + dim->prev_stats = *curr_stats; + + return dim->profile_ix != prev_ix; +} + +void net_dim(struct dim *dim, struct dim_sample end_sample) +{ + struct dim_stats curr_stats; + u16 nevents; + + switch (dim->state) { + case DIM_MEASURE_IN_PROGRESS: + nevents = BIT_GAP(BITS_PER_TYPE(u16), + end_sample.event_ctr, + dim->start_sample.event_ctr); + if (nevents < DIM_NEVENTS) + break; + dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats); + if (net_dim_decision(&curr_stats, dim)) { + dim->state = DIM_APPLY_NEW_PROFILE; + schedule_work(&dim->work); + break; + } + /* fall through */ + case DIM_START_MEASURE: + dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr, + end_sample.byte_ctr, &dim->start_sample); + dim->state = DIM_MEASURE_IN_PROGRESS; + break; + case DIM_APPLY_NEW_PROFILE: + break; + } +} +//EXPORT_SYMBOL(net_dim); +#endif diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/linux_ver.mk b/platform/pensando/dsc-drivers/src/drivers/linux/linux_ver.mk new 
file mode 100644 index 0000000000..c171251727 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/linux_ver.mk @@ -0,0 +1,211 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 1999 - 2019 Intel Corporation. + +# (thanks Intel!) +# +##################### +# Helpful functions # +##################### + +readlink = $(shell readlink -f ${1}) + +# helper functions for converting kernel version to version codes +get_kver = $(or $(word ${2},$(subst ., ,${1})),0) +get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \ + [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \ + [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \ + printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) ) + +################ +# depmod Macro # +################ + +cmd_depmod = /sbin/depmod $(if ${SYSTEM_MAP_FILE},-e -F ${SYSTEM_MAP_FILE}) \ + $(if $(strip ${INSTALL_MOD_PATH}),-b ${INSTALL_MOD_PATH}) \ + -a ${KVER} + +################ +# dracut Macro # +################ + +cmd_initrd := $(shell \ + if which dracut > /dev/null 2>&1 ; then \ + echo "dracut --force"; \ + elif which update-initramfs > /dev/null 2>&1 ; then \ + echo "update-initramfs -u"; \ + fi ) + +##################### +# Environment tests # +##################### + +DRIVER_UPPERCASE := $(shell echo ${DRIVER} | tr "[:lower:]" "[:upper:]") + +ifeq (,${BUILD_KERNEL}) +BUILD_KERNEL=$(shell uname -r) +endif + +# Kernel Search Path +# All the places we look for kernel source +KSP := /lib/modules/${BUILD_KERNEL}/build \ + /lib/modules/${BUILD_KERNEL}/source \ + /usr/src/linux-${BUILD_KERNEL} \ + /usr/src/linux-$(shell echo ${BUILD_KERNEL} | sed 's/-.*//') \ + /usr/src/kernel-headers-${BUILD_KERNEL} \ + /usr/src/kernel-source-${BUILD_KERNEL} \ + /usr/src/linux-$(shell echo ${BUILD_KERNEL} | sed 's/\([0-9]*\.[0-9]*\)\..*/\1/') \ + /usr/src/linux \ + /usr/src/kernels/${BUILD_KERNEL} \ + /usr/src/kernels + +# prune the list down to only values that exist and have an include/linux +# sub-directory. We can't use include/config because some older kernels don't +# have this. +test_dir = $(shell [ -e ${dir}/include/linux -o -e ${dir}/include/generated ] && echo ${dir}) +KSP := $(foreach dir, ${KSP}, ${test_dir}) + +# we will use this first valid entry in the search path +ifeq (,${KSRC}) + KSRC := $(firstword ${KSP}) +endif + +ifeq (,${KSRC}) + $(warning *** Kernel header files not in any of the expected locations.) + $(warning *** Install the appropriate kernel development package, e.g.) 
+ $(error kernel-devel, for building kernel modules and try again) +else +ifeq (/lib/modules/${BUILD_KERNEL}/source, ${KSRC}) + KOBJ := /lib/modules/${BUILD_KERNEL}/build +else + KOBJ := ${KSRC} +endif +endif + +# Version file Search Path +VSP := ${KOBJ}/include/generated/utsrelease.h \ + ${KOBJ}/include/linux/utsrelease.h \ + ${KOBJ}/include/linux/version.h \ + ${KOBJ}/include/generated/uapi/linux/version.h \ + /boot/vmlinuz.version.h + +# Config file Search Path +CSP := ${KOBJ}/include/generated/autoconf.h \ + ${KOBJ}/include/linux/autoconf.h \ + /boot/vmlinuz.autoconf.h + +# System.map Search Path (for depmod) +MSP := ${KSRC}/System.map \ + /boot/System.map-${BUILD_KERNEL} + +# prune the lists down to only files that exist +test_file = $(shell [ -f ${file} ] && echo ${file}) +VSP := $(foreach file, ${VSP}, ${test_file}) +CSP := $(foreach file, ${CSP}, ${test_file}) +MSP := $(foreach file, ${MSP}, ${test_file}) + + +# and use the first valid entry in the Search Paths +ifeq (,${VERSION_FILE}) + VERSION_FILE := $(firstword ${VSP}) +endif + +ifeq (,${CONFIG_FILE}) + CONFIG_FILE := $(firstword ${CSP}) +endif + +ifeq (,${SYSTEM_MAP_FILE}) + SYSTEM_MAP_FILE := $(firstword ${MSP}) +endif + +ifeq (,$(wildcard ${VERSION_FILE})) + $(error Linux kernel source not configured - missing version header file) +endif + +ifeq (,$(wildcard ${CONFIG_FILE})) + $(error Linux kernel source not configured - missing autoconf.h) +endif + +ifeq (,$(wildcard ${SYSTEM_MAP_FILE})) + $(warning Missing System.map file - depmod will not check for missing symbols) +endif + +ifneq ($(words $(subst :, ,$(CURDIR))), 1) + $(error Sources directory '$(CURDIR)' cannot contain spaces nor colons. Rename directory or move sources to another path) +endif + +####################### +# Linux Version Setup # +####################### + +# The following command line parameter is intended for development of KCOMPAT +# against upstream kernels such as net-next which have broken or non-updated +# version codes in their Makefile. They are intended for debugging and +# development purpose only so that we can easily test new KCOMPAT early. If you +# don't know what this means, you do not need to set this flag. There is no +# arcane magic here. + +# Convert LINUX_VERSION into LINUX_VERSION_CODE +ifneq (${LINUX_VERSION},) + LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3)) +endif + +# Honor LINUX_VERSION_CODE +ifneq (${LINUX_VERSION_CODE},) + $(warning Forcing target kernel to build with LINUX_VERSION_CODE of ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}). Do this at your own risk.) + KVER_CODE := ${LINUX_VERSION_CODE} + EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE} +endif + +# Determine SLE_LOCALVERSION_CODE for SuSE SLE >= 11 (needed by kcompat) +# This assumes SuSE will continue setting CONFIG_LOCALVERSION to the string +# appended to the stable kernel version on which their kernel is based with +# additional versioning information (up to 3 numbers), a possible abbreviated +# git SHA1 commit id and a kernel type, e.g. 
CONFIG_LOCALVERSION=-1.2.3-default +# or CONFIG_LOCALVERSION=-999.gdeadbee-default +ifeq (1,$(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\ + grep -m 1 CONFIG_SUSE_KERNEL | awk '{ print $$3 }')) + +ifneq (10,$(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\ + grep -m 1 CONFIG_SLE_VERSION | awk '{ print $$3 }')) + + LOCALVERSION := $(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\ + grep -m 1 CONFIG_LOCALVERSION | awk '{ print $$3 }' |\ + cut -d'-' -f2 | sed 's/\.g[[:xdigit:]]\{7\}//') + LOCALVER_A := $(shell echo ${LOCALVERSION} | cut -d'.' -f1) + LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2) + LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3) + SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_A} \* 65536 + \ + 0${LOCALVER_B} \* 256 + 0${LOCALVER_C}) + EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE} +endif +endif + + +# get the kernel version - we use this to find the correct install path +KVER := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VERSION_FILE} | grep UTS_RELEASE | \ + awk '{ print $$3 }' | sed 's/\"//g') + +# assume source symlink is the same as build, otherwise adjust KOBJ +ifneq (,$(wildcard /lib/modules/${KVER}/build)) + ifneq (${KSRC},$(call readlink,/lib/modules/${KVER}/build)) + KOBJ=/lib/modules/${KVER}/build + endif +endif + +ifeq (${KVER_CODE},) + KVER_CODE := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VSP} 2> /dev/null |\ + grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g') +endif + +# minimum_kver_check +# +# helper function to provide uniform output for different drivers to abort the +# build based on kernel version check. Usage: "$(call minimum_kver_check,2,6,XX)". +define _minimum_kver_check +ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,${1},${2},${3}) ]; echo "$$?")) + $$(warning *** Aborting the build.) + $$(error This driver is not supported on kernel versions older than ${1}.${2}.${3}) +endif +endef +minimum_kver_check = $(eval $(call _minimum_kver_check,${1},${2},${3})) + diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/mdev/Makefile b/platform/pensando/dsc-drivers/src/drivers/linux/mdev/Makefile new file mode 100644 index 0000000000..22b423ce43 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/mdev/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MDEV) := mdev.o + +mdev-y := mdev_drv.o diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/mdev/mdev_drv.c b/platform/pensando/dsc-drivers/src/drivers/linux/mdev/mdev_drv.c new file mode 100644 index 0000000000..c3cb3baeb2 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/mdev/mdev_drv.c @@ -0,0 +1,670 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2017-2021 Pensando Systems, Inc + * Copyright (C) 2008 Magnus Damm + * + * Based on uio_pdrv.c by Uwe Kleine-Koenig, + * Copyright (C) 2008 by Digi International Inc. 
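 *
 * Overview (hedged, inferred from the code in this file): user space
 * asks this driver to instantiate "mnet"/"mcrypt" platform devices via
 * ioctls on the mdev character device; mdev_drv.h defines
 * struct mdev_create_req plus the MDEV_CREATE_MNET, MDEV_CREATE_MCRYPT
 * and MDEV_DESTROY codes. A minimal user-space sketch, all values
 * hypothetical:
 *
 *	struct mdev_create_req req = { .regs_pa = 0x0, .name = "mnet0" };
 *	char victim[MDEV_NAME_LEN] = "mnet0";
 *	ioctl(fd, MDEV_CREATE_MNET, &req);
 *	ioctl(fd, MDEV_DESTROY, victim);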
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mdev_drv.h" + +#define DEVINFO_SIZE 0x1000 +#define DRVCFG_SIZE 0x80 +#define MSIXCFG_SIZE 0x40 +#define DOORBELL_PG_SIZE 0x8 +#define TSTAMP_SIZE 0x8 +#define MDEV_NODE_NAME_LEN 0x8 + +typedef enum mdev_type { + MDEV_TYPE_MNET, + MDEV_TYPE_MCRYPT, +} mdev_type_t; + +struct mdev_dev; + +typedef int (*platform_rsrc_func_t)(struct mdev_dev *, + struct mdev_create_req *); +typedef int (*attach_func_t)(struct platform_device *); +typedef int (*detach_func_t)(struct platform_device *); + +struct mdev_dev { + struct device_node *of_node; + struct platform_device *pdev; + struct list_head node; + mdev_type_t type; + platform_rsrc_func_t platform_rsrc; + attach_func_t attach; + detach_func_t detach; +}; + +LIST_HEAD(mdev_list); + +static struct class *mdev_class; +static dev_t mdev_dev; +struct device *mdev_device; +struct device *mnet_device; +static unsigned int mdev_major; +static struct cdev mdev_cdev; + +/* Yuck */ +extern int ionic_probe(struct platform_device *pfdev); +extern int ionic_remove(struct platform_device *pfdev); + +struct uio_pdrv_genirq_platdata { + struct uio_info *uioinfo; + spinlock_t lock; + unsigned long flags; + struct platform_device *pdev; +}; + +/* Bits in uio_pdrv_genirq_platdata.flags */ +enum { + UIO_IRQ_DISABLED = 0, +}; + +static int uio_pdrv_genirq_open(struct uio_info *info, struct inode *inode) +{ + return 0; +} + +static int uio_pdrv_genirq_release(struct uio_info *info, struct inode *inode) +{ + return 0; +} + +static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info) +{ + struct uio_pdrv_genirq_platdata *priv = dev_info->priv; + + /* Just disable the interrupt in the interrupt controller, and + * remember the state so we can allow user space to enable it later. + */ + + spin_lock(&priv->lock); + if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags)) + disable_irq_nosync(irq); + spin_unlock(&priv->lock); + + return IRQ_HANDLED; +} + +static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) +{ + struct uio_pdrv_genirq_platdata *priv = dev_info->priv; + unsigned long flags; + + /* Allow user space to enable and disable the interrupt + * in the interrupt controller, but keep track of the + * state to prevent per-irq depth damage. + * + * Serialize this operation to support multiple tasks and concurrency + * with irq handler on SMP systems. 
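 *
 * For illustration (not part of this driver): irq_on arrives here
 * through the standard UIO write() interface. A minimal user-space
 * sketch, assuming the device surfaces as /dev/uio0:
 *
 *	int32_t on = 1, nevents;
 *	int fd = open("/dev/uio0", O_RDWR);
 *	read(fd, &nevents, sizeof(nevents)); // blocks until an interrupt
 *	write(fd, &on, sizeof(on));          // invokes this irqcontrol hook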
+ */ + + spin_lock_irqsave(&priv->lock, flags); + if (irq_on) { + if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags)) + enable_irq(dev_info->irq); + } else { + if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags)) + disable_irq_nosync(dev_info->irq); + } + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int mdev_uio_pdrv_genirq_probe(struct platform_device *pdev) +{ + struct uio_info *uioinfo = dev_get_platdata(&pdev->dev); + struct uio_pdrv_genirq_platdata *priv; + struct uio_mem *uiomem; + int ret = -EINVAL; + int i; + + if (pdev->dev.of_node) { + /* alloc uioinfo for one device */ + uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo), + GFP_KERNEL); + if (!uioinfo) { + dev_err(&pdev->dev, "unable to kmalloc\n"); + return -ENOMEM; + } + uioinfo->name = pdev->name; + uioinfo->version = "devicetree"; + /* Multiple IRQs are not supported */ + } + + if (!uioinfo || !uioinfo->name || !uioinfo->version) { + dev_err(&pdev->dev, "missing platform_data\n"); + return ret; + } + + if (uioinfo->handler || uioinfo->irqcontrol || + uioinfo->irq_flags & IRQF_SHARED) { + dev_err(&pdev->dev, "interrupt configuration error\n"); + return ret; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, "unable to kmalloc\n"); + return -ENOMEM; + } + + priv->uioinfo = uioinfo; + spin_lock_init(&priv->lock); + priv->flags = 0; /* interrupt is enabled to begin with */ + priv->pdev = pdev; + + if (!uioinfo->irq) { +#if (KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE) + ret = platform_get_irq(pdev, 0); +#else + ret = platform_get_irq_optional(pdev, 0); +#endif + uioinfo->irq = ret; + if (ret == -ENXIO && pdev->dev.of_node) + uioinfo->irq = UIO_IRQ_NONE; + else if (ret < 0) { + dev_err(&pdev->dev, "failed to get IRQ: %d\n", ret); + return ret; + } + } + + uiomem = &uioinfo->mem[0]; + + for (i = 0; i < pdev->num_resources; ++i) { + struct resource *r = &pdev->resource[i]; + + if (r->flags != IORESOURCE_MEM) + continue; + + if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) { + dev_warn(&pdev->dev, "device has more than " + __stringify(MAX_UIO_MAPS) + " I/O memory resources.\n"); + break; + } + + uiomem->memtype = UIO_MEM_PHYS; + uiomem->addr = r->start & PAGE_MASK; + uiomem->offs = (r->start & (PAGE_SIZE - 1)); + uiomem->size = PAGE_ALIGN(resource_size(r)); + dev_info(&pdev->dev, "resource %d size %llu", i, uiomem->size); + uiomem->name = r->name; + ++uiomem; + } + + while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) { + uiomem->size = 0; + ++uiomem; + } + + /* This driver requires no hardware specific kernel code to handle + * interrupts. Instead, the interrupt handler simply disables the + * interrupt in the interrupt controller. User space is responsible + * for performing hardware specific acknowledge and re-enabling of + * the interrupt in the interrupt controller. + * + * Interrupt sharing is not supported. 
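 *
 * For illustration (not part of this driver): each IORESOURCE_MEM
 * resource gathered above becomes UIO map i, which user space mmaps
 * through the same fd at offset i * PAGE_SIZE:
 *
 *	void *bar = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, i * getpagesize());
 *
 * with map_len available from /sys/class/uio/uio0/maps/mapN/size.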
+ */ + + uioinfo->handler = uio_pdrv_genirq_handler; + uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol; + uioinfo->open = uio_pdrv_genirq_open; + uioinfo->release = uio_pdrv_genirq_release; + uioinfo->priv = priv; + + ret = uio_register_device(&pdev->dev, priv->uioinfo); + if (ret) { + dev_err(&pdev->dev, "unable to register uio device\n"); + return ret; + } + + platform_set_drvdata(pdev, priv); + return 0; +} + +static int mdev_uio_pdrv_genirq_remove(struct platform_device *pdev) +{ + struct uio_pdrv_genirq_platdata *priv = platform_get_drvdata(pdev); + + uio_unregister_device(priv->uioinfo); + + priv->uioinfo->handler = NULL; + priv->uioinfo->irqcontrol = NULL; + + return 0; +} + +static int mdev_open(struct inode *inode, struct file *filep) +{ + return 0; +} + +static int mdev_close(struct inode *i, struct file *f) +{ + return 0; +} + +static int mdev_get_mnet_platform_rsrc(struct mdev_dev *mdev, + struct mdev_create_req *req) +{ + struct resource mnet_resource[] = { + { /*devinfo*/ + .flags = IORESOURCE_MEM, + .start = req->regs_pa, + .end = req->regs_pa + DEVINFO_SIZE - 1 + }, {/*drvcfg/intr_ctrl*/ + .flags = IORESOURCE_MEM, + .start = req->drvcfg_pa, + .end = req->drvcfg_pa + DRVCFG_SIZE - 1 + }, {/*msixcfg*/ + .flags = IORESOURCE_MEM, + .start = req->msixcfg_pa, + .end = req->msixcfg_pa + MSIXCFG_SIZE - 1 + }, {/*doorbell*/ + .flags = IORESOURCE_MEM, + .start = req->doorbell_pa, + .end = req->doorbell_pa + DOORBELL_PG_SIZE - 1 + }, {/*tstamp*/ + .flags = IORESOURCE_MEM, + .start = req->tstamp_pa, + .end = req->tstamp_pa + TSTAMP_SIZE - 1 + } + }; + + /* add resource info */ + return platform_device_add_resources(mdev->pdev, mnet_resource, + ARRAY_SIZE(mnet_resource)); +} + +static int mdev_get_mcrypt_platform_rsrc(struct mdev_dev *mdev, + struct mdev_create_req *req) +{ + struct resource mcrypt_resource[] = { + { /*devinfo*/ + .flags = IORESOURCE_MEM, + .start = req->regs_pa, + .end = req->regs_pa + DEVINFO_SIZE - 1 + }, {/*drvcfg/intr_ctrl*/ + .flags = IORESOURCE_MEM, + .start = req->drvcfg_pa, + .end = req->drvcfg_pa + DRVCFG_SIZE - 1 + }, {/*msixcfg*/ + .flags = IORESOURCE_MEM, + .start = req->msixcfg_pa, + .end = req->msixcfg_pa + MSIXCFG_SIZE - 1 + }, {/*doorbell*/ + .flags = IORESOURCE_MEM, + .start = req->doorbell_pa, + .end = req->doorbell_pa + DOORBELL_PG_SIZE - 1 + } + }; + + /* add resource info */ + return platform_device_add_resources(mdev->pdev, mcrypt_resource, + ARRAY_SIZE(mcrypt_resource)); +} + +static int mdev_attach_one(struct mdev_dev *mdev, + struct mdev_create_req *req) +{ + char *mdev_name = NULL; + int err = 0; + + mdev->pdev = of_find_device_by_node(mdev->of_node); + if (!mdev->pdev) { + dev_err(mdev_device, "Can't find device for of_node %s\n", + mdev->of_node->name); + err = -ENXIO; + goto err; + } + + err = (*mdev->platform_rsrc)(mdev, req); + if (err) { + dev_err(mdev_device, "Can't get platform resources\n"); + err = -ENOSPC; + goto err_unset_pdev; + } + + mdev_name = devm_kzalloc(mdev_device, MDEV_NAME_LEN + 1, GFP_KERNEL); + if (!mdev_name) { + dev_err(mdev_device, "Can't allocate memory for name\n"); + err = -ENOMEM; + goto err_unset_pdev; + } + + strncpy(mdev_name, req->name, MDEV_NAME_LEN); + mdev->pdev->name = mdev_name; + + /* call probe with this platform_device */ + err = (*mdev->attach)(mdev->pdev); + if (err) { + dev_err(mdev_device, "probe for %s failed: %d\n", + mdev->pdev->name, err); + goto err_free_name; + } + + dev_info(mdev_device, "%s created successfully\n", mdev->pdev->name); + return 0; + +err_free_name: + 
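	/*
	 * (Hedged note: the devm_kfree() below is left commented out,
	 * presumably because mdev->pdev->name still points at this
	 * buffer and the devm allocation is reclaimed with mdev_device
	 * anyway; freeing it here would leave a dangling name pointer.)
	 */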
//devm_kfree(mdev_device, mdev->pdev->name); + //mdev->pdev->name = NULL; +err_unset_pdev: + mdev->pdev = NULL; +err: + return err; +} + +static int mdev_detach_one(struct mdev_dev *mdev) +{ + int err; + + if (!mdev->pdev) + return 0; + + dev_info(mdev_device, "Removing interface %s\n", mdev->pdev->name); + err = (*mdev->detach)(mdev->pdev); + if (err) { + dev_err(mdev_device, "Failed to remove %s\n", + mdev->pdev->name); + return err; + } + + dev_info(mdev_device, "Successfully removed %s\n", mdev->pdev->name); + + //devm_kfree(mdev_device, mdev->pdev->name); + mdev->pdev = NULL; + + return 0; +} + +static inline bool mdev_ioctl_matches(struct mdev_dev *mdev, uint32_t cmd) +{ + if (cmd == MDEV_CREATE_MNET && mdev->type == MDEV_TYPE_MNET) + return true; + + if (cmd == MDEV_CREATE_MCRYPT && mdev->type == MDEV_TYPE_MCRYPT) + return true; + + return false; +} + +static long mdev_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + char name[MDEV_NAME_LEN+1] = {0}; + struct mdev_create_req req; + struct mdev_dev *mdev; + int ret = -EDQUOT; + + switch (cmd) { + case MDEV_CREATE_MNET: + case MDEV_CREATE_MCRYPT: + if (copy_from_user(&req, argp, sizeof(req))) { + dev_err(mdev_device, "copy_from_user failed\n"); + ret = -EFAULT; + break; + } + dev_info(mdev_device, "Creating %s %s\n", + req.name, req.is_uio_dev ? "(UIO)" : ""); + + /* scan the list to see if it already exists, + * and if so, quietly ignore this request + */ + list_for_each_entry(mdev, &mdev_list, node) { + if (mdev->pdev && + !strncmp(mdev->pdev->name, req.name, MDEV_NAME_LEN)) + return 0; + } + + /* find the first useful empty slot */ + list_for_each_entry(mdev, &mdev_list, node) { + if (mdev->pdev || !mdev_ioctl_matches(mdev, cmd)) + continue; + + if (req.is_uio_dev) { + mdev->attach = mdev_uio_pdrv_genirq_probe; + mdev->detach = mdev_uio_pdrv_genirq_remove; + } else if (mdev->type == MDEV_TYPE_MNET) { + mdev->attach = ionic_probe; + mdev->detach = ionic_remove; + } else { + ret = -EINVAL; + break; + } + + ret = mdev_attach_one(mdev, &req); + break; + } + break; + + case MDEV_DESTROY: + if (copy_from_user(name, argp, MDEV_NAME_LEN)) { + dev_err(mdev_device, "copy_from_user failed\n"); + ret = -EFAULT; + break; + } + dev_info(mdev_device, "Removing %s\n", name); + list_for_each_entry(mdev, &mdev_list, node) { + if (!mdev->pdev || + strncmp(mdev->pdev->name, name, MDEV_NAME_LEN)) + continue; + + ret = mdev_detach_one(mdev); + break; + } + break; + + default: + dev_dbg(mdev_device, "Invalid ioctl %d\n", cmd); + ret = -EINVAL; + break; + } + + return ret; +} + +static int mdev_probe(struct platform_device *pfdev) +{ + return 0; +} + +static int mdev_remove(struct platform_device *pfdev) +{ + struct mdev_dev *mdev, *tmp; + + list_for_each_entry_safe(mdev, tmp, &mdev_list, node) { + (void)mdev_detach_one(mdev); + list_del(&mdev->node); + devm_kfree(mdev_device, mdev); + } + + return 0; +} + +static const struct of_device_id mdev_of_match[] = { + {.compatible = "pensando,mnet"}, + {.compatible = "pensando,mcrypt"}, + {/* end of table */} +}; + +static struct platform_driver mdev_driver = { + .probe = mdev_probe, + .remove = mdev_remove, + .driver = { + .name = "pensando-mdev", + .owner = THIS_MODULE, + .of_match_table = mdev_of_match, + }, +}; + +static const struct file_operations mdev_fops = { + .owner = THIS_MODULE, + .open = mdev_open, + .release = mdev_close, + .unlocked_ioctl = mdev_ioctl, +}; + +static int mdev_init_dev_list(uint32_t max_dev, const char *pfx, + 
platform_rsrc_func_t platform_rsrc) +{ + char of_node_name[MDEV_NODE_NAME_LEN + 1] = {0}; + struct mdev_dev *mdev; + uint32_t i; + + for (i = 0; i < max_dev; i++) { + mdev = devm_kzalloc(mdev_device, sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return -ENOMEM; + + snprintf(of_node_name, sizeof(of_node_name), "%s%u", + pfx, i); + mdev->of_node = of_find_node_by_name(NULL, of_node_name); + + /* skip any node not found in device tree */ + if (mdev->of_node == NULL) { + devm_kfree(mdev_device, mdev); + continue; + } + + dev_info(mdev_device, "Found node %s\n", mdev->of_node->name); + mdev->platform_rsrc = platform_rsrc; + list_add_tail(&mdev->node, &mdev_list); + + // TODO: Should this put() happen when driver unloads? + of_node_put(mdev->of_node); + } + + return 0; +} + +static int __init mdev_init(void) +{ + struct mdev_dev *mdev, *tmp; + int ret; + + mdev_class = class_create(THIS_MODULE, DRV_NAME); + if (IS_ERR(mdev_class)) { + ret = PTR_ERR(mdev_class); + goto error_out; + } + + ret = alloc_chrdev_region(&mdev_dev, 0, NUM_MDEV_DEVICES, + MDEV_CHAR_DEV_NAME); + if (ret < 0) + goto error_destroy_class; + + mdev_major = MAJOR(mdev_dev); + + pr_info("Pensando mdev driver: mdev_major = %d\n", mdev_major); + + mdev_device = device_create(mdev_class, NULL, + MKDEV(mdev_major, 0), NULL, DRV_NAME); + if (IS_ERR(mdev_device)) { + pr_err("Failed to create device %s", DRV_NAME); + ret = PTR_ERR(mdev_device); + goto error_unregister_chrdev; + } + + dev_info(mdev_device, "device %s created\n", DRV_NAME); + +#ifndef MDEV_HACK + mnet_device = device_create(mdev_class, NULL, + MKDEV(mdev_major, 1), NULL, DRV_NAME_ALT); + if (IS_ERR(mnet_device)) { + pr_err("Failed to create device %s", DRV_NAME_ALT); + ret = PTR_ERR(mnet_device); + goto error_destroy_mdev; + } + + dev_info(mdev_device, "device %s created\n", DRV_NAME_ALT); +#endif + + cdev_init(&mdev_cdev, &mdev_fops); + + mdev_cdev.owner = THIS_MODULE; + + ret = cdev_add(&mdev_cdev, mdev_dev, NUM_MDEV_DEVICES); + if (ret) { + dev_err(mdev_device, "Error adding character device %s\n", + MDEV_CHAR_DEV_NAME); + goto error_destroy_mnet; + } + + ret = mdev_init_dev_list(MAX_MNET_DEVICES, "mnet", + mdev_get_mnet_platform_rsrc); + if (ret) + goto error_destroy_cdev; + + ret = mdev_init_dev_list(MAX_MCRYPT_DEVICES, "mcrypt", + mdev_get_mcrypt_platform_rsrc); + if (ret) + goto error_destroy_list; + + ret = platform_driver_register(&mdev_driver); + if (ret) + goto error_destroy_list; + + return 0; + +error_destroy_list: + list_for_each_entry_safe(mdev, tmp, &mdev_list, node) { + list_del(&mdev->node); + devm_kfree(mdev_device, mdev); + } +error_destroy_cdev: + cdev_del(&mdev_cdev); +error_destroy_mnet: +#ifndef MDEV_HACK + device_destroy(mdev_class, MKDEV(mdev_major, 1)); +error_destroy_mdev: +#endif + device_destroy(mdev_class, MKDEV(mdev_major, 0)); +error_unregister_chrdev: + unregister_chrdev_region(mdev_dev, NUM_MDEV_DEVICES); +error_destroy_class: + class_destroy(mdev_class); +error_out: + return ret; +} + +static void __exit mdev_cleanup(void) +{ + platform_driver_unregister(&mdev_driver); + cdev_del(&mdev_cdev); +#ifndef MDEV_HACK + device_destroy(mdev_class, MKDEV(mdev_major, 1)); +#endif + device_destroy(mdev_class, MKDEV(mdev_major, 0)); + unregister_chrdev_region(mdev_dev, NUM_MDEV_DEVICES); + class_destroy(mdev_class); +} + +module_init(mdev_init); +module_exit(mdev_cleanup); + +MODULE_AUTHOR("Pensando Systems"); +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git
a/platform/pensando/dsc-drivers/src/drivers/linux/mdev/mdev_drv.h b/platform/pensando/dsc-drivers/src/drivers/linux/mdev/mdev_drv.h new file mode 100644 index 0000000000..4d5c6e98b9 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/mdev/mdev_drv.h @@ -0,0 +1,47 @@ +#ifndef _MDEV_DRV_H +#define _MDEV_DRV_H + +#include + +#define DRV_VERSION "0.1" +#define DRV_DESCRIPTION "Pensando mdev Driver" + +/* XXX There is a bug in older versions of the mnet driver - it fails to call + * cdev_del() on removal, leaving a landmine in the kobj_map. We can work around + * the issue by making sure this module loads at the same point in the map. + * Hence leaving the DRV_NAME as "mnet" and creating only a single device. + * + * Ideally this can be removed when we no longer support NDU from affected versions. + */ +#define MDEV_HACK + +#ifdef MDEV_HACK +#define DRV_NAME "mnet" +#define MDEV_CHAR_DEV_NAME "pen-mnet" +#define NUM_MDEV_DEVICES 1 /* The parent device(s) */ +#else +#define DRV_NAME "mdev" +#define DRV_NAME_ALT "mnet" +#define MDEV_CHAR_DEV_NAME "pen-mdev" +#define NUM_MDEV_DEVICES 2 /* The parent device(s) */ +#endif + +#define MAX_MNET_DEVICES 32 +#define MAX_MCRYPT_DEVICES 32 +#define MDEV_NAME_LEN 32 + +struct mdev_create_req { + uint64_t regs_pa; + uint64_t drvcfg_pa; + uint64_t msixcfg_pa; + uint64_t doorbell_pa; + uint64_t tstamp_pa; + int is_uio_dev; + char name[MDEV_NAME_LEN]; +}; + +#define MDEV_CREATE_MNET _IOWR('Q', 11, struct mdev_create_req) +#define MDEV_DESTROY _IOW('Q', 12, const char*) +#define MDEV_CREATE_MCRYPT _IOWR('Q', 13, struct mdev_create_req) + +#endif /* _MDEV_DRV_H */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/mnet_uio_pdrv_genirq/Makefile b/platform/pensando/dsc-drivers/src/drivers/linux/mnet_uio_pdrv_genirq/Makefile new file mode 100644 index 0000000000..94eaf0d28a --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/mnet_uio_pdrv_genirq/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MNET_UIO_PDRV_GENIRQ) := mnet_uio_pdrv_genirq.o + +mnet_uio_pdrv_genirq-y := mnet_uio_pdrv_genirq_drv.o diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/mnet_uio_pdrv_genirq/mnet_uio_pdrv_genirq_drv.c b/platform/pensando/dsc-drivers/src/drivers/linux/mnet_uio_pdrv_genirq/mnet_uio_pdrv_genirq_drv.c new file mode 100644 index 0000000000..0b7ad249b3 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/mnet_uio_pdrv_genirq/mnet_uio_pdrv_genirq_drv.c @@ -0,0 +1,291 @@ +/* + * drivers/uio/uio_pdrv_genirq.c + * + * Userspace I/O platform driver with generic IRQ handling code. + * + * Copyright (C) 2008 Magnus Damm + * + * Based on uio_pdrv.c by Uwe Kleine-Koenig, + * Copyright (C) 2008 by Digi International Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DRIVER_NAME "uio_pdrv_genirq" + +struct uio_pdrv_genirq_platdata { + struct uio_info *uioinfo; + spinlock_t lock; + unsigned long flags; + struct platform_device *pdev; +}; + +/* Bits in uio_pdrv_genirq_platdata.flags */ +enum { + UIO_IRQ_DISABLED = 0, +}; + +static int uio_pdrv_genirq_open(struct uio_info *info, struct inode *inode) +{ + // struct uio_pdrv_genirq_platdata *priv = info->priv; + + /* Wait until the Runtime PM code has woken up the device */ + //pm_runtime_get_sync(&priv->pdev->dev); + return 0; +} + +static int uio_pdrv_genirq_release(struct uio_info *info, struct inode *inode) +{ + // struct uio_pdrv_genirq_platdata *priv = info->priv; + + /* Tell the Runtime PM code that the device has become idle */ + //pm_runtime_put_sync(&priv->pdev->dev); + return 0; +} + +static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info) +{ + struct uio_pdrv_genirq_platdata *priv = dev_info->priv; + + /* Just disable the interrupt in the interrupt controller, and + * remember the state so we can allow user space to enable it later. + */ + + spin_lock(&priv->lock); + if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags)) + disable_irq_nosync(irq); + spin_unlock(&priv->lock); + + return IRQ_HANDLED; +} + +static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) +{ + struct uio_pdrv_genirq_platdata *priv = dev_info->priv; + unsigned long flags; + + /* Allow user space to enable and disable the interrupt + * in the interrupt controller, but keep track of the + * state to prevent per-irq depth damage. + * + * Serialize this operation to support multiple tasks and concurrency + * with irq handler on SMP systems. 
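 *
 * (Context, stated with hedging: disable_irq()/enable_irq() nest per
 * IRQ -- two disables need two enables -- so the UIO_IRQ_DISABLED bit
 * is what keeps the calls balanced and the per-irq depth count from
 * drifting.)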
+ */ + + spin_lock_irqsave(&priv->lock, flags); + if (irq_on) { + if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags)) + enable_irq(dev_info->irq); + } else { + if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags)) + disable_irq_nosync(dev_info->irq); + } + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +int mnet_uio_pdrv_genirq_probe(struct platform_device *pdev) +{ + struct uio_info *uioinfo = dev_get_platdata(&pdev->dev); + struct uio_pdrv_genirq_platdata *priv; + struct uio_mem *uiomem; + int ret = -EINVAL; + int i; + + if (pdev->dev.of_node) { + /* alloc uioinfo for one device */ + uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo), + GFP_KERNEL); + if (!uioinfo) { + dev_err(&pdev->dev, "unable to kmalloc\n"); + return -ENOMEM; + } + uioinfo->name = pdev->name; + uioinfo->version = "devicetree"; + /* Multiple IRQs are not supported */ + } + + if (!uioinfo || !uioinfo->name || !uioinfo->version) { + dev_err(&pdev->dev, "missing platform_data\n"); + return ret; + } + + if (uioinfo->handler || uioinfo->irqcontrol || + uioinfo->irq_flags & IRQF_SHARED) { + dev_err(&pdev->dev, "interrupt configuration error\n"); + return ret; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, "unable to kmalloc\n"); + return -ENOMEM; + } + + priv->uioinfo = uioinfo; + spin_lock_init(&priv->lock); + priv->flags = 0; /* interrupt is enabled to begin with */ + priv->pdev = pdev; + + if (!uioinfo->irq) { + ret = platform_get_irq(pdev, 0); + uioinfo->irq = ret; + if (ret == -ENXIO && pdev->dev.of_node) + uioinfo->irq = UIO_IRQ_NONE; + else if (ret < 0) { + dev_err(&pdev->dev, "failed to get IRQ\n"); + return ret; + } + } + + uiomem = &uioinfo->mem[0]; + + for (i = 0; i < pdev->num_resources; ++i) { + struct resource *r = &pdev->resource[i]; + + if (r->flags != IORESOURCE_MEM) + continue; + + if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) { + dev_warn(&pdev->dev, "device has more than " + __stringify(MAX_UIO_MAPS) + " I/O memory resources.\n"); + break; + } + + uiomem->memtype = UIO_MEM_PHYS; + uiomem->addr = r->start & PAGE_MASK; + uiomem->offs = (r->start & (PAGE_SIZE - 1)); + uiomem->size = PAGE_ALIGN(resource_size(r)); +#if 1 + dev_info(&pdev->dev, "resource %d size %llu", i, uiomem->size); + uiomem->name = r->name; +#endif + ++uiomem; + } + + while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) { + uiomem->size = 0; + ++uiomem; + } + + /* This driver requires no hardware specific kernel code to handle + * interrupts. Instead, the interrupt handler simply disables the + * interrupt in the interrupt controller. User space is responsible + * for performing hardware specific acknowledge and re-enabling of + * the interrupt in the interrupt controller. + * + * Interrupt sharing is not supported. + */ + + uioinfo->handler = uio_pdrv_genirq_handler; + uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol; + uioinfo->open = uio_pdrv_genirq_open; + uioinfo->release = uio_pdrv_genirq_release; + uioinfo->priv = priv; + + /* Enable Runtime PM for this device: + * The device starts in suspended state to allow the hardware to be + * turned off by default. The Runtime PM bus code should power on the + * hardware and enable clocks at open(). 
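 *
 * In this copy the Runtime PM hooks are stubbed out (see the
 * commented-out calls below). For illustration only, the usual wiring
 * would be:
 *
 *	pm_runtime_enable(&pdev->dev);          // in probe
 *	pm_runtime_get_sync(&priv->pdev->dev);  // in open()
 *	pm_runtime_put_sync(&priv->pdev->dev);  // in release()
 *
 * (a sketch of the standard pattern, not the shipped code).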
+ */ + //pm_runtime_enable(&pdev->dev); + + ret = uio_register_device(&pdev->dev, priv->uioinfo); + if (ret) { + dev_err(&pdev->dev, "unable to register uio device\n"); + // pm_runtime_disable(&pdev->dev); + return ret; + } + + platform_set_drvdata(pdev, priv); + return 0; +} + +int mnet_uio_pdrv_genirq_remove(struct platform_device *pdev) +{ + struct uio_pdrv_genirq_platdata *priv = platform_get_drvdata(pdev); + + uio_unregister_device(priv->uioinfo); + // pm_runtime_disable(&pdev->dev); + + priv->uioinfo->handler = NULL; + priv->uioinfo->irqcontrol = NULL; + + return 0; +} + +#if 0 +static int uio_pdrv_genirq_runtime_nop(struct device *dev) +{ + /* Runtime PM callback shared between ->runtime_suspend() + * and ->runtime_resume(). Simply returns success. + * + * In this driver pm_runtime_get_sync() and pm_runtime_put_sync() + * are used at open() and release() time. This allows the + * Runtime PM code to turn off power to the device while the + * device is unused, ie before open() and after release(). + * + * This Runtime PM callback does not need to save or restore + * any registers since user space is responsbile for hardware + * register reinitialization after open(). + */ + return 0; +} + +static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = { + .runtime_suspend = uio_pdrv_genirq_runtime_nop, + .runtime_resume = uio_pdrv_genirq_runtime_nop, +}; + +#ifdef CONFIG_OF +static struct of_device_id uio_of_genirq_match[] = { + { /* This is filled with module_parm */ }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, uio_of_genirq_match); +module_param_string(of_id, uio_of_genirq_match[0].compatible, 128, 0); +MODULE_PARM_DESC(of_id, "Openfirmware id of the device to be handled by uio"); +#endif + +static struct platform_driver uio_pdrv_genirq = { + .probe = uio_pdrv_genirq_probe, + .remove = uio_pdrv_genirq_remove, + .driver = { + .name = DRIVER_NAME, + .pm = &uio_pdrv_genirq_dev_pm_ops, + .of_match_table = of_match_ptr(uio_of_genirq_match), + }, +}; + +EXPORT_SYMBOL(uio_pdrv_genirq_probe); +EXPORT_SYMBOL(uio_pdrv_genirq_remove); +module_platform_driver(uio_pdrv_genirq); +#endif + +EXPORT_SYMBOL(mnet_uio_pdrv_genirq_probe); +EXPORT_SYMBOL(mnet_uio_pdrv_genirq_remove); + +MODULE_AUTHOR("Magnus Damm"); +MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/Makefile b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/Makefile new file mode 100644 index 0000000000..8c78f9ec0b --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/Makefile @@ -0,0 +1,42 @@ +# +# usage: make KDIR=/path/to/kernel/build/area +# +MODNAME = pciesvc + +obj-m := $(MODNAME).o + +$(shell echo '#define PCIESVC_VERSION "'`date`'"' >version.h) + +kpci := + +pciesvc-src := $(shell cd $(PWD) && ls pciesvc/src/*.c) +pciesvc-obj := $(patsubst %.c,%.o,$(pciesvc-src)) +kpci += $(pciesvc-obj) + +INCLUDES = -I$(PWD) \ + -I$(PWD)/pciesvc/include \ + -I$(PWD)/pciesvc/src + +$(MODNAME)-y := $(kpci) kpcimgr_module.o kpcinterface.o kpci_entry.o \ + kpci_kexec.o kpci_test.o pciesvc_end.o + + +KDIR := /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) +UTS := X$(shell grep UTS_RELEASE $(KDIR)/include/generated/utsrelease.h) +REL := $(shell echo $(UTS) | awk '{ print $$3 }' | sed -e 's/"//g') + +KCFLAGS = -fno-jump-tables -fno-stack-protector -fno-function-sections +KCFLAGS += -fno-data-sections -fno-store-merging -mstrict-align +KCFLAGS += 
$(INCLUDES) -DASIC_ELBA -DPCIESVC_SYSTEM_EXTERN +KOPT = KCFLAGS="$(KCFLAGS)" + +all: + $(MAKE) -C $(KDIR) M=$(PWD) $(KOPT) modules + @mkdir -p $(REL) + @mv $(patsubst %.o,%.ko,$(obj-m)) $(REL) + @echo Checking for illegal relocations... + tools/reloc_check $(REL)/$(MODNAME).ko + +clean: + $(MAKE) -C $(KDIR) M=$(PWD) clean diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/README.md b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/README.md new file mode 100644 index 0000000000..858048fc73 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/README.md @@ -0,0 +1,22 @@ +# pciesvc module + +## Overview + +This driver module is a companion to the kpcimgr driver. This module +provides support for servicing the pcie bus hardware "indirect" and +"notify" transaction interrupts. This driver runs on the Pensando ARM cpu. + +The core of the driver is built using sources from the pciesvc library +with only a thin wrapper of code here to package the pciesvc core +and register with the kpcimgr driver by calling "kpcimgr_module_register". + +## Building + +The Makefile in this directory can be used to build the module. +If the kernel build support files are in /lib/modules then "make" will +find them. If kernel build support files are in another path then +specify it on the make command line with "make KDIR=/path/to/kernel". + +## History + +2022-12-02 - initial version diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_constants.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_constants.h new file mode 100644 index 0000000000..c0c01330e3 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_constants.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021, 2022, Oracle and/or its affiliates. + */ +/* + * Layout of non-Linux Memory: + * (base address provided in device tree and may change) + * C500 0000 SHMEM segment (pciehw_shmem_t) [0x942440 bytes ~9.25Mb] + * C5F0 0000 kpcimgr state (kstate_t) [3 * 64k] + * C5F3 0000 relocated code [Allow 256k] + * C5F7 0000 available for stack when in nommu mode (64k) + * C5F8 0000 top of stack + * C5FF FFFF end of 1M allotted range + */ +#define SHMEM_KSTATE_OFFSET 0xF00000 +#define SHMEM_KSTATE_SIZE 0x30000 +#define KSTATE_STACK_OFFSET 0x80000 +#define KSTATE_CODE_OFFSET (SHMEM_KSTATE_OFFSET + SHMEM_KSTATE_SIZE) +#define KSTATE_CODE_SIZE (256 * 1024) +#define KSTATE_MAGIC 0x1743BA1F + +/* size of trace data arrays */ +#define DATA_SIZE 100 +#define MSG_BUF_SIZE 32768 + +/* uart and time related constants */ +#define PEN_UART 0x4800 +#define UART_THR 0 +#define UART_LSR 0x14 +#define DATA_READY 1 +#define OK_TO_WRITE 0x20 +#define UART_THRE_BIT 5 + +/* phases */ +#define NOMMU 0 +#define NORMAL 1 +#define NUM_PHASES 2 + +#define MSI_INDIRECT_IDX 0 /* indirect vector */ +#define MSI_NOTIFY_IDX 1 /* notify vector */ +#define MSI_NVECTORS 2 + diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_entry.S b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_entry.S new file mode 100644 index 0000000000..cd6e4f10e9 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_entry.S @@ -0,0 +1,364 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021, 2022, Oracle and/or its affiliates. + */ + +/* + * Low Level Functions for kpcimgr (a.k.a.
pciesvc glue layer) + * + * Author: rob.gardner@oracle.com + */ + +#include +#include +#include +#include "kpci_constants.h" + +/* Calling conventions for printl: */ +/* We use x12 as the branch link register and x13 as the first function arg */ +#define return_addr x12 +#define arg0 x13 + +/* defines for exception count and cpuid */ +#define ex_count tpidr_el0 +#define cpuidreg tpidr_el1 + + /* macro to print a char given in x17 */ + .macro putc0 + mov x16, PEN_UART + strb w17, [x16, #UART_THR] +11: ldrb w17, [x16, #UART_LSR] + tbz x17, #UART_THRE_BIT, 11b + .endm + + /* macro to print a given literal char */ + .macro putc, c + mov x17, \c + putc0 + .endm + + /* macro to print a literal string */ + .macro print, msg + adr x18, 77f +76: ldrb w17, [x18], #1 + cbz x17, 78f + putc0 + b 76b +77: .asciz "\msg" + .align 2 +78: + .endm + + /* macro to print a literal string with added cr/lf */ + .macro println, msg + print "\msg" + print "\r\n" + .endm + + /* macro to print a system register */ + .macro printsr, reg + print "\reg" + putc ':' + mrs arg0, \reg + adr return_addr, 99f + b printl +99: + .endm + + /* print delineation marker */ + .macro delineate, c + mov x15, #4 +4: putc \c + sub x15, x15, 1 + cbnz x15, 4b + .endm + + /* macro to drop to exception level 1 */ + .macro drop_to_el1 + mrs x29, CurrentEL + asr x29, x29, 2 + tbnz x29, #0, 88f /* what? already at EL1 */ + putc '2' + + /* set up EL2 exception vectors */ + adr x29, xcpt_vectors + msr vbar_el2, x29 + isb + + /* do the actual drop to EL1 */ + putc '#' + adr x29, 88f + msr elr_el2, x29 + eret +88: + putc '1' + msr cpuidreg, x2 /* save cpu number */ + + /* save original address of spin table */ + adr x29, spin_table_start_addr + str x0, [x29] + + /* limit number of times the exception handler runs */ + mov x16, 2 + msr ex_count, x16 + putc '!' + + /* set up EL1 exception vectors */ + adr x29, xcpt_vectors + msr vbar_el1, x29 + isb + putc 'V' + + /* unmask Serror */ + msr daifclr, #(1|4|8) + putc 'D' + .endm + + /* macro to print the exception count value */ + .macro print_ex_count + putc '(' + print "ex_count:" + mrs x15, ex_count + add x17, x15, '0' + putc0 + putc ')' + .endm + + /* macro to print the cpu number */ + .macro print_cpuid + putc '<' + print "CPU" + mrs x17, cpuidreg + add x17, x17, '0' + putc0 + putc '>' + .endm + + /* macro to print exception level */ + .macro print_el + putc '[' + print "EL" + mrs x17, CurrentEL + asr x17, x17, 2 + add x17, x17, '0' + putc0 + putc ']' + .endm + + + /* + * This is the actual entry point for the first + * cpu to be hijacked. After dropping to EL1, + * we just need to set up a stack and we can + * jump to C code to do the real work. + */ + SYM_CODE_START(__kpcimgr_cpu_holding_pen) + + delineate '>' + drop_to_el1 + + /* load kstate base and set initial stack pointer */ + adr x0, kstate_paddr + ldr x0, [x0] + add x3, x0, KSTATE_STACK_OFFSET + mov sp, x3 + + /* jump to the real holding pen */ + bl kpcimgr_cpu_holding_pen + + /* when C returns control here, we're done */ + putc '=' + /* trap to EL2 and return to spin table */ + mov x0, #1 + hvc #0 + + /* we should never get here */ + putc 'Q' + b .exit + + SYM_CODE_END(__kpcimgr_cpu_holding_pen) + + /* + * This is the entry point for the second hijacked + * cpu. Its job is to run the serial thread, which + * can interact with a console user should the need + * arise. Similar to the holding pen thread, we + * drop to EL1, set up our own unique stack, and + * jump to C. 
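 *
 * Stack placement, restated from the code and kpci_constants.h for
 * clarity:
 *
 *	holding-pen cpu:  SP = kstate_paddr + KSTATE_STACK_OFFSET
 *	serial thread:    SP = kstate_paddr + KSTATE_STACK_OFFSET - 0x2000
 *
 * so the two hijacked cpus never share stack memory.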
+ */ + SYM_CODE_START(__kpcimgr_serial_thread) + + delineate ']' + drop_to_el1 + + putc '\\' + adr x1, kstate_paddr + ldr x0, [x1] + add x3, x0, KSTATE_STACK_OFFSET + sub x3, x3, 0x2000 /* need a stack, different from other thread */ + mov sp, x3 + bl kpcimgr_serial_thread + putc '+' + + /* trap to EL2 and return to spin table */ + mov x0, #1 + hvc #0 + + /* we should never get here */ + b .exit + SYM_CODE_END(__kpcimgr_serial_thread) + + /* C callable functions */ + + /* long read_el(void) */ + SYM_CODE_START(read_el) + mrs x0, CurrentEL + lsr x0, x0, #2 + ret + SYM_CODE_END(read_el) + + /* int cpuid(void) */ + SYM_CODE_START(cpuid) + mrs x0, cpuidreg + ret + SYM_CODE_END(cpuid) + + /* int release(void) */ + SYM_CODE_START(release) + adr x1, spin_table_start_addr + ldr x1, [x1] + ldr x0, [x1,#0x10] + ret + SYM_CODE_END(release) + +/* + * printl, basically performs a printf("[%lx]") + * + * We use a few registers indiscriminately, but I am + * reasonably sure they are not used elsewhere + */ +#define shiftval x14 +#define nchars x15 +#define nibble x17 + + SYM_CODE_START(printl) + putc '[' + mov nchars, #0 /* number of characters actually printed */ + mov shiftval, #64 +.loop_top: + sub shiftval, shiftval, 4 + lsr nibble, arg0, shiftval + and nibble, nibble, #0xf + + cbnz nibble, .print /* always print a non-zero nibble */ + cbz shiftval, .print /* always print the last nibble, even if zero */ + cbz nchars, .loop_bottom /* don't print leading zeros */ + +.print: + add nchars, nchars, 1 + add nibble, nibble, #'0' + cmp nibble, #'0'+0xA + b.lt 1f + add nibble, nibble, #-0xA-'0'+'A' +1: putc0 +.loop_bottom: + cbnz shiftval, .loop_top + + putc ']' + br return_addr + SYM_CODE_END(printl) + + /* + * Exception handler + * + * Mainly used to deal with Serror + * + * EL2 exceptions are fatal, but exceptions that arrive here + * at EL1 cause some useful output to the console, and return. + * The number of exceptions handled this way is limited to a few. + * The Serror exception is an exception to this rule. 
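 *
 * (Restated from the macros above, for clarity: the exception budget
 * lives in tpidr_el0 via the ex_count alias; drop_to_el1 seeds it
 * with 2, each handled exception decrements it, and the serror vector
 * re-arms it to 3 so SErrors keep getting reported.)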
+ */ + SYM_CODE_START(exception_handler) + print_el + mrs x29, CurrentEL + cmp x29, #8 + b.ne 1f + + /* EL2 (fatal) */ + printsr elr_el2 + b .exit + + /* EL1 */ +1: printsr elr_el1 + printsr far_el1 + printsr spsr_el1 + printsr esr_el1 + printsr sctlr_el1 + + print_ex_count + + /* limit number of times we go through this code */ + /* to avoid an infinite stream of exceptions */ + mrs x15, ex_count + cbz x15, .exit + sub x15, x15, 1 + msr ex_count, x15 + + print "\r\n" + eret + + /* + * Finish by jumping back to the original + * spin table + */ +.exit: + print_el + print_cpuid + println "done" + adr x29, spin_table_start_addr + ldr x0, [x29] + br x0 + + SYM_CODE_END(exception_handler) + + .macro hyper, c + .align 7 + putc \c + b .exit + .endm + + .macro exlog, c + .align 7 + putc \c + print_el + b exception_handler + .endm + + .macro serror, c + .align 7 + putc \c + mov x16, #3 + msr ex_count, x16 + b exception_handler + .endm + + + .align 3 +spin_table_start_addr: + .dword + + /* The actual Exception Vector table, used for both EL1 and EL2 */ + .align 11 +xcpt_vectors: +/* Current exception level with SP_EL0 */ + exlog 'A' /* Sync */ + exlog 'B' /* IRQ/vIRQ */ + exlog 'C' /* FIQ/cFIQ */ + exlog 'D' /* SError/vSError */ +/* Current exception level with SP_ELx, x>0 */ + hyper 'H' /* Sync */ + exlog 'I' /* IRQ/vIRQ */ + exlog 'Q' /* FIQ/cFIQ */ + serror 'S' /* SError/vSError */ + + diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_kexec.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_kexec.c new file mode 100644 index 0000000000..0a3bd64b4e --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_kexec.c @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021, 2022, Oracle and/or its affiliates. 
+ */ + +/* + * Kernel PCIE Manager - kexec related code + * + * Author: rob.gardner@oracle.com + */ + +#include "kpcimgr_api.h" +#include "pciesvc.h" +#include "pciesvc_system.h" + +#define TICKS_PER_US 200 +#define TICKS_PER_MS (1000*TICKS_PER_US) +#define TICKS_PER_SEC (1000*TICKS_PER_MS) + +int holding_pen_idx; +unsigned long kstate_paddr; +kstate_t *kstate = NULL; + +void set_kstate(kstate_t *ks) +{ + kstate = ks; + kstate_paddr = ks->shmembase + SHMEM_KSTATE_OFFSET; +} + +int virtual(void) +{ + return (unsigned long)kstate != kstate_paddr; +} + +/* called in physical mode */ +void kpcimgr_nommu_poll(kstate_t *ks) +{ + kpcimgr_poll(ks, 0, NOMMU); + ks->trace_data[NOMMU][LAST_CALL_TIME] = read_sysreg(cntvct_el0); + +} + +void kpcimgr_cpu_holding_pen(kstate_t *ks) +{ + long npolls = 0; + int i; + + set_kstate(ks); + ks->uart_addr = (void *) PEN_UART; + if (ks->debug) + _uart_write((void *) PEN_UART, 'C'); + kpcimgr_init_poll(ks); + + kpr_err("%s with EL%ld on cpu%d\n", __func__, read_el(), cpuid()); + + holding_pen_idx = 0; + kpr_err("going into poll loop...\n"); + + while (1) { + if (ks->debug) + _uart_write((void *) PEN_UART, 'S'); + + kpcimgr_nommu_poll(ks); + npolls++; + + for (i=0; i<10; i++) { + if (release()) { + kpcimgr_nommu_poll(ks); + kpr_err("poll loop done, returning after %ld polls.\n", npolls); + return; + } + kp_udelay(1*1000); /* 1ms */ + } + } +} + +void serial_help(void) +{ + kpr_err("Commands:\n"); + kpr_err(" c Cpu id\n"); + kpr_err(" e Event queue\n"); + kpr_err(" f Show/set cfgval\n"); + kpr_err(" h Show help message\n"); + kpr_err(" m Memory ranges\n"); + kpr_err(" q Quit serial thread\n"); + kpr_err(" r Reboot\n"); + kpr_err(" s Serror trigger\n"); + kpr_err(" t Report Stats\n"); +} + +void set_cfgval(kstate_t *ks) +{ + int cfgval = 0, modify = 0; + char c; + + kpr_err("New cfgval: "); + while (1) { + while (uart_read(ks, &c) == 0); + uart_write(ks, c); + if (c >= '0' && c <= '9') + cfgval = (cfgval << 4) + (c - '0'); + else if (c >= 'a' && c <= 'f') + cfgval = (cfgval << 4) + (10 + c - 'a'); + else if (c >= 'A' && c <= 'F') + cfgval = (cfgval << 4) + (10 + c - 'A'); + else + break; + modify = 1; + } + if (modify) { + kpr_err("\r\ncfgval set to %x\n", cfgval); + ks->cfgval = cfgval; + } + else + kpr_err("\r\ncfgval not modified\n"); +} + +#define WDOG_REGS (void *)0x1400 +#define WDOG_CONTROL_REG_OFFSET 0x00 +#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01 +#define WDOG_CONTROL_REG_RESP_MODE_MASK 0x02 +void watchdog_reboot(void) +{ + u32 val = readl(WDOG_REGS + WDOG_CONTROL_REG_OFFSET); + + kpr_err("Rebooting...\n"); + /* Disable interrupt mode; always perform system reset. */ + val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK; + /* Enable watchdog. 
*/ + val |= WDOG_CONTROL_REG_WDT_EN_MASK; + writel(val, WDOG_REGS + WDOG_CONTROL_REG_OFFSET); +} + + +void serial_input(char c) +{ + kstate_t *ks = get_kstate(); + int n; + + switch (c) { + case 'c': case 'C': + kpr_err("serial thread running on cpu#%d\n", cpuid()); + break; + case 'e': case 'E': + n = ks->evq_head - ks->evq_tail; + if (n < 0) + n += EVENT_QUEUE_LENGTH; + kpr_err("event queue contains %d records\n", n); + break; + case 'f': case 'F': + kpr_err("cfgval = %x\n", ks->cfgval); + set_cfgval(ks); + break; + case '?': + case 'h': + case 'H': + serial_help(); + break; + case 'm': case 'M': + for (n=0; n<ks->nranges; n++) { + struct mem_range_t *mr = &ks->mem_ranges[n]; + kpr_err("range [%lx..%lx] mapped at %lx\n", + mr->base, mr->end, mr->vaddr); + } + break; + case 'q': case 'Q': + __asm__("hvc #0;" ::); + break; + case 'r': case 'R': + watchdog_reboot(); + break; + case 's': + case 'S': + trigger_serr(0x100); + break; + case 't': + case 'T': + kpcimgr_report_stats(ks, NOMMU, 1, 1); + break; + default: + kpr_err("'%c' unknown command\n", c); + break; + } +} + + +void kpcimgr_serial_thread(kstate_t *ks) +{ + unsigned long start = read_sysreg(cntvct_el0); + int warning_printed = 0; + + ks->uart_addr = (void *) PEN_UART; + set_kstate(ks); + + kpr_err("%s el%ld on cpu%d\n", __func__, read_el(), cpuid()); + while (!release()) { + char c; + if (uart_read(ks, &c)) + serial_input(c); + if (!warning_printed && time_elapsed(start, 2*TICKS_PER_SEC)) { + kpr_err("Serial thread running for >2s, 'H' for help\n"); + warning_printed = 1; + } + } + kpr_err("%s done\n", __func__); } + + +/* + * Called from kpcimgr when the secondary CPUs are being taken + * offline. We return a physical address which the secondary CPU will + * jump to. The global 'holding_pen_idx' keeps a count of how many + * times we've been called so that we can return the appropriate + * function pointer for a given cpu. It would seem that there are some + * very dangerous race conditions here: + * + * 1. Can't this function be called concurrently on multiple CPUs? + * No, it cannot, because we are called by kpcimgr_get_entry(), + * which protects against this with a spinlock. + * + * 2. holding_pen_idx is reset to zero in kpcimgr_cpu_holding_pen(), + * and can't that execute on CPU1 while this function executes + * concurrently on CPU2? + * Good question! The answer is yes, they can execute + * simultaneously, but it is not a race because they will operate + * on different memory. When this function is called, it is in + * virtual mode, with the code and data in normal module_alloc'ed + * memory. But when kpcimgr_cpu_holding_pen() executes, it is + * running in physical mode from a copy of the code and data that + * has been relocated to persistent memory. Thus, references to + * 'holding_pen_idx' in these two functions refer to different + * memory locations.
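 *
 * The entry address handed back below, restated for clarity:
 *
 *	offset = (unsigned long)__kpcimgr_cpu_holding_pen
 *			- (unsigned long)ks->code_base;
 *	entry  = ks->shmembase + KSTATE_CODE_OFFSET + offset;
 *
 * i.e. the secondary cpu jumps into the relocated copy of this code
 * in persistent memory, not into the module_alloc'ed virtual copy.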
+ */ + +unsigned long kpcimgr_get_holding_pen(unsigned long old_entry, unsigned int cpu) +{ + kstate_t *ks = get_kstate(); + unsigned long offset, entry; + extern void __kpcimgr_cpu_holding_pen(void); + extern void __kpcimgr_serial_thread(void); + + if (ks == NULL || ks->valid != KSTATE_MAGIC || !ks->running || !ks->have_persistent_mem) + return old_entry; + + if (cpu == 0) + return old_entry; + + switch (holding_pen_idx) { + case 0: + offset = (unsigned long) __kpcimgr_cpu_holding_pen - (unsigned long) ks->code_base; + break; + case 1: + offset = (unsigned long) __kpcimgr_serial_thread - (unsigned long) ks->code_base; + break; + default: + return old_entry; + } + holding_pen_idx++; + + entry = ks->shmembase + KSTATE_CODE_OFFSET + offset; + kpr_err("%s(cpu%d) entry = %lx\n", __func__, cpu, entry); + return entry; +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_test.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_test.c new file mode 100644 index 0000000000..75a582ca8e --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpci_test.c @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021, 2022, Oracle and/or its affiliates. + */ + +/* + * Kernel PCIE Manager - test/serial/debug code + * + * Author: rob.gardner@oracle.com + */ + +#include "kpcimgr_api.h" +#include "pciesvc.h" +#include "pciesvc_system.h" + +#define TICKS_PER_US 200 +#define TICKS_PER_MS (1000*TICKS_PER_US) +#define TICKS_PER_SEC (1000*TICKS_PER_MS) + +/* + * kp_udelay + * + * Like kernel udelay(), but avoids an external call. + */ +void kp_udelay(unsigned long us) +{ + unsigned long last = read_sysreg(cntvct_el0); + unsigned long now, elapsed = 0; + unsigned long limit = us * TICKS_PER_US; + + while (elapsed < limit) { + now = read_sysreg(cntvct_el0); + if (now > last) + elapsed += now - last; + last = now; + } +} + +int time_elapsed(unsigned long start, unsigned long elapsed) +{ + unsigned long now = read_sysreg(cntvct_el0); + + if (now > start + elapsed) + return 1; + + if (now < start && now > elapsed) /* good enough */ + return 1; + + return 0; +} + +/* + * Very simple global spin lock: + * Not very well thought out or tested since it is + * not used for any important purpose. It is only + * used by the serial puts() function. + */ +unsigned long lock_table[16]; + +void kp_lock(void) +{ + int i, cpu = cpuid(); + unsigned long sum; + + while (1) { + lock_table[cpu] = 1; + __asm__ __volatile__("dsb sy;" ::); + + for (sum=0, i=0; i<16; i++) + sum += lock_table[i]; + + if (sum == 1) /* acquired lock */ + return; + + lock_table[cpu] = 0; + __asm__ __volatile__("dsb sy;" ::); + kp_udelay(1000+cpu*1000); /* a few ms */ + } +} + +void kp_unlock(void) +{ + lock_table[cpuid()] = 0; + __asm__ __volatile__("dsb sy;" ::); +} + +/* + * Mini serial output driver + * + * We want to avoid a potential infinite loop if something + * goes wrong with the uart, so let's wait no more than 1ms + * for the transmitter shift register to become empty. The + * baud rate is 115200, so theoretically, the shift register + * should never take longer than 100us to become empty.
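 *
 * (Worked arithmetic, for clarity: at 115200 baud one character takes
 * ~10 bit times, i.e. 10/115200 s ~ 87 us, so the 100 us poll below
 * has margin; the retry loop bounds total waiting to 10 * 100 us = 1 ms.)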
+ */ +void _uart_write(unsigned char *reg, char c) +{ + int i; + + for (i=0; i<10; i++) { + if (*(reg + UART_LSR) & OK_TO_WRITE) + break; + kp_udelay(100); + } + *(reg + UART_THR) = c; + for (i=0; i<10; i++) { + if (*(reg + UART_LSR) & OK_TO_WRITE) + break; + kp_udelay(100); + } +} + +void uart_write(kstate_t *ks, char c) +{ + _uart_write(ks->uart_addr, c); +} + +int uart_read(kstate_t *ks, char *c) +{ + volatile unsigned char *reg = ks->uart_addr; + if (*(reg + UART_LSR) & DATA_READY) { + *c = *(reg + UART_THR); + return 1; + } + return 0; +} + +void uart_write_debug(kstate_t *ks, char c) +{ + if (ks->debug) + _uart_write(ks->uart_addr, c); +} + +void kdbg_puts(const char *s) +{ + kstate_t *ks = get_kstate(); + + if (ks->uart_addr == NULL) + return; + + kp_lock(); + for ( ; *s; s++) { + uart_write(ks, *s); + if (*s == '\n') + uart_write(ks, '\r'); + } + kp_unlock(); +} + +/* + * For testing, this causes an SERR to be generated + */ +void trigger_serr(int val) +{ + const uint64_t good_bad_pa = 0x20141000; + uint32_t dummy; + + kdbg_puts("kpcimgr: triggering serr\n"); + if (val == 0x100) + dummy = pciesvc_reg_rd32(good_bad_pa); + else + pciesvc_pciepreg_rd32(good_bad_pa, &dummy); +} + +void kpcimgr_report_stats(kstate_t *ks, int phase, int always, int rightnow) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + unsigned long now = read_sysreg(cntvct_el0); + uint64_t cfgrd, cfgwr, memrd, memwr; + static unsigned long last_call = 0; + pciemgr_stats_t *s; + pciehw_port_t *p; + + if (!always && (now - last_call) < 5 * TICKS_PER_SEC) + return; + + p = &pshmem->port[0]; + s = &p->stats; + cfgrd = s->ind_cfgrd - ks->ind_cfgrd; + cfgwr = s->ind_cfgwr - ks->ind_cfgwr; + memrd = s->ind_memrd - ks->ind_memrd; + memwr = s->ind_memwr - ks->ind_memwr; + + if (!always && (cfgrd + cfgwr + memrd + memwr) == 0) + return; + + if (rightnow || ks->debug) { + kpr_err("KPCIMGR: called %d times during %s phase: %lld cfgrd, %lld cfgwr, %lld memrd, %lld memwr\n", + ks->ncalls, (phase == NOMMU) ? "nommu" : "normal", + cfgrd, cfgwr, memrd, memwr); + kpr_err(" %d ind_intr, %d not_intr, %d event_intr\n", ks->ind_intr, ks->not_intr, ks->event_intr); + } + + ks->ind_cfgrd = s->ind_cfgrd; + ks->ind_cfgwr = s->ind_cfgwr; + ks->ind_memrd = s->ind_memrd; + ks->ind_memwr = s->ind_memwr; + + last_call = read_sysreg(cntvct_el0); +} + diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpcimgr_api.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpcimgr_api.h new file mode 100644 index 0000000000..8bfcda1cca --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpcimgr_api.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021, 2022, Oracle and/or its affiliates. 
+ */ +#ifndef __KPCIMGR_API_H__ +#define __KPCIMGR_API_H__ + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif + +#include "kpci_constants.h" + +#define K_ENTRY_INIT_INTR 0 +#define K_ENTRY_INIT_POLL 1 +#define K_ENTRY_SHUT 2 +#define K_ENTRY_POLL 3 +#define K_ENTRY_HOLDING_PEN 4 +#define K_ENTRY_INDIRECT_INTR 5 +#define K_ENTRY_NOTIFY_INTR 6 +#define K_ENTRY_INIT_FN 7 +#define K_ENTRY_CMD_READ 8 +#define K_ENTRY_CMD_WRITE 9 +#define K_ENTRY_GET_VERSION 10 +#define K_NUM_ENTRIES 16 + +struct kpcimgr_entry_points_t { + int expected_mgr_version; + int lib_version_major; + int lib_version_minor; + void *code_end; + void *entry_point[K_NUM_ENTRIES]; +}; + +/* upcalls */ +#define WAKE_UP_EVENT_QUEUE 1 +#define PRINT_LOG_MSG 2 +#define PREG_READ 3 + +/* event queue sizing */ +#define EVENT_QUEUE_LENGTH 1024 +#define EVENT_SIZE 128 + +/* max command size for sysfs cmd node */ +#define CMD_SIZE 4096 + +/* max number of memory ranges from device tree */ +#define NUM_MEMRANGES 32 + +struct kpcimgr_state_t { + /* essential state */ + int valid; + int debug; + int running; + int active_port; + int have_persistent_mem; + int lib_version_major; + int lib_version_minor; + + /* timestamps and general trace data */ + long kexec_time; + long driver_start_time; + unsigned long trace_data[NUM_PHASES][DATA_SIZE]; + + /* virtual addresses */ + void *uart_addr; + void *code_base; + void *persistent_base; + void *upcall; + void *pfdev; + void *shmemva; + + unsigned long shmembase, shmem_size, code_size; + struct mem_range_t { + unsigned long base, end; + void *vaddr; + } mem_ranges[NUM_MEMRANGES]; + int nranges; + int hwmem_idx; + + /* interrupt vectors */ + struct msi_info { + unsigned long msgaddr; + unsigned int msgdata; + } msi[MSI_NVECTORS]; + + /* stats for work done */ + int ind_cfgrd, ind_cfgwr; + int ind_memrd, ind_memwr; + int ncalls; + int ind_intr, not_intr, event_intr; + + int unused1[7]; /* was version=2 code_offsets[], keep evq* compat */ + + /* Event queue handling */ + int evq_head, evq_tail; + char evq[EVENT_QUEUE_LENGTH][EVENT_SIZE]; + + /* debugging */ + void *mod; + int msg_idx; + int cfgval; + + /* offsets into relocated library code */ + int code_offsets[K_NUM_ENTRIES]; +}; + +typedef struct kpcimgr_state_t kstate_t; +_Static_assert(sizeof(kstate_t) < SHMEM_KSTATE_SIZE, + "kstate size insufficient"); + +/* trace_data[] elements */ +#define FIRST_CALL_TIME 0 +#define FIRST_SEQNUM 1 +#define LAST_SEQNUM 2 +#define TAG 3 +#define PA_BAD_CNT 4 +#define NUM_CHECKS 5 +#define NUM_CALLS 6 +#define NUM_PENDINGS 7 +#define LAST_CALL_TIME 8 +#define EARLY_POLL 9 +#define MAX_DATA 10 + +#define KPCIMGR_DEV "/dev/kpcimgr" +#define KPCIMGR_NAME "kpcimgr" +#define PFX KPCIMGR_NAME ": " +#define KPCIMGR_KERNEL_VERSION 3 + +#ifdef __KERNEL__ +int kpcimgr_module_register(struct module *mod, + struct kpcimgr_entry_points_t *ep, int relocate); +void kpcimgr_start_running(void); +void kpcimgr_stop_running(void); +void kpcimgr_sysfs_setup(struct platform_device *pfdev); +void *kpci_memcpy(void *dst, const void *src, size_t n); +void wake_up_event_queue(void); +int aarch64_insn_read(void *addr, u32 *insnp); +extern spinlock_t kpcimgr_lock; + +#define reset_stats(k) \ + kpci_memset((void *)&(k)->trace_data[0][0], 0, sizeof((k)->trace_data)) + +static inline void set_init_state(kstate_t *k) +{ + k->trace_data[NORMAL][FIRST_CALL_TIME] = 0; + k->ncalls = 0; +} + +static inline 
kstate_t *get_kstate(void) +{ + extern kstate_t *kstate; + return kstate; +} +#endif + +#endif diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpcimgr_module.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpcimgr_module.c new file mode 100644 index 0000000000..ee7bc49b08 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/kpcimgr_module.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021, 2022, Oracle and/or its affiliates. + */ + +/* + * PCIESVC Library Loader + * + * Author: rob.gardner@oracle.com + */ + +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); + +#include "kpcimgr_api.h" +#include "version.h" + +static int relocate = 0; +#ifdef DEBUG_KPCIMGR +module_param(relocate, int, 0600); +MODULE_PARM_DESC(relocate, "specifies whether or not to relocate module"); +#endif + +extern char pciesvc_end; +extern void kpcimgr_init_intr(void *); +extern void kpcimgr_init_fn(void *); +extern void kpcimgr_version_fn(char **); +extern void kpcimgr_init_poll(void *); +extern void pciesvc_shut(int); +extern void kpcimgr_poll(kstate_t *, int, int); +extern unsigned long kpcimgr_get_holding_pen(unsigned long, unsigned int); +extern int kpcimgr_ind_intr(void *, int); +extern int kpcimgr_not_intr(void *, int); +extern void kpcimgr_undefined_entry(void); +extern int pciesvc_sysfs_cmd_read(void *, char *, int *); +extern int pciesvc_sysfs_cmd_write(void *, char *, size_t, int *); + +extern int pciesvc_version_major; +extern int pciesvc_version_minor; + +static int __init pciesvc_dev_init(void) +{ + struct kpcimgr_entry_points_t ep; + int i, ret = 0; + + /* initialize entry_points struct via executable code so that + * PC relative relocations are generated */ + ep.expected_mgr_version = 3; + ep.lib_version_major = pciesvc_version_major; + ep.lib_version_minor = pciesvc_version_minor; + ep.code_end = &pciesvc_end; + + for (i=0; iactive_port; + + msi = &ks->msi[MSI_INDIRECT_IDX]; + p.params_v0.ind_intr = 1; + p.params_v0.ind_msgaddr = msi->msgaddr; + p.params_v0.ind_msgdata = msi->msgdata; + + msi = &ks->msi[MSI_NOTIFY_IDX]; + p.params_v0.not_intr = 1; + p.params_v0.not_msgaddr = msi->msgaddr; + p.params_v0.not_msgdata = msi->msgdata; + + if (pciesvc_init(&p)) + kpr_err("%s: pciesvc_init failed\n", __func__); + + /* clear out any pending transactions */ + kpcimgr_poll(ks, 0, NORMAL); +} + +/* + * Initialize pciesvc for polling based operation + */ +void kpcimgr_init_poll(kstate_t *ks) +{ + pciesvc_params_t p; + + set_kstate(ks); + memset(&p, 0, sizeof(pciesvc_params_t)); + + p.version = 0; + p.params_v0.port = ks->active_port; + + p.params_v0.ind_poll = 1; + p.params_v0.not_poll = 1; + + pciesvc_init(&p); +} + +/* + * Main poll function + * + * Essentially a wrapper for pciesvc_poll() that + * updates statistics, does some error checking, + * and outputs some debugging information. 
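+ * + * The phase argument (NOMMU or NORMAL) selects which row of + * ks->trace_data[] is updated, and up to 10 pending transactions + * are drained per call before statistics are reported.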
+ */ +void kpcimgr_poll(kstate_t *ks, int index, int phase) +{ + int i, result; + long ts = read_sysreg(cntvct_el0); + + set_kstate(ks); + ks->ncalls++; + + if (ks->trace_data[phase][FIRST_CALL_TIME] == 0) { + uart_write_debug(ks, 'F'); + ks->trace_data[phase][FIRST_CALL_TIME] = ts; + + if (phase == NOMMU) + kpcimgr_report_stats(ks, NORMAL, 1, 0); + else + kpcimgr_report_stats(ks, NOMMU, 1, 0); + } + + ks->trace_data[phase][NUM_CALLS]++; + + if (phase == NOMMU) + uart_write_debug(ks, 'M'); + + if (ks->valid != KSTATE_MAGIC) { + uart_write_debug(ks, 'V'); + return; + } + + if (!ks->running) { + uart_write_debug(ks, 'P'); + return; + } + + ks->trace_data[phase][LAST_CALL_TIME] = ts; + ks->trace_data[phase][NUM_CHECKS]++; + + if (ks->debug & 0x300) { + trigger_serr(ks->debug & 0x300); + ks->debug &= ~0x300; + } + + for (i=0; i<10; i++) { + + result = pciesvc_poll(0); + /* + * return value: + * 1: valid pending and handled + * 0: nothing pending + * -1: error + */ + + if (result == 0) + break; + if (result == -1) { + uart_write_debug(ks, '?'); + break; + } + + uart_write_debug(ks, 'h'); + + ks->trace_data[phase][NUM_PENDINGS]++; + } + kpcimgr_report_stats(ks, phase, 0, 0); +} + +/* + * ISR for Indirect Interrupt + */ +int kpcimgr_ind_intr(kstate_t *ks, int port) +{ + int ret; + + set_kstate(ks); + ret = pciesvc_indirect_intr(port); + if (ks->debug & 0x300) { + trigger_serr(ks->debug & 0x300); + ks->debug &= ~0x300; + } + + return ret; +} + +/* + * ISR for Notify Interrupt + */ +int kpcimgr_not_intr(kstate_t *ks, int port) +{ + set_kstate(ks); + return pciesvc_notify_intr(port); +} + +/* + * Return a VA from one of our known ranges + * + * If we're running with the MMU turned off, then just return the + * physical address. + * + */ +void *kpcimgr_va_get(unsigned long pa, unsigned long sz) +{ + kstate_t *ks = get_kstate(); + int i; + + if (!virtual()) + return (void *) pa; + + for (i=0; i<ks->nranges; i++) { + struct mem_range_t *mr = &ks->mem_ranges[i]; + if (pa >= mr->base && pa < mr->end) + return mr->vaddr + (pa - mr->base); + } + + kpr_err("%s: bad pa 0x%lx\n", __func__, pa); + pciesvc_assert(0); + return NULL; +} + +/* + * Reverse translation: return a physical address + * corresponding to some virtual address. + */ +u64 pciesvc_vtop(const void *hwmemva) +{ + kstate_t *ks = get_kstate(); + u64 hwptr = (u64) hwmemva; + int i; + + for (i=0; i<ks->nranges; i++) { + struct mem_range_t *mr = &ks->mem_ranges[i]; + u64 size; + + /* was a physical address passed in to us?
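+ * If so, it already lies inside a known range and is + * returned unchanged; otherwise the VA is translated + * back to its PA via the matching range below.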
*/ + if (hwptr >= mr->base && hwptr < mr->end) + return hwptr; + size = mr->end - mr->base; + if (hwmemva >= mr->vaddr && + hwmemva < mr->vaddr + size) + return mr->base + (hwmemva - mr->vaddr); + } + return 0; +} + +/* + * Up calls from pciesvc + */ +uint32_t +pciesvc_reg_rd32(const uint64_t pa) +{ + u_int32_t val, *va = kpcimgr_va_get(pa, 4); + + pciesvc_assert((pa & 0x3) == 0); + val = readl(va); + __asm__ __volatile__("isb; dsb sy;" ::); + + return val; +} + +static inline void +pciesvc_reg_rd32w(const uint64_t pa, uint32_t *w, const uint32_t nw) +{ + int i; + + for (i = 0; i < nw; i++) { + w[i] = pciesvc_reg_rd32(pa + (i * 4)); + } +} + +void +pciesvc_pciepreg_rd32(const uint64_t pa, uint32_t *dest) +{ + u_int32_t val, (*upcall)(int req, unsigned long pa); + kstate_t *ks = get_kstate(); + + pciesvc_assert((pa & 0x3) == 0); + upcall = ks->upcall; + if (upcall && virtual()) + val = upcall(PREG_READ, pa); + else + val = pciesvc_reg_rd32(pa); + + *dest = val; +} + +void +pciesvc_reg_wr32(const uint64_t pa, const uint32_t val) +{ + u_int32_t *va = kpcimgr_va_get(pa, 4); + + pciesvc_assert((pa & 0x3) == 0); + writel(val, va); +} + +static inline void +pciesvc_reg_wr32w(const uint64_t pa, const uint32_t *w, const uint32_t nw) +{ + int i; + + for (i = 0; i < nw; i++) { + pciesvc_reg_wr32(pa + (i * 4), w[i]); + } +} + +/* + * Similar calls implemented in terms of rd32/wr32. + */ +typedef union { + u_int32_t l; + u_int16_t h[2]; + u_int8_t b[4]; +} iodata_t; + +int +pciesvc_mem_rd(const uint64_t pa, void *buf, const size_t sz) +{ + uint64_t pa_aligned; + uint8_t idx; + iodata_t v; + + switch (sz) { + case 1: + pa_aligned = pa & ~0x3; + idx = pa & 0x3; + v.l = pciesvc_reg_rd32(pa_aligned); + *(uint8_t *)buf = v.b[idx]; + break; + case 2: + pa_aligned = pa & ~0x3; + idx = (pa & 0x3) >> 1; + v.l = pciesvc_reg_rd32(pa_aligned); + *(uint16_t *)buf = v.h[idx]; + break; + case 4: + case 8: + pciesvc_reg_rd32w(pa, (uint32_t *)buf, sz >> 2); + break; + default: + return -1; + } + return 0; +} + +void +pciesvc_mem_wr(const uint64_t pa, const void *buf, const size_t sz) +{ + uint64_t pa_aligned; + uint8_t idx; + iodata_t v; + + switch (sz) { + case 1: + pa_aligned = pa & ~0x3; + idx = pa & 0x3; + v.l = pciesvc_reg_rd32(pa_aligned); + v.b[idx] = *(uint8_t *)buf; + pciesvc_reg_wr32(pa_aligned, v.l); + break; + case 2: + pa_aligned = pa & ~0x3; + idx = (pa & 0x3) >> 1; + v.l = pciesvc_reg_rd32(pa_aligned); + v.h[idx] = *(uint16_t *)buf; + pciesvc_reg_wr32(pa_aligned, v.l); + break; + case 4: + case 8: + pciesvc_reg_wr32w(pa, (uint32_t *)buf, sz >> 2); + break; + default: + break; + } +} + +void +pciesvc_mem_barrier(void) +{ + mb(); +} + +/* + * We need our own memset/memcpy functions because we + * cannot call any kernel functions. And even if we could, + * we need to avoid cache operations since "non-linux" memory + * is non-cached. 
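+ * + * Note that pciesvc_memset() below takes the word-at-a-time + * path only when both the pointer and the length are 4-byte + * aligned, and falls back to byte stores otherwise; e.g. a + * 64-byte aligned clear uses word stores, a 3-byte tail does not.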
+ */ +void * +pciesvc_memset(void *s, int c, size_t n) +{ + if (((uintptr_t)s & 0x3) == 0 && (n & 0x3) == 0) { + volatile u_int32_t *p; + int i; + + c &= 0xff; + c = ((c << 0) | + (c << 8) | + (c << 16) | + (c << 24)); + for (p = s, i = 0; i < n >> 2; i++, p++) { + *p = c; + } + } else { + volatile u_int8_t *p; + int i; + + for (p = s, i = 0; i < n; i++, p++) { + *p = c; + } + } + + return s; +} + +void * +pciesvc_memcpy(void *dst, const void *src, size_t n) +{ + volatile u_int8_t *d = dst; + const u_int8_t *s = src; + int i; + + for (i = 0; i < n; i++) { + *d++ = *s++; + } + return dst; +} + +void * +pciesvc_memcpy_toio(void *dsthw, const void *src, size_t n) +{ + return pciesvc_memcpy(dsthw, src, n); +} + +void * +pciesvc_shmem_get(void) +{ + kstate_t *ks = get_kstate(); + + if (virtual()) + return ks->shmemva; + else + return (void *) ks->shmembase; +} + +void *pciesvc_hwmem_get(void) +{ + kstate_t *ks = get_kstate(); + + if (virtual()) + return ks->mem_ranges[ks->hwmem_idx].vaddr; + else + return (void *) ks->mem_ranges[ks->hwmem_idx].base; +} + +void +pciesvc_log(const char *msg) +{ + kstate_t *ks = get_kstate(); + u64 (*upcall)(int req, char *msg); + + upcall = ks->upcall; + if (upcall && virtual()) + upcall(PRINT_LOG_MSG, (char *)msg); + else + kdbg_puts((char *)msg); +} + +void wakeup_event_queue(void) +{ + kstate_t *ks = get_kstate(); + u64 (*upcall)(int req); + + upcall = ks->upcall; + if (upcall && virtual()) + upcall(WAKE_UP_EVENT_QUEUE); +} + +/* + * Event Queue Handler + * + * Event queue semantics: + * evq_head = index of slot used for next insertion + * evq_tail = index of slot used for next removal + * queue is empty when head == tail + * queue is full when (head + 1) % queue_size == tail + * queue is nearly full when (head + 2) % queue_size == tail + * + * Only head is modified here, and the read() function only + * modifies tail, so theoretically no race can exist. It is + * possible for the reader to see an empty queue momentarily + * or the handler to see a full queue momentarily, but these + * situations do not justify adding locks. 
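+ * + * Worked example with EVENT_QUEUE_LENGTH == 1024: head == 1023 and + * tail == 0 means full (one slot is sacrificed to distinguish full + * from empty), while head == 5 and tail == 3 means two events are + * waiting. A reader on the driver side would drain the queue along + * these lines (illustrative sketch only; handle_event() is a + * stand-in, not a function defined here): + * + *	while (ks->evq_tail != ks->evq_head) { + *		handle_event((pciesvc_eventdata_t *)ks->evq[ks->evq_tail]); + *		ks->evq_tail = (ks->evq_tail + 1) % EVENT_QUEUE_LENGTH; + *	}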
+ */ +int pciesvc_event_handler(pciesvc_eventdata_t *evdata, const size_t evsize) +{ + kstate_t *ks = get_kstate(); + int ret = 0; + static int was_full = 0; + + if (evsize != sizeof(pciesvc_eventdata_t)) { + kpr_err("%s: evsize != sizeof(pciesvc_eventdata_t))\n", __func__); + return -1; + } + + if ((ks->evq_head + 1) % EVENT_QUEUE_LENGTH == ks->evq_tail) { + if (!was_full) + pciesvc_log(KERN_INFO "pciesvc_event_handler: event queue full\n"); + was_full = 1; + return -1; + } + was_full = 0; + + if ((ks->evq_head + 2) % EVENT_QUEUE_LENGTH == ks->evq_tail) { + pciesvc_log(KERN_INFO "pciesvc_event_handler: event queue almost full\n"); + evdata->evtype = PCIESVC_EV_QFULL; + ret = -1; + } + + pciesvc_memcpy_toio((void *)ks->evq[ks->evq_head], evdata, sizeof(pciesvc_eventdata_t)); + + ks->evq_head = (ks->evq_head + 1) % EVENT_QUEUE_LENGTH; + wakeup_event_queue(); + return ret; +} + +void pciesvc_debug_cmd(uint32_t *cmd) +{ + kstate_t *ks = get_kstate(); + uint32_t delayus; + + switch (*cmd) { + case 0x17: + *cmd = virtual(); + return; + case 0x19: + *cmd = ks->cfgval; + return; + case 0x100: + case 0x200: + ks->debug |= *cmd; + return; + default: + delayus = *cmd; + if (delayus) { + pciesvc_usleep(delayus); + } + break; + } +} + +/* + * cmd read/write + */ +int pciesvc_sysfs_cmd_read(kstate_t *ks, char *buf, loff_t off, size_t count, int *exists) +{ + int ret; + + if (exists) + *exists = 1; + + ret = pciesvc_cmd_read(buf, off, count); + return ret < 0 ? -EINVAL : ret; +} + +int pciesvc_sysfs_cmd_write(kstate_t *ks, char *buf, loff_t off, size_t count, int *exists) +{ + int ret; + + if (exists) + *exists = 1; + + ret = pciesvc_cmd_write(buf, off, count); + return ret < 0 ? -EINVAL : ret; +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/indirect_entry.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/indirect_entry.h new file mode 100644 index 0000000000..42e35a00f6 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/indirect_entry.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, Pensando Systems Inc. + */ + +#ifndef __INDIRECT_ENTRY_H__ +#define __INDIRECT_ENTRY_H__ + +#include "tlpauxinfo.h" + +typedef enum { +#define PCIEIND_REASON_DEF(NAME, VAL) \ + PCIEIND_REASON_##NAME = VAL, +#include "indirect_reason.h" + PCIEIND_REASON_MAX +} pcieind_reason_t; + +/* + * Completion Status field values + * PCIe 4.0, Table 2-34. + */ +typedef enum { + PCIECPL_SC = 0x0, /* Successful Completion */ + PCIECPL_UR = 0x1, /* Unsupported Request */ + PCIECPL_CRS = 0x2, /* Config Retry Status */ + PCIECPL_CA = 0x4, /* Completer Abort */ +} pciecpl_t; + +#define INDIRECT_TLPSZ 64 + +typedef struct indirect_entry_s { + u_int32_t port; + pciecpl_t cpl; /* PCIECPL_* completion type */ + u_int32_t completed:1; /* completion has been delivered */ + u_int32_t data[4]; + u_int8_t rtlp[INDIRECT_TLPSZ]; + tlpauxinfo_t info; +} indirect_entry_t; + +#endif /* __INDIRECT_ENTRY_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/indirect_reason.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/indirect_reason.h new file mode 100644 index 0000000000..b88abb7927 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/indirect_reason.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, Pensando Systems Inc. 
+ */ + +#ifndef PCIEIND_REASON_DEF +#define PCIEIND_REASON_DEF(NAME, VAL) +#endif + +PCIEIND_REASON_DEF(RSRV0, 0) +PCIEIND_REASON_DEF(RSRV1, 1) +PCIEIND_REASON_DEF(MSG, 2) +PCIEIND_REASON_DEF(UNSUPPORTED, 3) +PCIEIND_REASON_DEF(PMV, 4) +PCIEIND_REASON_DEF(DBPMV, 5) +PCIEIND_REASON_DEF(ATOMIC, 6) +PCIEIND_REASON_DEF(PMTMISS, 7) +PCIEIND_REASON_DEF(PMRMISS, 8) +PCIEIND_REASON_DEF(PRTMISS, 9) +PCIEIND_REASON_DEF(DBF2VFIDMISS, 10) +PCIEIND_REASON_DEF(PRTOOR, 11) +PCIEIND_REASON_DEF(VFIDOOR, 12) +PCIEIND_REASON_DEF(BDFOOR, 13) +PCIEIND_REASON_DEF(PMRIND, 14) +PCIEIND_REASON_DEF(PRTIND, 15) +PCIEIND_REASON_DEF(PMRECC, 16) +PCIEIND_REASON_DEF(PRTECC, 17) + +#undef PCIEIND_REASON_DEF diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/notify_entry.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/notify_entry.h new file mode 100644 index 0000000000..91eb2767e4 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/notify_entry.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018,2021, Pensando Systems Inc. + */ + +#ifndef __NOTIFY_ENTRY_H__ +#define __NOTIFY_ENTRY_H__ + +#include "tlpauxinfo.h" + +#define NOTIFY_TLPSZ 48 + +typedef struct notify_entry_s { + uint8_t rtlp[NOTIFY_TLPSZ]; + tlpauxinfo_t info; +} notify_entry_t; + +#endif /* __NOTIFY_ENTRY_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciehdevice_types.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciehdevice_types.h new file mode 100644 index 0000000000..4ecb7a1a0c --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciehdevice_types.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Pensando Systems Inc. + */ + +#ifndef __PCIEHDEVICE_TYPES_H__ +#define __PCIEHDEVICE_TYPES_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +typedef enum pciehdevice_type_e { + PCIEHDEVICE_NONE, + PCIEHDEVICE_ETH, + PCIEHDEVICE_MGMTETH, + PCIEHDEVICE_ACCEL, + PCIEHDEVICE_NVME, + PCIEHDEVICE_VIRTIO, + PCIEHDEVICE_PCIESTRESS, + PCIEHDEVICE_DEBUG, + PCIEHDEVICE_RCDEV, + PCIEHDEVICE_CRYPT, + PCIEHDEVICE_UPT, + PCIEHDEVICE_SERIAL, + PCIEHDEVICE_CORE, +} pciehdevice_type_t; + +#define PCIEHDEVICE_OVERRIDE_INTRGROUPS 8 + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIEHDEVICE_TYPES_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciehw.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciehw.h new file mode 100644 index 0000000000..443a128b04 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciehw.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018,2021, Pensando Systems Inc. 
+ */ + +#ifndef __PCIESVC_PCIEHW_H__ +#define __PCIESVC_PCIEHW_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +#define PCIEHW_NPORTS 8 +#define PCIEHW_NDEVS 1024 +#define PCIEHW_CFGSHIFT 11 +#define PCIEHW_CFGSZ (1 << PCIEHW_CFGSHIFT) +#define PCIEHW_NROMSK 128 +#define PCIEHW_NPMT PMT_COUNT +#define PCIEHW_NPRT PRT_COUNT +#define PCIEHW_NBAR 6 /* 6 cfgspace BARs */ + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIESVC_PCIEHW_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciehwmem.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciehwmem.h new file mode 100644 index 0000000000..fc8335a909 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciehwmem.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021, Pensando Systems Inc. + */ + +#ifndef __PCIEHWMEM_H__ +#define __PCIEHWMEM_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +#include "pciehw.h" + +#define PCIEHW_NOTIFYSZ (1 * 1024 * 1024) + +typedef struct pciehw_mem_s { + u_int8_t notify_area[PCIEHW_NPORTS][PCIEHW_NOTIFYSZ] + __attribute__((aligned(PCIEHW_NOTIFYSZ))); + /* page of zeros to back cfgspace */ + u_int8_t zeros[4096] __attribute__((aligned(4096))); + u_int8_t cfgcur[PCIEHW_NDEVS][PCIEHW_CFGSZ] __attribute__((aligned(4096))); + u_int32_t notify_intr_dest[PCIEHW_NPORTS]; /* notify intr dest */ + u_int32_t indirect_intr_dest[PCIEHW_NPORTS]; /* indirect intr dest */ + u_int32_t magic; /* PCIEHW_MAGIC when initialized */ + u_int32_t version; /* PCIEHW_VERSION when initialized */ +} pciehw_mem_t; + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIEHWMEM_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciemgr_stats.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciemgr_stats.h new file mode 100644 index 0000000000..980fa9bad7 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciemgr_stats.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019, Pensando Systems Inc. + */ + +#ifndef __PCIEMGR_STATS_H__ +#define __PCIEMGR_STATS_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +typedef union pciemgr_stats { + struct { + +#define PCIEMGR_STATS_DEF(S) \ + uint64_t S; +#include "pciemgr_stats_defs.h" + + }; + /* pad to 64 entries, room to grow */ + uint64_t _pad[64]; + +} pciemgr_stats_t; + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIEMGR_STATS_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciemgr_stats_defs.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciemgr_stats_defs.h new file mode 100644 index 0000000000..d690ecd133 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciemgr_stats_defs.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019, Pensando Systems Inc. 
+ */ + +#ifndef PCIEMGR_STATS_DEF +#define PCIEMGR_STATS_DEF(st) +#endif + +PCIEMGR_STATS_DEF(not_intr) +PCIEMGR_STATS_DEF(not_spurious) +PCIEMGR_STATS_DEF(not_polled) +PCIEMGR_STATS_DEF(not_cnt) +PCIEMGR_STATS_DEF(not_max) +PCIEMGR_STATS_DEF(not_cfgrd) +PCIEMGR_STATS_DEF(not_cfgwr) +PCIEMGR_STATS_DEF(not_memrd) +PCIEMGR_STATS_DEF(not_memwr) +PCIEMGR_STATS_DEF(not_iord) +PCIEMGR_STATS_DEF(not_iowr) +PCIEMGR_STATS_DEF(not_unknown) + +#define notify_reason_stats not_rsrv0 +PCIEMGR_STATS_DEF(not_rsrv0) +PCIEMGR_STATS_DEF(not_rsrv1) +PCIEMGR_STATS_DEF(not_msg) +PCIEMGR_STATS_DEF(not_unsupported) +PCIEMGR_STATS_DEF(not_pmv) +PCIEMGR_STATS_DEF(not_dbpmv) +PCIEMGR_STATS_DEF(not_atomic) +PCIEMGR_STATS_DEF(not_pmtmiss) +PCIEMGR_STATS_DEF(not_pmrmiss) +PCIEMGR_STATS_DEF(not_prtmiss) +PCIEMGR_STATS_DEF(not_bdf2vfidmiss) +PCIEMGR_STATS_DEF(not_prtoor) +PCIEMGR_STATS_DEF(not_vfidoor) +PCIEMGR_STATS_DEF(not_bdfoor) +PCIEMGR_STATS_DEF(not_pmrind) +PCIEMGR_STATS_DEF(not_prtind) +PCIEMGR_STATS_DEF(not_pmrecc) +PCIEMGR_STATS_DEF(not_prtecc) + +PCIEMGR_STATS_DEF(ind_intr) +PCIEMGR_STATS_DEF(ind_spurious) +PCIEMGR_STATS_DEF(ind_polled) +PCIEMGR_STATS_DEF(ind_cfgrd) +PCIEMGR_STATS_DEF(ind_cfgwr) +PCIEMGR_STATS_DEF(ind_memrd) +PCIEMGR_STATS_DEF(ind_memwr) +PCIEMGR_STATS_DEF(ind_iord) +PCIEMGR_STATS_DEF(ind_iowr) +PCIEMGR_STATS_DEF(ind_unknown) + +PCIEMGR_STATS_DEF(healthlog) + +#undef PCIEMGR_STATS_DEF diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pcieshmem.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pcieshmem.h new file mode 100644 index 0000000000..a7974dbc7a --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pcieshmem.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018,2021-2022, Pensando Systems Inc. 
+ */ + +#ifndef __PCIESHMEM_H__ +#define __PCIESHMEM_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +#include "pciehdevice_types.h" +#include "pciehw.h" +#include "pciemgr_stats.h" +#include "pmt.h" +#include "prt.h" + +enum pciehw_cfghnd_e { + PCIEHW_CFGHND_NONE, + PCIEHW_CFGHND_CMD, + PCIEHW_CFGHND_DEV_BARS, + PCIEHW_CFGHND_ROM_BAR, + PCIEHW_CFGHND_BRIDGECTL, + PCIEHW_CFGHND_MSIX, + PCIEHW_CFGHND_VPD, + PCIEHW_CFGHND_PCIE_DEVCTL, + PCIEHW_CFGHND_SRIOV_CTRL, + PCIEHW_CFGHND_SRIOV_BARS, + PCIEHW_CFGHND_DBG_DELAY, + PCIEHW_CFGHND_BRIDGE_BUS, +}; +typedef enum pciehw_cfghnd_e pciehw_cfghnd_t; + +typedef enum pciehwbartype_e { + PCIEHWBARTYPE_NONE, /* invalid bar type */ + PCIEHWBARTYPE_MEM, /* 32-bit memory bar */ + PCIEHWBARTYPE_MEM64, /* 64-bit memory bar */ + PCIEHWBARTYPE_IO, /* 32-bit I/O bar */ +} pciehwbartype_t; + +typedef enum pciehw_barhnd_e { + PCIEHW_BARHND_NONE, + PCIEHW_BARHND_SERIAL, + PCIEHW_BARHND_VIRTIO, +} pciehw_barhnd_t; + +typedef union pciehwbar_u { + struct { + u_int64_t size; /* total size of this bar */ + u_int32_t valid:1; /* valid bar for this dev */ + u_int32_t loaded:1; /* pmts loaded */ + u_int32_t ovrds:1; /* override pmts chained on ovrd */ + pciehwbartype_t type; /* PCIEHWBARTYPE_* */ + u_int8_t cfgidx; /* config bars index (0-5) */ + u_int8_t hnd; /* indirect/notify handling */ + u_int16_t bdf; /* host bdf of bar owner */ + u_int32_t pmtb; /* pmt base for bar */ + u_int32_t pmtc; /* pmt count for bar */ + u_int16_t ovrd; /* override pmts */ + u_int16_t _unused; + u_int64_t addr; /* addr of this bar */ + }; + u_int8_t _pad[64]; +} pciehwbar_t; + +typedef u_int32_t pciehwdevh_t; + +#define PCIEHW_ROMSKSZ (PCIEHW_CFGSZ / sizeof (u_int32_t)) +#define PCIEHW_CFGHNDSZ (PCIEHW_CFGSZ / sizeof (u_int32_t)) + +#define NOVRDINTR 8 + +/* + * If PCIEHDEVICE_OVERRIDE_INTRGROUPS increases we'll have + * to grow the shared memory region with special handling. 
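+ * The preprocessor check below turns a violation of that + * assumption into a compile-time error instead of a silent + * layout change.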
+ */ +#if NOVRDINTR < PCIEHDEVICE_OVERRIDE_INTRGROUPS +# error "NOVRDINTR < PCIEHDEVICE_OVERRIDE_INTRGROUPS" +#endif + +typedef struct ovrdintr_s { + u_int32_t intrb; /* ovrd intr base */ + u_int32_t intrc; /* ovrd intr count */ +} ovrdintr_t; + +typedef union pciehwdev_u { + struct { + char name[32]; /* device name */ + int port; /* pcie port */ + u_int16_t pf:1; /* is pf */ + u_int16_t vf:1; /* is vf */ + u_int16_t flexvf:1; /* is flexvf */ + u_int16_t totalvfs; /* totalvfs provisioned */ + u_int16_t numvfs; /* current numvfs */ + u_int16_t vfidx; /* if is vf, vf position */ + u_int16_t bdf; /* bdf of this dev */ + u_int8_t type; /* PCIEHDEVICE_* */ + u_int8_t novrdintr; /* number valid in ovrdintr[] */ + u_int32_t lifb; /* lif base for this dev */ + u_int32_t lifc; /* lif count for this dev */ + u_int32_t intrb; /* intr resource base */ + u_int32_t intrc; /* intr resource count */ + u_int32_t intrdmask:1; /* reset val for drvcfg.mask */ + u_int32_t cfgloaded:1; /* cfg pmt entries loaded */ + pciehwdevh_t parenth; /* handle to parent */ + pciehwdevh_t childh; /* handle to child */ + pciehwdevh_t peerh; /* handle to peer */ + u_int8_t intpin; /* legacy int pin */ + u_int8_t romsksel[PCIEHW_ROMSKSZ]; /* cfg read-only mask selectors */ + u_int8_t cfgpmtf[PCIEHW_CFGHNDSZ]; /* cfg pmt flags */ + u_int8_t cfghnd[PCIEHW_CFGHNDSZ]; /* cfg indirect/notify handlers */ + pciehwbar_t bar[PCIEHW_NBAR]; /* bar info */ + pciehwbar_t rombar; /* option rom bar */ + u_int16_t sriovctrl; /* current sriov ctrl reg */ + u_int16_t enabledvfs; /* current numvfs enabled */ + pciehwdevh_t hwdevh; /* handle to this dev */ + u_int32_t pmtb; /* pmt base for cfg */ + u_int32_t pmtc; /* pmt count for cfg */ + ovrdintr_t ovrdintr[NOVRDINTR]; /* override intr resources */ + }; + u_int8_t _pad[4096]; +} pciehwdev_t; + +typedef union pciehw_port_u { + struct { + u_int8_t secbus; /* bridge secondary bus */ + pciemgr_stats_t stats; + }; + u_int8_t _pad[1024]; +} pciehw_port_t; + +typedef union pciehw_sprt_u { + struct { + prt_t prt; /* shadow copy of prt */ + u_int16_t next; /* next link for chained prts */ + }; + u_int8_t _pad[32]; +} pciehw_sprt_t; + +typedef union pciehw_spmt_u { + struct { + u_int64_t baroff; /* bar addr offset */ + u_int64_t swrd; /* reads handled by sw (not/ind) */ + u_int64_t swwr; /* writes handled by sw (not/ind) */ + pciehwdevh_t owner; /* current owner of this entry */ + u_int8_t loaded:1; /* is loaded into hw */ + u_int8_t vf0:1; /* sriov vf0 apply enabledvfs limit */ + u_int8_t vf0stride:5; /* sriov vf0 addr mask stride */ + u_int8_t chain:1; /* chained pmts on next */ + u_int8_t cfgidx; /* cfgidx for bar we belong to */ + pmt_t pmt; /* shadow copy of pmt */ + u_int64_t vf0base:52; /* sriov vf0 resource base address */ + u_int64_t pmtstart:6; /* sriov vf0 addr mask start */ + u_int16_t next; /* next link for chained pmts */ + }; + u_int8_t _pad[128]; +} pciehw_spmt_t; + +typedef struct pciehw_sromsk_s { + u_int32_t entry; + u_int32_t count; +} pciehw_sromsk_t; + +#define PCIEHW_MAGIC 0x706d656d /* 'pmem' */ +#define PCIEHW_VERSION 0x1 + +#define PCIEHW_VPDSZ 1024 +#define PCIEHW_SERIALSZ 1024 + +typedef struct pciehw_shmem_s { + u_int32_t magic; /* PCIEHW_MAGIC when initialized */ + u_int32_t version; /* PCIEHW_VERSION when initialized */ + u_int32_t hwinit:1; /* hw is initialized */ + u_int32_t notify_verbose:1; /* notify logs all */ + u_int32_t skip_notify:1; /* notify skips if ring full */ + u_int32_t pmtpri:1; /* support pmt pri */ + u_int32_t evregistered:1; /* event handler registered 
flag */ + u_int32_t allocdev; + u_int32_t allocpmt_high; /* high priority pmt free sequential */ + u_int32_t allocprt; /* prt free sequential */ + u_int32_t notify_ring_mask; + pciehwdevh_t rooth[PCIEHW_NPORTS]; + pciehwdev_t dev[PCIEHW_NDEVS]; + pciehw_port_t port[PCIEHW_NPORTS]; + pciehw_sromsk_t sromsk[PCIEHW_NROMSK]; + pciehw_spmt_t spmt[PCIEHW_NPMT]; + pciehw_sprt_t sprt[PCIEHW_NPRT]; + u_int8_t cfgrst[PCIEHW_NDEVS][PCIEHW_CFGSZ]; + u_int8_t cfgmsk[PCIEHW_NDEVS][PCIEHW_CFGSZ]; + u_int8_t vpddata[PCIEHW_NDEVS][PCIEHW_VPDSZ]; + u_int8_t serial[PCIEHW_NPORTS][PCIEHW_SERIALSZ]; + u_int32_t freepmt_high; /* high priority pmt free list */ + u_int32_t allocpmt_low; /* low priority pmt free sequential */ + u_int32_t freepmt_low; /* low priority pmt free list */ + u_int32_t allocpmt_vf0adj; /* low pri vf0 adjust (never freed) */ + u_int32_t freeprt_slab; /* prt free slab adjacent */ +} pciehw_shmem_t; + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIESHMEM_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc.h new file mode 100644 index 0000000000..f4cfcc5714 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021, Pensando Systems Inc. + * Copyright (c) 2022, Advanced Micro Devices, Inc. + */ + +#ifndef __PCIESVC_H__ +#define __PCIESVC_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +#include "pmt.h" +#include "prt.h" +#include "pciehwmem.h" +#include "pcieshmem.h" +#include "pciesvc_event.h" +#include "pciesvc_cmd.h" + +#define PCIESVC_VERSION_MAJ 3 +#define PCIESVC_VERSION_MIN 1 + +typedef struct pciesvc_params_v0_s { + int port; /* port to config */ + uint32_t ind_poll:1; /* indirect trans poll */ + uint32_t ind_intr:1; /* indirect trans intr */ + uint32_t not_poll:1; /* notify trans poll */ + uint32_t not_intr:1; /* notify trans intr */ + uint32_t mac_poll:1; /* mac poll */ + uint32_t mac_intr:1; /* mac intr */ + uint64_t ind_msgaddr; /* ind_intr=1: intr msg addr */ + uint32_t ind_msgdata; /* ind_intr=1: intr msg data */ + uint64_t not_msgaddr; /* not_intr=1: intr msg addr */ + uint32_t not_msgdata; /* not_intr=1: intr msg data */ +} pciesvc_params_v0_t; + +typedef struct pciesvc_params_s { + int version; + union { + pciesvc_params_v0_t params_v0; + }; +} pciesvc_params_t; + +int pciesvc_init(pciesvc_params_t *params); +void pciesvc_shut(const int port); + +/* + * Return value: + * <0 error + * =0 no work done + * >0 work done + */ +int pciesvc_poll(const int port); + +int pciesvc_indirect_poll_init(const int port); +int pciesvc_indirect_poll(const int port); +int pciesvc_indirect_intr_init(const int port, + u_int64_t msgaddr, u_int32_t msgdata); +int pciesvc_indirect_intr(const int port); + +int pciesvc_notify_poll_init(const int port); +int pciesvc_notify_poll(const int port); +int pciesvc_notify_intr_init(const int port, + u_int64_t msgaddr, u_int32_t msgdata); +int pciesvc_notify_intr(const int port); + +int pciesvc_cmd_read(char *buf, const long int off, const size_t count); +int pciesvc_cmd_write(const char *buf, const long int off, const size_t count); + +extern int pciesvc_version_major; +extern int pciesvc_version_minor; + +void pciesvc_get_version(int *maj, int *min); + +extern pciesvc_logpri_t pciesvc_log_level; + +#ifdef __cplusplus +} +#endif + +#endif /*
__PCIESVC_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc_cmd.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc_cmd.h new file mode 100644 index 0000000000..bfde686488 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc_cmd.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Advanced Micro Devices, Inc. + */ + +#ifndef __PCIESVC_CMD_H__ +#define __PCIESVC_CMD_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +typedef enum pciesvc_cmdcode_e { + PCIESVC_CMD_NOP = 0, + PCIESVC_CMD_SET_LOG_LEVEL = 1, +} pciesvc_cmdcode_t; + +typedef enum pciesvc_cmdstatus_e { + PCIESVC_CMDSTATUS_SUCCESS = 0, + PCIESVC_CMDSTATUS_UNKNOWN_CMD = 1, +} pciesvc_cmdstatus_t; + +typedef struct pciesvc_cmd_nop_s { + uint32_t cmd; +} pciesvc_cmd_nop_t; + +typedef struct pciesvc_cmdres_nop_s { + uint32_t status; +} pciesvc_cmdres_nop_t; + +typedef struct pciesvc_cmd_set_log_level_s { + uint32_t cmd; + uint32_t log_level; +} pciesvc_cmd_set_log_level_t; + +typedef struct pciesvc_cmdres_set_log_level_s { + uint32_t status; + uint32_t old_level; +} pciesvc_cmdres_set_log_level_t; + +typedef union pciesvc_cmd_u { + uint32_t words[16]; + uint8_t cmd; + pciesvc_cmd_nop_t nop; + pciesvc_cmd_set_log_level_t set_log_level; +} pciesvc_cmd_t; + +typedef union pciesvc_cmdres_u { + uint32_t words[16]; + uint8_t status; + pciesvc_cmdres_nop_t nop; + pciesvc_cmdres_set_log_level_t set_log_level; +} pciesvc_cmdres_t; + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIESVC_CMD_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc_event.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc_event.h new file mode 100644 index 0000000000..3e7364d820 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc_event.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020, Pensando Systems Inc. 
+ */ + +#ifndef __PCIESVC_EVENT_H__ +#define __PCIESVC_EVENT_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +typedef enum pciesvc_event_e { + PCIESVC_EV_NONE, + PCIESVC_EV_MEMRD_NOTIFY, + PCIESVC_EV_MEMWR_NOTIFY, + PCIESVC_EV_SRIOV_NUMVFS, + PCIESVC_EV_RESET, + PCIESVC_EV_QFULL, + PCIESVC_EV_MGMTCHG, + PCIESVC_EV_LOGMSG, +} pciesvc_event_t; + +typedef struct pciesvc_memrw_notify_s { + u_int64_t baraddr; /* PCIe bar address */ + u_int64_t baroffset; /* bar-local offset */ + u_int8_t cfgidx; /* bar cfgidx */ + u_int32_t size; /* i/o size */ + u_int64_t localpa; /* local physical address */ + u_int64_t data; /* data, if write */ +} pciesvc_memrw_notify_t; + +typedef struct pciesvc_sriov_numvfs_s { + u_int16_t numvfs; /* number of vfs enabled */ +} pciesvc_sriov_numvfs_t; + +typedef enum pciesvc_rsttype_e { + PCIESVC_RSTTYPE_NONE, + PCIESVC_RSTTYPE_BUS, /* bus reset */ + PCIESVC_RSTTYPE_FLR, /* function level reset */ + PCIESVC_RSTTYPE_VF, /* vf reset from sriov ctrl vfe */ +} pciesvc_rsttype_t; + +typedef struct pciesvc_reset_s { + pciesvc_rsttype_t rsttype; /* RSTTYPE_* */ + u_int32_t lifb; /* lif base */ + u_int32_t lifc; /* lif count */ +} pciesvc_reset_t; + +typedef enum pciesvc_logpri_e { + PCIESVC_LOGPRI_DEBUG, + PCIESVC_LOGPRI_INFO, + PCIESVC_LOGPRI_WARN, + PCIESVC_LOGPRI_ERROR, +} pciesvc_logpri_t; + +typedef struct pciesvc_logmsg_s { + pciesvc_logpri_t pri; /* log priority LOGPRI_ */ + char msg[80]; /* log string, NULL-terminated */ +} pciesvc_logmsg_t; + +typedef struct pciesvc_eventdata_s { + pciesvc_event_t evtype; /* PCIESVC_EV_* */ + u_int8_t port; /* PCIe port */ + u_int32_t lif; /* lif if event for lifs */ + union { + pciesvc_memrw_notify_t memrw_notify; /* EV_MEMRD/WR_NOTIFY */ + pciesvc_sriov_numvfs_t sriov_numvfs; /* EV_SRIOV_NUMVFS */ + pciesvc_reset_t reset; /* EV_RESET */ + pciesvc_logmsg_t logmsg; /* EV_LOGMSG */ + }; +} pciesvc_eventdata_t; + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIESVC_EVENT_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc_local.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc_local.h new file mode 100644 index 0000000000..c25e12fce5 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pciesvc_local.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021-2022, Pensando Systems Inc. 
+ */ + +#ifndef __PCIESVC_LOCAL_H__ +#define __PCIESVC_LOCAL_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +union pciehwdev_u; typedef union pciehwdev_u pciehwdev_t; +union pciehwbar_u; typedef union pciehwbar_u pciehwbar_t; +typedef u_int32_t pciehwdevh_t; + +u_int64_t pciehw_bar_getsize(pciehwbar_t *phwbar); +void pciehw_bar_setaddr(pciehwbar_t *phwbar, const u_int64_t addr); +void pciehw_bar_load_ovrds(pciehwbar_t *phwbar); +void pciehw_bar_unload_ovrds(pciehwbar_t *phwbar); +void pciehw_bar_load(pciehwdev_t *phwdev, pciehwbar_t *phwbar); +void pciehw_cfg_load(pciehwdev_t *phwdev); +void pciehw_pmt_setaddr(pciehwbar_t *phwbar, const u_int64_t addr); +void pciehw_reset_bus(pciehwdev_t *phwdev, const u_int8_t bus); +uint32_t pciehw_vpd_read(pciehwdevh_t hwdevh, const uint16_t addr); +void pciehw_vpd_write(pciehwdevh_t hwdevh, + const uint16_t addr, const uint32_t data); + +u_int16_t pciehwdev_get_hostbdf(const pciehwdev_t *phwdev); + +void pciehw_sriov_ctrl(pciehwdev_t *phwdev, + const u_int16_t ctrl, const u_int16_t numvfs); + +struct pmt_s; typedef struct pmt_s pmt_t; +int pmt_reserve_vf0adj(const int n); +int pmt_alloc(const int n, const int pri); +void pmt_free(const int pmtb, const int pmtc); +void pmt_get(const int pmti, pmt_t *pmt); +void pmt_set(const int pmti, const pmt_t *pmt); +void pmt_bar_set_bdf(pmt_t *pmt, const u_int16_t bdf); +u_int64_t pmt_bar_getaddr(const pmt_t *pmt); +void pmt_bar_setaddr(pmt_t *pmt, const u_int64_t addr); + +union pmt_entry_u; typedef union pmt_entry_u pmt_entry_t; +struct pmt_datamask_s; typedef struct pmt_datamask_s pmt_datamask_t; +void pmt_entry_enc(pmt_entry_t *pmte, const pmt_datamask_t *dm); +void pmt_entry_dec(const pmt_entry_t *pmte, pmt_datamask_t *dm); + +union prt_u; typedef union prt_u prt_t; +int prt_alloc(const int n); +void prt_free(const int prtbase, const int prtcount); +void prt_get(const int prti, prt_t *prt); +void prt_set(const int prti, const prt_t *prt); + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIESVC_LOCAL_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pcietlp.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pcietlp.h new file mode 100644 index 0000000000..34bac8138f --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pcietlp.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019, Pensando Systems Inc. 
+ */ + +#ifndef __PCIETLP_H__ +#define __PCIETLP_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +/* + * PCIe Transaction Layer Protocol, based on + * PCI Express Base Specification + * Revision 4.0 Version 1.0 + * September 27, 2017 + */ + +typedef enum pcie_stlp_type_e { + PCIE_STLP_MALFORMED, /* malformed tlp */ + PCIE_STLP_CFGRD, /* cfg (type 0) read */ + PCIE_STLP_CFGWR, /* cfg (type 0) write */ + PCIE_STLP_CFGRD1, /* cfg (type 1) read */ + PCIE_STLP_CFGWR1, /* cfg (type 1) write */ + PCIE_STLP_MEMRD, /* memory read */ + PCIE_STLP_MEMWR, /* memory write */ + PCIE_STLP_MEMRD64, /* memory read - 64-bit addr */ + PCIE_STLP_MEMWR64, /* memory write - 64-bit addr */ + PCIE_STLP_IORD, /* I/O space read */ + PCIE_STLP_IOWR, /* I/O space write */ + PCIE_STLP_MSG, /* message */ + PCIE_STLP_MSGD, /* message with data */ +} pcie_stlp_type_t; + +typedef struct pcie_stlp_s { + u_int8_t type; /* tlp type PCIE_STLP_* */ + u_int16_t reqid; /* requester id */ + u_int16_t tag; /* tag of request */ + u_int16_t bdf; /* bus,dev,fun of request */ + u_int16_t size; /* size of request */ + u_int64_t addr; /* address */ + u_int64_t data; /* payload data */ +} pcie_stlp_t; + +int pcietlp_decode(pcie_stlp_t *stlp, const void *rtlp, const size_t rtlpsz); +int pcietlp_encode(const pcie_stlp_t *stlp, void *rtlp, const size_t rtlpsz); +char *pcietlp_get_error(void); +char *pcietlp_buf(const pcie_stlp_t *stlp, void *buf, const size_t bufsz); +char *pcietlp_str(const pcie_stlp_t *stlp); + +/* + * PCIe Base Spec, Table 2-2 + */ +#define PCIE_TLP_FMT_3DW 0x0 /* 3 DW header, no data (read) */ +#define PCIE_TLP_FMT_4DW 0x1 /* 4 DW header, no data (read) */ +#define PCIE_TLP_FMT_3DWD 0x2 /* 3 DW header, with data (write) */ +#define PCIE_TLP_FMT_4DWD 0x3 /* 4 DW header, with data (write) */ +#define PCIE_TLP_FMT_PREF 0x4 /* TLP prefix */ + +#define mk_tlp_type(fmt, type) (PCIE_TLP_FMT_##fmt << 5 | ((type) & 0x1f)) + +/* + * PCIe Base Spec, Table 2-3 + */ +typedef enum pcie_tlp_type_e { + PCIE_TLP_TYPE_MEMRD = mk_tlp_type(3DW, 0x0), + PCIE_TLP_TYPE_MEMRD64 = mk_tlp_type(4DW, 0x0), + PCIE_TLP_TYPE_MEMWR = mk_tlp_type(3DWD, 0x0), + PCIE_TLP_TYPE_MEMWR64 = mk_tlp_type(4DWD, 0x0), + PCIE_TLP_TYPE_IORD = mk_tlp_type(3DW, 0x2), + PCIE_TLP_TYPE_IOWR = mk_tlp_type(3DWD, 0x2), + PCIE_TLP_TYPE_CFGRD0 = mk_tlp_type(3DW, 0x4), + PCIE_TLP_TYPE_CFGWR0 = mk_tlp_type(3DWD, 0x4), + PCIE_TLP_TYPE_CFGRD1 = mk_tlp_type(3DW, 0x5), + PCIE_TLP_TYPE_CFGWR1 = mk_tlp_type(3DWD, 0x5), +} pcie_tlp_type_t; + +typedef struct pcie_tlp_common_hdr_s { + /* dword 0 */ + u_int32_t type:8; /* transaction type */ + + u_int32_t th:1; /* tlp hint */ + u_int32_t ln:1; /* lightweight notification */ + u_int32_t attr_hi:1; /* attributes[2] */ + u_int32_t t8:1; /* tag[8] */ + u_int32_t tc:3; /* traffic class */ + u_int32_t t9:1; /* tag[9] */ + + u_int32_t len_hi:2; /* length[8:9] (dw) */ + u_int32_t at:2; /* at[0:1] */ + u_int32_t attr_lo:2; /* attributes[0:1] */ + u_int32_t ep:1; /* error poisoned */ + u_int32_t td:1; /* tlp digest */ + + u_int32_t len_lo:8; /* length[0:7] (dw) */ + + /* dword 1 */ + u_int32_t reqid:16; /* requester id */ + + u_int32_t tag:8; /* transaction tag */ + + u_int32_t fbe:4; /* first dw byte enable */ + u_int32_t lbe:4; /* last dw byte enable */ +} __attribute__((packed)) pcie_tlp_common_hdr_t; + +typedef struct pcie_tlp_cfg_s { + /* dword 0 */ + u_int32_t type:8; /* transaction type */ + + u_int32_t th:1; /* tlp hint */ + u_int32_t ln:1; /* lightweight notification */ + u_int32_t 
attr_hi:1; /* attributes[2] */ + u_int32_t t8:1; /* tag[8] */ + u_int32_t tc:3; /* traffic class */ + u_int32_t t9:1; /* tag[9] */ + + u_int32_t len_hi:2; /* length[8:9] (dw) */ + u_int32_t at:2; /* at[0:1] */ + u_int32_t attr_lo:2; /* attributes[0:1] */ + u_int32_t ep:1; /* error poisoned */ + u_int32_t td:1; /* tlp digest */ + + u_int32_t len_lo:8; /* length[0:7] (dw) */ + + /* dword 1 */ + u_int32_t reqid:16; /* requester id */ + + u_int32_t tag:8; /* transaction tag */ + + u_int32_t fbe:4; /* first dw byte enable */ + u_int32_t lbe:4; /* last dw byte enable */ + + /* dword 2 */ + u_int32_t bdf:16; /* bus,dev,fun target */ + + u_int32_t extreg:4; /* extended register number */ + u_int32_t rsrv:4; /* reserved */ + + u_int32_t reg:8; /* register number */ +} __attribute__((packed)) pcie_tlp_cfg_t; + +typedef struct pcie_tlp_mem32_s { + /* dword 0 */ + u_int32_t type:8; /* transaction type */ + + u_int32_t th:1; /* tlp hint */ + u_int32_t ln:1; /* lightweight notification */ + u_int32_t attr_hi:1; /* attributes[2] */ + u_int32_t t8:1; /* tag[8] */ + u_int32_t tc:3; /* traffic class */ + u_int32_t t9:1; /* tag[9] */ + + u_int32_t len_hi:2; /* length[8:9] (dw) */ + u_int32_t at:2; /* at[0:1] */ + u_int32_t attr_lo:2; /* attributes[0:1] */ + u_int32_t ep:1; /* error poisoned */ + u_int32_t td:1; /* tlp digest */ + + u_int32_t len_lo:8; /* length[0:7] (dw) */ + + /* dword 1 */ + u_int32_t reqid:16; /* requester id */ + + u_int32_t tag:8; /* transaction tag */ + + u_int32_t fbe:4; /* first dw byte enable */ + u_int32_t lbe:4; /* last dw byte enable */ + + /* dword 2 */ + u_int32_t addr; /* address[31:2] */ +} __attribute__((packed)) pcie_tlp_mem32_t; + +/* I/O similar to mem32 */ +typedef pcie_tlp_mem32_t pcie_tlp_io_t; + +typedef struct pcie_tlp_mem64_s { + /* dword 0 */ + u_int32_t type:8; /* transaction type */ + + u_int32_t th:1; /* tlp hint */ + u_int32_t ln:1; /* lightweight notification */ + u_int32_t attr_hi:1; /* attributes[2] */ + u_int32_t t8:1; /* tag[8] */ + u_int32_t tc:3; /* traffic class */ + u_int32_t t9:1; /* tag[9] */ + + u_int32_t len_hi:2; /* length[8:9] (dw) */ + u_int32_t at:2; /* at[0:1] */ + u_int32_t attr_lo:2; /* attributes[0:1] */ + u_int32_t ep:1; /* error poisoned */ + u_int32_t td:1; /* tlp digest */ + + u_int32_t len_lo:8; /* length[0:7] (dw) */ + + /* dword 1 */ + u_int32_t reqid:16; /* requester id */ + + u_int32_t tag:8; /* transaction tag */ + + u_int32_t fbe:4; /* first dw byte enable */ + u_int32_t lbe:4; /* last dw byte enable */ + + /* dword 2 */ + u_int32_t addr_hi; /* address[63:32] */ + + /* dword 3 */ + u_int32_t addr_lo; /* address[31:2] */ +} __attribute__((packed)) pcie_tlp_mem64_t; + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIETLP_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pmt.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pmt.h new file mode 100644 index 0000000000..79cff5a770 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/pmt.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018, Pensando Systems Inc. + */ + +#ifndef __PCIESVC_PMT_H__ +#define __PCIESVC_PMT_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +/****************************************************************** + * PCIe Match Table (PMT) + * + * PMT entry is the tcam entry used to match the incoming PCIe TLP. 
+ * The corresponding PMR entry provides auxiliary information used + * in processing the transaction after the PMT match determines the + * entry that should be used for processing the TLP. + */ + +#define PMT_COUNT 1024 +#define PMT_NWORDS 5 +#define PMR_NWORDS 4 + +/* terminator index for chained pmts */ +#define PMT_INVALID ((u_int16_t)-1) + +/* + * pmt_alloc priority. + * Lower pmtpri corresponds to lower index in tcam so higher priority. + */ +typedef enum pmtpri_e { + PMTPRI_HIGH, /* high priority in tcam */ + PMTPRI_LOW, /* low priority in tcam */ + PMTPRI_VF0ADJ, /* vf0 adjust entry */ + + PMTPRI_CFG = PMTPRI_HIGH, /* cfg space pmt entry */ + PMTPRI_BAR = PMTPRI_HIGH, /* bar pmt entry */ + PMTPRI_FLEXVF = PMTPRI_LOW, /* flexvf bar pmt default entry */ + PMTPRI_FLEXVFOVRD = PMTPRI_HIGH, /* flexvf bar pmt override entry */ +} pmtpri_t; + +/* defines for PMT.type and PMR.type fields */ +#define PMT_TYPE_CFG 0 /* host cfg */ +#define PMT_TYPE_MEM 1 /* host mem bar */ +#define PMT_TYPE_RC 2 /* rc dma */ +#define PMT_TYPE_IO 5 /* host I/O bar */ + +/* all PMTs start with these common fields */ +#define PMT_CMN_FIELDS \ + u_int64_t valid :1; /* entry is valid */ \ + u_int64_t tblid :2; /* table id */ \ + u_int64_t type :3; /* PMT_TYPE_* */ \ + u_int64_t port :3; /* incoming pcie port */ \ + u_int64_t rw :1 /* 0=read, 1=write */ + +/* common pmt entry format */ +typedef struct { + PMT_CMN_FIELDS; +} __attribute__((packed)) pmt_cmn_format_t; + +/* cfg pmt entry format */ +typedef struct { + PMT_CMN_FIELDS; + u_int64_t bdf :16; /* bdf of tlp */ + u_int64_t addrdw :10; /* config space dw address */ + u_int64_t rsrv :28; +} __attribute__((packed)) pmt_cfg_format_t; + +/* bar pmt entry format */ +typedef struct { + PMT_CMN_FIELDS; + u_int64_t addrdw :50; /* tlp address */ + u_int64_t rsrv :4; +} __attribute__((packed)) pmt_bar_format_t; + +/****************************************************************** + * PMR entry is the RAM extension of the corresponding PMT entry + * containing auxiliary information used by hw after the PMT tcam + * match is determined. 
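+ * A complete entry is therefore the (pmt_entry_t, pmr_entry_t) + * pair, wrapped up as pmt_t at the bottom of this file.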
+ */ + +/* cfg pmr entry format */ +typedef struct { + u_int64_t valid :1; /* entry is valid */ + u_int64_t type :3; /* matches PMT.type */ + u_int64_t vfbase :11; /* vf base for vf id range for entry */ + u_int64_t indirect :1; /* sw handles tlp */ + u_int64_t notify :1; /* notify sw */ + u_int64_t pstart :3; /* port wildcard base */ + u_int64_t bstart :8; /* bus wildcard base */ + u_int64_t dstart :5; /* device wildcard base */ + u_int64_t fstart :3; /* function wildcard base */ + u_int64_t plimit :3; /* port wildcard limit */ + u_int64_t blimit :8; /* bus wildcard limit */ + u_int64_t dlimit :5; /* device wildcard limit */ + u_int64_t flimit :3; /* function wildcard limit */ + u_int64_t vfstridesel:4; /* p:bdf wildcard vf stride selector */ + u_int64_t td :1; /* tlp digest, generate ecrc on completion */ +#if defined(ASIC_CAPRI) + u_int64_t addrdw :34; /* target resource address */ +#elif defined(ASIC_ELBA) + u_int64_t addrdw :35; /* target resource address */ +#else +#error "ASIC not specified" +#endif + u_int64_t aspace :1; /* target address space, 1=external (pcie) */ + u_int64_t romsksel :7; /* read-only mask selector */ +#if defined(ASIC_CAPRI) + u_int64_t spare :8; /* implemented but unused in hw */ +#elif defined(ASIC_ELBA) + u_int64_t spare :7; /* implemented but unused in hw */ +#else +#error "ASIC not specified" +#endif + u_int64_t rsrv :18; /* unimplemented bits */ +} __attribute__((packed)) pmr_cfg_entry_t; + +/* bar pmr entry format */ +typedef struct { + u_int64_t valid :1; /* entry is valid */ + u_int64_t type :3; /* matches PMT.type */ + u_int64_t vfbase :11; /* vf base for vf ids valid for entry */ + u_int64_t indirect :1; /* sw handles tlp */ + u_int64_t notify :1; /* notify sw */ + u_int64_t prtb :12; /* base of contiguous prt entries */ + u_int64_t prtc :12; /* count of contiguous prt entries */ + u_int64_t prtsize :5; /* power-of-2 resource size, eg. 
4=16 bytes */ + u_int64_t vfstart :6; /* low bit pos of vf field in addr */ + u_int64_t vfend :6; /* high bit pos of vf field in addr */ + u_int64_t vflimit :11; /* vf field upper limit */ + u_int64_t bdf :16; /* bdf for completions */ + u_int64_t td :1; /* tlp digest, generate ecrc on completion */ + u_int64_t pagesize :3; /* encoded page size, PID bit pos start */ + u_int64_t qtypestart:5; /* low bit pos of 3-bit qtype */ + u_int64_t qtypemask :3; /* qtype mask on 3 bits at qtypestart */ + u_int64_t qidstart :5; /* 32b db: low bit pos of qid field in addr */ + u_int64_t qidend :5; /* 32b db: high bit pos of qid field in addr */ +#if defined(ASIC_CAPRI) + u_int64_t spare :3; /* implemented but unused in hw */ +#elif defined(ASIC_ELBA) + u_int64_t hstridesel:3; /* host stride select */ +#else +#error "ASIC not specified" +#endif + u_int64_t rsrv :18; /* unimplemented bits */ +} __attribute__((packed)) pmr_bar_entry_t; + +/* all pmt formats */ +typedef union { + pmt_cmn_format_t cmn; + pmt_cfg_format_t cfg; + pmt_bar_format_t bar; + u_int64_t all; +} pmt_format_t; + +/* data and mask format used to describe pmt_tcam_t format */ +typedef struct pmt_datamask_s { + pmt_format_t data; + pmt_format_t mask; +} pmt_datamask_t; + +/* tcam entry format */ +typedef struct { + u_int64_t x; /* tcam x */ + u_int64_t y; /* tcam y */ + u_int32_t v; /* 1=entry valid */ +} __attribute__((packed)) pmt_tcam_t; + +/* tcam entry as words for reading/writing to hw */ +typedef union pmt_entry_u { + pmt_tcam_t tcam; + u_int32_t w[PMT_NWORDS]; +} pmt_entry_t; + +/* PMR entry format */ +typedef union { + pmr_cfg_entry_t cfg; + pmr_bar_entry_t bar; + u_int32_t w[PMR_NWORDS]; +} pmr_entry_t; + +/* full PMT/PMR entry */ +typedef struct pmt_s { + pmt_entry_t pmte; + pmr_entry_t pmre; +} pmt_t; + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIESVC_PMT_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/prt.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/prt.h new file mode 100644 index 0000000000..40b1b1ce08 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/prt.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018,2021, Pensando Systems Inc. + */ + +#ifndef __PCIESVC_PRT_H__ +#define __PCIESVC_PRT_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +/****************************************************************** + * PCIe Resource Table (PRT) + * + * PRT entry is the table entry used by PMT to describe + * bar match table resources and handling.
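+ * A matching bar PMT's PMR entry carries a base and count of + * contiguous PRT entries (prtb/prtc in pmr_bar_entry_t); each + * PRT entry then supplies the target resource address or the + * doorbell decoding for the matched transaction.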
+ */ + +#define PRT_COUNT 4096 +#define PRT_NWORDS 3 + +/* terminator index for chained prts */ +#define PRT_INVALID ((u_int16_t)-1) + +/* for PRT.type */ +#define PRT_TYPE_RES 0 /* resource */ +#define PRT_TYPE_DB64 1 /* 64-bit doorbells */ +#define PRT_TYPE_DB32 2 /* 32-bit doorbells */ +#define PRT_TYPE_DB16 3 /* 16-bit doorbells */ + +/* for PRT.res.wqebpsize */ +#define PRT_WQEBP_SZ64 0 /* 64B aligned WQEs */ +#define PRT_WQEBP_SZ128 1 /* 128B aligned WQEs */ +#define PRT_WQEBP_SZ256 2 /* 256B aligned WQEs */ +#define PRT_WQEBP_SZ512 3 /* 512B aligned WQEs */ + +/* for PRT.db.updvec */ +#define PRT_UPD_SCHED_NONE 0x00 /* no scheduler request */ +#define PRT_UPD_SCHED_EVAL 0x01 /* scheduler eval pi/ci */ +#define PRT_UPD_SCHED_CLEAR 0x02 /* scheduler clear */ +#define PRT_UPD_SCHED_SET 0x03 /* scheduler set */ +#define PRT_UPD_SCHED_MASK 0x03 /* sched bit mask */ +#define PRT_UPD_PICI_CISET 0x04 /* set ci */ +#define PRT_UPD_PICI_PISET 0x08 /* set pi */ +#define PRT_UPD_PICI_PIINC 0x0c /* increment pi */ +#define PRT_UPD_PICI_MASK 0x0c /* pici bit mask */ +#define PRT_UPD_PID_CHECK 0x10 /* check pid */ + +#define PRT_CMN_FIELDS \ + u_int64_t valid :1; /* entry is valid */ \ + u_int64_t type :2; /* PRT_TYPE_* */ \ + u_int64_t indirect :1; /* sw handles tlp */ \ + u_int64_t notify :1; /* notify sw */ \ + u_int64_t vfstride :5 /* power-of-2 stride added to addr */ + +/* common prt entry format */ +typedef struct { + PRT_CMN_FIELDS; +} __attribute__((packed)) prt_cmn_t; + +/* resource prt entry format */ +typedef struct { + PRT_CMN_FIELDS; + u_int64_t aspace :1; /* target address space, 1=external (pcie) */ + u_int64_t addrdw :50; /* target resource address */ + u_int64_t sizedw :11; /* encoded resource size */ + u_int64_t pmvdis :1; /* disable Programming Model Violation check */ +#if defined(ASIC_CAPRI) + u_int64_t spare :3; /* implemented but unused in hw */ + u_int64_t rsrv :52; /* unimplemented bits */ +#elif defined(ASIC_ELBA) + u_int64_t wqebpen :1; /* WQE bypass enable */ + u_int64_t wqebpsize :2; /* WQE bypass entry size encoded */ + u_int64_t wqebpdben :1; /* WQE bypass doorbell enable */ + u_int64_t spare :8; /* implemented but unused in hw */ + u_int64_t rsrv :43; /* unimplemented bits */ +#else +#error "ASIC not specified" +#endif +} __attribute__((packed)) prt_res_t; + +/* db64/db32/db16 prt entry format */ +typedef struct { + PRT_CMN_FIELDS; + u_int64_t lif :11; /* target LIF */ + u_int64_t updvec :40; /* 8x5-bit UPD field, indexed by qtype */ + u_int64_t stridesel :2; /* selects vfstride, 0=VF, 1={VF,LIF} */ + u_int64_t idxshift :2; /* db16/32: index location in data */ + u_int64_t idxwidth :4; /* db16/32: index width in data */ + u_int64_t qidshift :2; /* db16/32: qid location in data */ + u_int64_t qidwidth :4; /* db16/32: qid width in data */ + u_int64_t qidsel :1; /* db16/32: qid source select, 0=data 1=addr */ +#if defined(ASIC_CAPRI) + u_int64_t rsrv :52; /* unimplemented bits */ +#elif defined(ASIC_ELBA) + u_int64_t dbbussel :1; /* doorbell bus select, 0=prp, 1=express */ + u_int64_t spare :8; /* implemented but unused in hw */ + u_int64_t rsrv :43; /* unimplemented bits */ +#else +#error "ASIC not specified" +#endif +} __attribute__((packed)) prt_db_t; + +/* PRT entry */ +typedef union prt_u { + prt_cmn_t cmn; + prt_res_t res; + prt_db_t db; + u_int32_t w[PRT_NWORDS]; +} prt_t; + +static inline int +prt_is_valid(const prt_t *prt) +{ + return prt->cmn.valid; +} + +static inline u_int32_t +prt_type(const prt_t *prt) +{ + return prt->cmn.type; +} + +#ifdef __cplusplus 
+} +#endif + +#endif /* __PCIESVC_PRT_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/serial_state.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/serial_state.h new file mode 100644 index 0000000000..604cb14381 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/serial_state.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Pensando Systems Inc. + */ + +#ifndef __SERIAL_STATE_H__ +#define __SERIAL_STATE_H__ + +#include "uart.h" + +#define MEMQ_BUFSZ 64 + +typedef struct memq { + unsigned int pidx __attribute__((aligned(64))); + unsigned int cidx __attribute__((aligned(64))); + char buf[MEMQ_BUFSZ] __attribute__((aligned(64))); +} memq_t; + +typedef struct serial_state { + u_int32_t intrb; /* intr resource base */ + u_int32_t intrc; /* intr resource count */ + u_int32_t gen; /* generation number */ + u_int32_t gen_ack; /* generation number ack */ + u_int32_t breakreq; /* break request */ + u_int32_t _unused[11]; + memq_t txq; /* txq from device thr */ + memq_t rxq; /* rxq to device rbr */ +} serial_state_t; + +typedef struct serial_uart_state { + union { + serial_state_t serial_state; + u_int8_t _pad1[960]; + }; + union { + uart_state_t uart_state; + u_int8_t _pad2[64]; + }; +} serial_uart_state_t; + +#endif /* __SERIAL_STATE_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/tlpauxinfo.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/tlpauxinfo.h new file mode 100644 index 0000000000..9d275274c7 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/tlpauxinfo.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018,2020, Pensando Systems Inc. + */ + +#ifndef __TLPAUXINFO_H__ +#define __TLPAUXINFO_H__ + +/* + * For indirect or notify transactions, the hardware delivers + * this auxiliary information along with the pcie tlp. + */ +typedef struct tlpauxinfo_s { +#if defined(ASIC_CAPRI) + uint64_t direct_endaddr :6; +#elif defined(ASIC_ELBA) + uint64_t spare :3; + uint64_t wqebpdbxen :1; + uint64_t wqebpsize :2; +#else +#error "ASIC not specified" +#endif + uint64_t direct_blen :4; + uint64_t is_indirect :1; + uint64_t is_direct :1; + uint64_t is_ur :1; + uint64_t is_ca :1; + uint64_t romsksel :7; + uint64_t context_id :7; + uint64_t vfid :11; + uint64_t is_notify :1; + uint64_t direct_size :9; + uint64_t direct_addr :52; + uint64_t aspace :1; + uint64_t pmti :10; + uint64_t pmt_hit :1; + uint64_t indirect_reason :5; + uint64_t is_host :1; + uint64_t axilen :4; +#if defined(ASIC_CAPRI) + uint64_t rsrv :3; +#elif defined(ASIC_ELBA) + uint64_t rsrv :1; + uint64_t wqetype :1; /* wqe type, 0=wqe, 1=doorbell */ + uint64_t wqebpdben :1; /* wqe bypass doorbell enable */ +#else +#error "ASIC not specified" +#endif + uint64_t eop :1; + uint64_t sop :1; +} __attribute__((packed)) tlpauxinfo_t; + +#endif /* __TLPAUXINFO_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/uart.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/uart.h new file mode 100644 index 0000000000..2718a6cf18 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/uart.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Pensando Systems Inc. 
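 *
 * The memq_t ring in serial_state.h above is a single-producer
 * single-consumer queue: the producer advances pidx, the consumer
 * advances cidx, and the 64-byte alignment keeps the two indices on
 * separate cache lines.  For illustration, a minimal sketch of how
 * such a ring might be driven (memq_putc/memq_getc are hypothetical
 * helpers, not part of this driver, and memory barriers are elided):
 *
 *   static int memq_putc(memq_t *q, char c) {
 *       unsigned int next = (q->pidx + 1) % MEMQ_BUFSZ;
 *       if (next == q->cidx) return 0;        // ring full
 *       q->buf[q->pidx] = c;
 *       q->pidx = next;                       // publish after data write
 *       return 1;
 *   }
 *
 *   static int memq_getc(memq_t *q, char *c) {
 *       if (q->cidx == q->pidx) return 0;     // ring empty
 *       *c = q->buf[q->cidx];
 *       q->cidx = (q->cidx + 1) % MEMQ_BUFSZ;
 *       return 1;
 *   }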
+ */ + +#ifndef __UART_H__ +#define __UART_H__ + +/* + * UART definitions + */ +typedef struct uart_state { + uint8_t rbr; /* reg0: receive register (read) */ + uint8_t thr; /* reg0: transmit holding register (write) */ + uint8_t ier; /* reg1: interrupt enable register */ + uint8_t iir; /* reg2: interrupt id register (read) */ + uint8_t fcr; /* reg2: fifo control register (write) */ + uint8_t lcr; /* reg3: line control register */ + uint8_t mcr; /* reg4: modem control register */ + uint8_t lsr; /* reg5: line status register */ + uint8_t msr; /* reg6: modem status register */ + uint8_t scr; /* reg7: scratch register */ + int thr_ipending; + uint16_t divider; + int parity; + int data_bits; + int stop_bits; + uint8_t recv_fifo_itl; /* interrupt trigger level */ + uint8_t mcr_read; + uint8_t mcr_write; + int flags; +} uart_state_t; + + +enum uart_regs { + UART_RX_BUF = 0, + UART_TX_BUF = 0, + UART_INTERRUPT_ENABLE = 1, + UART_INTERRUPT_ID = 2, + UART_FIFO_CONTROL = 2, + UART_LINE_CONTROL = 3, + UART_MODEM_CONTROL = 4, + UART_LINE_STATUS = 5, + UART_MODEM_STATUS = 6, + UART_SCRATCH = 7, +}; + +#define UART_LCR_BRK 0x40 /* Set Break Enable */ +#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ + +/* + * Interrupt Enable Register (offset 0x1) + */ +#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */ +#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */ +#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. */ +#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */ + +/* + * Interrupt Identification Register (offset 0x2) - read + */ +#define UART_IIR_NO_INT 0x01 /* No interrupts pending */ +#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */ + +#define UART_IIR_MSI 0x00 /* Modem status interrupt */ +#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */ +#define UART_IIR_RDI 0x04 /* Receiver data interrupt */ +#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */ +#define UART_IIR_CTI 0x0C /* Character Timeout Indication */ + +#define UART_IIR_FENF 0x80 /* Fifo enabled, but not functioning */ +#define UART_IIR_FE 0xC0 /* Fifo enabled */ + +/* + * FIFO Control Register (offset 0x2) - write + */ +#define UART_FCR_ITL_1 0x00 /* 1 byte ITL */ +#define UART_FCR_ITL_2 0x40 /* 4 bytes ITL */ +#define UART_FCR_ITL_3 0x80 /* 8 bytes ITL */ +#define UART_FCR_ITL_4 0xC0 /* 14 bytes ITL */ +#define UART_FCR_DMS 0x08 /* DMA Mode Select */ +#define UART_FCR_XFR 0x04 /* XMIT Fifo Reset */ +#define UART_FCR_RFR 0x02 /* RCVR Fifo Reset */ +#define UART_FCR_FE 0x01 /* FIFO Enable */ + +/* + * Modem Control Register (offset 0x4) + */ +#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */ +#define UART_MCR_OUT2 0x08 /* Out2 complement */ +#define UART_MCR_OUT1 0x04 /* Out1 complement */ +#define UART_MCR_RTS 0x02 /* RTS complement */ +#define UART_MCR_DTR 0x01 /* DTR complement */ + +/* + * Line Status Register (offset 0x5) + */ +#define UART_LSR_TEMT 0x40 /* Transmitter empty */ +#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ +#define UART_LSR_BI 0x10 /* Break interrupt indicator */ +#define UART_LSR_FE 0x08 /* Frame error indicator */ +#define UART_LSR_PE 0x04 /* Parity error indicator */ +#define UART_LSR_OE 0x02 /* Overrun error indicator */ +#define UART_LSR_DR 0x01 /* Receiver data ready */ +#define UART_LSR_INT_ANY 0x1E /* Any of the lsr-interrupt-triggering bits */ + +/* + * Modem Status Register (offset 0x6) + */ +#define UART_MSR_DCD 0x80 /* Data Carrier Detect */ +#define UART_MSR_RI 0x40 /* Ring 
Indicator */ +#define UART_MSR_DSR 0x20 /* Data Set Ready */ +#define UART_MSR_CTS 0x10 /* Clear to Send */ +#define UART_MSR_DDCD 0x08 /* Delta DCD */ +#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */ +#define UART_MSR_DDSR 0x02 /* Delta DSR */ +#define UART_MSR_DCTS 0x01 /* Delta CTS */ +#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! */ + +#define CHR_TIOCM_CTS 0x020 +#define CHR_TIOCM_CAR 0x040 +#define CHR_TIOCM_DSR 0x100 +#define CHR_TIOCM_RI 0x080 +#define CHR_TIOCM_DTR 0x002 +#define CHR_TIOCM_RTS 0x004 + +#endif /* __UART_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/virtio_spec.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/virtio_spec.h new file mode 100644 index 0000000000..4ff97fbcce --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/include/virtio_spec.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Pensando Systems Inc. + */ +#ifndef __VIRTIO_SPEC_H__ +#define __VIRTIO_SPEC_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif +#ifndef BIT_ULL +#define BIT_ULL(nr) (1ULL << (nr)) +#endif +#ifndef BIT_MASK +#define BIT_MASK(nr) (1UL << (nr)) +#endif +#ifndef BITS_PER_BYTE +#define BITS_PER_BYTE (8) +#endif + +/* 4.1.4.3 - common configuration structure layout */ + +/* upper bounds, non-inclusive, for indirect access via select registers */ +enum { + VIRTIO_PCI_FEATURE_SELECT_COUNT = 2, + VIRTIO_PCI_QUEUE_SELECT_COUNT = 128, +}; + +/* feature config, for indirect access via select register */ +typedef uint32_t virtio_pci_feature_cfg_t; + +/* virtqueue config, for indirect access via select register */ +typedef struct virtio_pci_queue_cfg { + uint16_t queue_size; + uint16_t queue_msix_vector; + uint16_t queue_enable; + uint16_t queue_notify_off; + uint32_t queue_desc_lo; + uint32_t queue_desc_hi; + uint32_t queue_avail_lo; + uint32_t queue_avail_hi; + uint32_t queue_used_lo; + uint32_t queue_used_hi; +} __attribute__((packed)) virtio_pci_queue_cfg_t; + +/* common config, with dummy fields in place of indirect access */ +typedef struct virtio_pci_common_cfg { + uint32_t device_feature_select; + virtio_pci_feature_cfg_t device_feature; // indirect + uint32_t driver_feature_select; + virtio_pci_feature_cfg_t driver_feature; // indirect + uint16_t config_msix_vector; + uint16_t num_queues; + uint8_t device_status; + uint8_t config_generation; + uint16_t queue_select; + union { + virtio_pci_queue_cfg_t queue_cfg; // indirect + /* indirect features are hidden behind unused queue_cfg */ + struct { + virtio_pci_feature_cfg_t device_feature_cfg[VIRTIO_PCI_FEATURE_SELECT_COUNT]; + virtio_pci_feature_cfg_t driver_feature_cfg[VIRTIO_PCI_FEATURE_SELECT_COUNT]; + /* pciemgr observed device status nonzero -> zero */ + uint8_t need_reset; + }; + }; +} __attribute__((packed)) virtio_pci_common_cfg_t; + +typedef struct virtio_net_config { + /* The config defining mac address (if VIRTIO_NET_F_MAC) */ + uint8_t mac[6]; + + /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ + uint16_t status; + + /* Maximum number of each of transmit and receive queues; + * see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ. + * Legal values are between 1 and 0x8000 + */ + uint16_t max_virtqueue_pairs; + + /* Default maximum transmit unit advice */ + uint16_t mtu; + + /* Speed, in units of 1Mb. All values 0 to INT_MAX are legal. 
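 *
 * (Aside on virtio_pci_common_cfg above: the select/window register
 * pairs -- device_feature_select + device_feature, and likewise for
 * driver_feature -- form a 32-bit window into a wider register file.
 * A sketch of a host-side read of the full 64-bit device feature set,
 * assuming a `cfg` pointer to the mapped common config and plain
 * volatile accesses in place of real MMIO accessors:
 *
 *   static uint64_t read_device_features(volatile virtio_pci_common_cfg_t *cfg) {
 *       uint64_t f = 0;
 *       uint32_t sel;
 *       for (sel = 0; sel < VIRTIO_PCI_FEATURE_SELECT_COUNT; sel++) {
 *           cfg->device_feature_select = sel;   // select which 32-bit word
 *           f |= (uint64_t)cfg->device_feature << (32 * sel);
 *       }
 *       return f;
 *   }
 *
 * On this device side the window fields are dummies; the per-select
 * values live in device_feature_cfg[] and driver_feature_cfg[] behind
 * the unused queue_cfg union member.)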
+ * Any other value stands for unknown. + */ + uint32_t speed; + + /* 0x00 - half duplex + * 0x01 - full duplex + * Any other value stands for unknown. + */ + uint8_t duplex; + + /* maximum size of RSS key */ + uint8_t rss_max_key_size; + + /* maximum number of indirection table entries */ + uint16_t rss_max_indirection_table_length; + + /* bitmask of supported VIRTIO_NET_RSS_HASH_ types */ + uint32_t supported_hash_types; +} __attribute__((packed)) virtio_net_config_t; + +/* 2.1 - device status field */ +enum { + VIRTIO_S_ACKNOWLEDGE = (1u << 0), + VIRTIO_S_DRIVER = (1u << 1), + VIRTIO_S_DRIVER_OK = (1u << 2), + VIRTIO_S_FEATURES_OK = (1u << 3), + VIRTIO_S_NEEDS_RESET = (1u << 6), + VIRTIO_S_FAILED = (1u << 7), +}; + +/* 5.1.3 - network device - feature bits */ +enum { + VIRTIO_NET_F_CSUM = (1ull << 0), + VIRTIO_NET_F_GUEST_CSUM = (1ull << 1), + VIRTIO_NET_F_CTRL_GUEST_OFFLOADS = (1ull << 2), + VIRTIO_NET_F_MTU = (1ull << 3), + VIRTIO_NET_F_MAC = (1ull << 5), + VIRTIO_NET_F_GSO = (1ull << 6), + VIRTIO_NET_F_GUEST_TSO4 = (1ull << 7), + VIRTIO_NET_F_GUEST_TSO6 = (1ull << 8), + VIRTIO_NET_F_GUEST_ECN = (1ull << 9), + VIRTIO_NET_F_GUEST_UFO = (1ull << 10), + VIRTIO_NET_F_HOST_TSO4 = (1ull << 11), + VIRTIO_NET_F_HOST_TSO6 = (1ull << 12), + VIRTIO_NET_F_HOST_ECN = (1ull << 13), + VIRTIO_NET_F_HOST_UFO = (1ull << 14), + VIRTIO_NET_F_MRG_RXBUF = (1ull << 15), + VIRTIO_NET_F_STATUS = (1ull << 16), + VIRTIO_NET_F_CTRL_VQ = (1ull << 17), + VIRTIO_NET_F_CTRL_RX = (1ull << 18), + VIRTIO_NET_F_CTRL_VLAN = (1ull << 19), + VIRTIO_NET_F_CTRL_RX_EXTRA = (1ull << 20), + VIRTIO_NET_F_GUEST_ANNOUNCE = (1ull << 21), + VIRTIO_NET_F_MQ = (1ull << 22), + VIRTIO_NET_F_CTRL_MAC_ADDR = (1ull << 23), + VIRTIO_NET_F_NOTF_COAL = (1ull << 53), + VIRTIO_NET_F_HASH_REPORT = (1ull << 57), + VIRTIO_NET_F_RSS = (1ull << 60), + VIRTIO_NET_F_RSS_EXT = (1ull << 61), + VIRTIO_NET_F_STANDBY = (1ull << 62), + VIRTIO_NET_F_SPEED_DUPLEX = (1ull << 63), +}; + +/* 6 - reserved feature bits */ +enum { + VIRTIO_F_NOTIFY_ON_EMPTY = (1ull << 24), + VIRTIO_F_ANY_LAYOUT = (1ull << 27), + VIRTIO_F_RING_INDIRECT_DESC = (1ull << 28), + VIRTIO_F_RING_EVENT_IDX = (1ull << 29), + VIRTIO_F_UNUSED = (1ull << 30), + VIRTIO_F_VERSION_1 = (1ull << 32), + VIRTIO_F_ACCESS_PLATFORM = (1ull << 33), + VIRTIO_F_RING_PACKED = (1ull << 34), + VIRTIO_F_IN_ORDER = (1ull << 35), + VIRTIO_F_ORDER_PLATFORM = (1ull << 36), + VIRTIO_F_SR_IOV = (1ull << 37), + VIRTIO_F_NOTIFICATION_DATA = (1ull << 38), +}; + +/* supported/enabled hash types */ +enum { + VIRTIO_NET_RSS_HASH_TYPE_IPv4 = (1u << 0), + VIRTIO_NET_RSS_HASH_TYPE_TCPv4 = (1u << 1), + VIRTIO_NET_RSS_HASH_TYPE_UDPv4 = (1u << 2), + VIRTIO_NET_RSS_HASH_TYPE_IPv6 = (1u << 3), + VIRTIO_NET_RSS_HASH_TYPE_TCPv6 = (1u << 4), + VIRTIO_NET_RSS_HASH_TYPE_UDPv6 = (1u << 5), + VIRTIO_NET_RSS_HASH_TYPE_IP_EX = (1u << 6), + VIRTIO_NET_RSS_HASH_TYPE_TCP_EX = (1u << 7), + VIRTIO_NET_RSS_HASH_TYPE_UDP_EX = (1u << 8), +}; + +struct virtio_pci_notify_reg { + uint8_t inc_pi_dbell[512]; + uint8_t set_pi_dbell[512]; +}; + +#define VIRTIO_NOTIFY_MULTIPLIER 4 + +struct virtio_ident_reg { + uint64_t hw_features; + uint16_t max_vqs; + uint16_t max_qlen; + uint16_t min_qlen; +}; + +struct virtio_dev_regs { + union { + struct virtio_pci_common_cfg cmn_cfg; + uint8_t part0[256]; + }; + union { + struct virtio_net_config net_cfg; + uint8_t dev_cfg[256]; + uint8_t part1[256]; + }; + union { + struct virtio_ident_reg ident; + uint8_t part2[512]; + }; + union { + struct virtio_pci_notify_reg notify_reg; + uint8_t part3[1024]; + }; + union { + 
uint8_t isr_cfg[2048]; + uint8_t part4[2048]; + }; + /* indirect queue configs */ + struct virtio_pci_queue_cfg queue_cfg[VIRTIO_PCI_QUEUE_SELECT_COUNT]; +} __attribute__((packed)); + +#define VIRTIO_DEV_REG_OFF(fld) offsetof(struct virtio_dev_regs, fld) +#define VIRTIO_DEV_REG_SZ(fld) sizeof(((struct virtio_dev_regs *)0)->fld) +#define VIRTIO_DEV_REG_ADDR(base, fld) ((base) + VIRTIO_DEV_REG_OFF(fld)) + +struct pvirtq_desc { + uint64_t addr; /* Buffer Address. */ + uint32_t len; /* Buffer Length. */ + uint16_t id; /* Buffer ID. */ + uint16_t flags; /* The flags depending on descriptor type. */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* __VIRTIO_SPEC_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/asic_regs.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/asic_regs.h new file mode 100644 index 0000000000..98f0ae15b0 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/asic_regs.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021, Pensando Systems Inc. + */ + +#ifndef __ASIC_REGS_H__ +#define __ASIC_REGS_H__ + +#ifdef ASIC_CAPRI +#include "asic_regs_capri.h" +#endif +#ifdef ASIC_ELBA +#include "asic_regs_elba.h" +#endif + +#endif /* __ASIC_REGS_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/asic_regs_capri.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/asic_regs_capri.h new file mode 100644 index 0000000000..67b6b058ef --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/asic_regs_capri.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021-2022, Pensando Systems Inc. + */ + +#ifndef __ASIC_REGS_CAPRI_H__ +#define __ASIC_REGS_CAPRI_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +#define ASIC_(REG) CAP_ ##REG +#define PXB_(REG) \ + (CAP_ADDR_BASE_PXB_PXB_OFFSET + CAP_PXB_CSR_ ##REG## _BYTE_ADDRESS) +#define PXC_(REG, pn) \ + (CAP_ADDR_BASE_PP_PP_OFFSET + \ + ((pn) * CAP_PXC_CSR_BYTE_SIZE) + \ + CAP_PP_CSR_PORT_C_ ##REG## _BYTE_ADDRESS) + +/* cap_top_csr_defines.h */ +#define CAP_ADDR_BASE_INTR_INTR_OFFSET 0x6000000 +#define CAP_ADDR_BASE_PP_PP_OFFSET 0x7000000 +#define CAP_ADDR_BASE_PXB_PXB_OFFSET 0x7100000 + +/* cap_pxb_c_hdr.h */ +#define CAP_PXB_CSR_DHS_ITR_PCIHDRT_BYTE_ADDRESS 0x8000 +#define CAP_PXB_CSR_DHS_TGT_NOTIFY_BYTE_ADDRESS 0x9a000 +#define CAP_PXB_CSR_DHS_TGT_PMT_BYTE_ADDRESS 0x18000 +#define CAP_PXB_CSR_DHS_TGT_PMR_BYTE_ADDRESS 0x20000 +#define CAP_PXB_CSR_DHS_TGT_PRT_BYTE_ADDRESS 0x30000 +#define CAP_PXB_CSR_DHS_TGT_AXIMST0_BYTE_ADDRESS 0x62000 +#define CAP_PXB_CSR_DHS_TGT_AXIMST1_BYTE_ADDRESS 0x63000 +#define CAP_PXB_CSR_DHS_TGT_IND_RSP_ENTRY_BYTE_ADDRESS 0x9a020 +#define CAP_PXB_CSR_CFG_TGT_REQ_NOTIFY_INT_BYTE_ADDRESS 0x9a120 +#define CAP_PXB_CSR_CFG_TGT_REQ_NOTIFY_RING_SIZE_BYTE_ADDRESS 0x9a130 +#define CAP_PXB_CSR_CFG_TGT_REQ_INDIRECT_INT_BYTE_ADDRESS 0x9a140 +#define CAP_PXB_CSR_CFG_TGT_NOTIFY_EN_BYTE_ADDRESS 0x9a184 +#define CAP_PXB_CSR_CFG_TGT_PMT_GRST_BYTE_ADDRESS 0x9a204 +#define CAP_PXB_CSR_STA_TGT_IND_INFO_BYTE_ADDRESS 0x9a320 +#define CAP_PXB_CSR_DHS_ITR_PCIHDRT_ENTRIES 0x800 +#define CAP_PXB_CSR_DHS_ITR_PCIHDRT_ENTRY_BYTE_SIZE 0x10 +#define CAP_PXB_CSR_DHS_TGT_PMT_ENTRY_ARRAY_ELEMENT_SIZE 0x1 +#define CAP_PXB_CSR_DHS_TGT_PMR_ENTRY_BYTE_SIZE 0x10 +#define CAP_PXB_CSR_DHS_TGT_PRT_ENTRY_BYTE_SIZE 0x10 + +/* cap_pp_c_hdr.h */ +#define 
CAP_PXC_CSR_BYTE_SIZE 0x2000 +#define CAP_PP_CSR_PORT_C_DHS_C_MAC_APB_ENTRY_BYTE_ADDRESS 0x10000 + +/* cap_intr_c_hdr.h */ +#define CAP_INTR_CSR_DHS_INTR_ASSERT_ENTRY_ARRAY_COUNT 0x1000 +#define CAP_INTR_CSR_DHS_INTR_MSIXCFG_BYTE_OFFSET 0x10000 +#define CAP_INTR_CSR_DHS_INTR_FWCFG_BYTE_OFFSET 0x20000 +#define CAP_INTR_CSR_DHS_INTR_DRVCFG_BYTE_OFFSET 0x40000 +#define CAP_INTR_CSR_DHS_INTR_ASSERT_BYTE_OFFSET 0x68000 +#define CAP_INTR_CSR_DHS_INTR_STATE_BYTE_OFFSET 0x70000 + +#ifdef __cplusplus +} +#endif + +#endif /* __ASIC_REGS_CAPRI_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/asic_regs_elba.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/asic_regs_elba.h new file mode 100644 index 0000000000..c2e50cfed1 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/asic_regs_elba.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021-2022, Pensando Systems Inc. + */ + +#ifndef __ASIC_REGS_ELBA_H__ +#define __ASIC_REGS_ELBA_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +#define ASIC_(REG) ELB_ ##REG +#define PXB_(REG) \ + (ELB_ADDR_BASE_PXB_PXB_OFFSET + ELB_PXB_CSR_ ##REG## _BYTE_ADDRESS) +#define _PXC_BASE(pn) \ + (ELB_ADDR_BASE_PP_PXC_0_OFFSET + \ + ((pn) * ELB_ADDR_BASE_PP_PXC_0_SIZE)) +#define PXC_(REG, pn) \ + (_PXC_BASE(pn) + ELB_PXC_CSR_ ##REG## _BYTE_ADDRESS) + +/* elb_top_csr_defines.h */ +#define ELB_ADDR_BASE_PXB_PXB_OFFSET 0x20000000 +#define ELB_ADDR_BASE_PP_PXC_0_OFFSET 0x20100000 +#define ELB_ADDR_BASE_PP_PXC_0_SIZE 0x40000 +#define ELB_ADDR_BASE_INTR_INTR_OFFSET 0x61800000 + +/* elb_pxb_c_hdr.h */ +#define ELB_PXB_CSR_DHS_TGT_NOTIFY_BYTE_ADDRESS 0xc4000 +#define ELB_PXB_CSR_DHS_ITR_PCIHDRT_BYTE_ADDRESS 0x8000 +#define ELB_PXB_CSR_DHS_TGT_PMT_BYTE_ADDRESS 0x18000 +#define ELB_PXB_CSR_DHS_TGT_PMR_BYTE_ADDRESS 0x20000 +#define ELB_PXB_CSR_DHS_TGT_PRT_BYTE_ADDRESS 0x30000 +#define ELB_PXB_CSR_DHS_TGT_AXIMST0_BYTE_ADDRESS 0x62000 +#define ELB_PXB_CSR_DHS_TGT_AXIMST1_BYTE_ADDRESS 0x63000 +#define ELB_PXB_CSR_DHS_TGT_IND_RSP_ENTRY_BYTE_ADDRESS 0xc4020 +#define ELB_PXB_CSR_CFG_TGT_REQ_NOTIFY_INT_BYTE_ADDRESS 0xc4160 +#define ELB_PXB_CSR_CFG_TGT_REQ_NOTIFY_RING_SIZE_BYTE_ADDRESS 0xc4170 +#define ELB_PXB_CSR_CFG_TGT_REQ_INDIRECT_INT_BYTE_ADDRESS 0xc4180 +#define ELB_PXB_CSR_CFG_TGT_NOTIFY_EN_BYTE_ADDRESS 0xc41c4 +#define ELB_PXB_CSR_CFG_TGT_PMT_GRST_BYTE_ADDRESS 0xc4244 +#define ELB_PXB_CSR_STA_TGT_IND_INFO_BYTE_ADDRESS 0xc43c0 +#define ELB_PXB_CSR_DHS_ITR_PCIHDRT_ENTRIES 0x800 +#define ELB_PXB_CSR_DHS_ITR_PCIHDRT_ENTRY_BYTE_SIZE 0x10 +#define ELB_PXB_CSR_DHS_TGT_PMT_ENTRY_ARRAY_ELEMENT_SIZE 0x1 +#define ELB_PXB_CSR_DHS_TGT_PMR_ENTRY_BYTE_SIZE 0x10 +#define ELB_PXB_CSR_DHS_TGT_PRT_ENTRY_BYTE_SIZE 0x10 + +/* elb_pxc_c_hdr.h */ +#define ELB_PXC_CSR_DHS_C_MAC_APB_ENTRY_BYTE_ADDRESS 0x1000 + +/* elb_intr_c_hdr.h */ +#define ELB_INTR_CSR_DHS_INTR_ASSERT_ENTRY_ARRAY_COUNT 0x2000 +#define ELB_INTR_CSR_DHS_INTR_MSIXCFG_BYTE_OFFSET 0x20000 +#define ELB_INTR_CSR_DHS_INTR_FWCFG_BYTE_OFFSET 0x40000 +#define ELB_INTR_CSR_DHS_INTR_DRVCFG_BYTE_OFFSET 0x80000 +#define ELB_INTR_CSR_DHS_INTR_ASSERT_BYTE_OFFSET 0xd0000 +#define ELB_INTR_CSR_DHS_INTR_STATE_BYTE_OFFSET 0xe0000 + +#ifdef __cplusplus +} +#endif + +#endif /* __ASIC_REGS_ELBA_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/bar.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/bar.c new file mode 100644 
index 0000000000..a21143f91b --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/bar.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2018,2020-2022, Pensando Systems Inc. + */ + +#include "pciesvc_impl.h" +#include "pcietlp.h" +#include "indirect.h" +#include "notify.h" +#include "serial.h" +#include "virtio.h" +#include "pmt.h" + +static pciehwbar_t * +pciehw_bar_get(pciehwdev_t *phwdev, const int idx) +{ + if (idx < 0 || idx > 7) return NULL; + if (idx == 7) return &phwdev->rombar; + return &phwdev->bar[idx]; +} + +u_int64_t +pciehw_bar_getsize(pciehwbar_t *phwbar) +{ + if (!phwbar->valid) return 0; + return phwbar->size; +} + +void +pciehw_bar_setaddr(pciehwbar_t *phwbar, const u_int64_t addr) +{ + if (phwbar->addr != addr) { + phwbar->addr = addr; + pciehw_pmt_setaddr(phwbar, addr); + } +} + +void +pciehw_bar_load(pciehwdev_t *phwdev, pciehwbar_t *phwbar) +{ + if (!phwbar->loaded) { +#ifdef PCIEMGR_DEBUG + pciesvc_logdebug("%s: bar %d pmt %d loaded\n", + pciehwdev_get_name(phwdev), + phwbar->cfgidx, phwbar->pmtb); +#endif + phwbar->bdf = pciehwdev_get_hostbdf(phwdev); + pciehw_bar_load_pmts(phwbar); + phwbar->loaded = 1; + } +} + +void +pciehw_bar_unload(pciehwdev_t *phwdev, pciehwbar_t *phwbar) +{ + if (phwbar->loaded) { +#ifdef PCIEMGR_DEBUG + pciesvc_logdebug("%s: bar %d pmt %d unloaded\n", + pciehwdev_get_name(phwdev), + phwbar->cfgidx, phwbar->pmtb); +#endif + pciehw_bar_unload_pmts(phwbar); + phwbar->loaded = 0; + } +} + +void +pciehw_bar_enable(pciehwdev_t *phwdev, pciehwbar_t *phwbar, const int on) +{ + if (on) { + pciehw_bar_load(phwdev, phwbar); + } else { + pciehw_bar_unload(phwdev, phwbar); + } +} + +static void +pciehw_barrw_notify(const pciesvc_event_t evtype, + const int port, + pciehwdev_t *phwdev, + const pcie_stlp_t *stlp, + const tlpauxinfo_t *info, + const pciehw_spmt_t *spmt) +{ + const pciehwbar_t *phwbar = pciehw_bar_get(phwdev, spmt->cfgidx); + pciesvc_eventdata_t evd; + pciesvc_memrw_notify_t *memrw; + + pciesvc_memset(&evd, 0, sizeof(evd)); + evd.evtype = evtype; + evd.port = port; + evd.lif = phwdev->lifb; + memrw = &evd.memrw_notify; + memrw->baraddr = stlp->addr; + memrw->cfgidx = spmt->cfgidx; + memrw->baroffset = stlp->addr - phwbar->addr; + memrw->size = stlp->size; + memrw->localpa = info->direct_addr; + memrw->data = stlp->data; /* data, if write or hacked in */ + pciesvc_event_handler(&evd, sizeof(evd)); +} + +void +pciehw_barrd_notify(const int port, notify_entry_t *nentry) +{ + const tlpauxinfo_t *info = &nentry->info; + const pciehw_spmt_t *spmt = pciesvc_spmt_get(info->pmti); + pciehwdev_t *phwdev = pciehwdev_get(spmt->owner); + pcie_stlp_t stlpbuf, *stlp = &stlpbuf; + + pcietlp_decode(stlp, nentry->rtlp, sizeof(nentry->rtlp)); + + pciehw_barrw_notify(PCIESVC_EV_MEMRD_NOTIFY, + port, phwdev, stlp, info, spmt); + + pciehwdev_put(phwdev, CLEAN); + pciesvc_spmt_put(spmt, CLEAN); +} + +void +pciehw_barwr_notify(const int port, notify_entry_t *nentry) +{ + const tlpauxinfo_t *info = &nentry->info; + const pciehw_spmt_t *spmt = pciesvc_spmt_get(info->pmti); + pciehwdev_t *phwdev = pciehwdev_get(spmt->owner); + pcie_stlp_t stlpbuf, *stlp = &stlpbuf; + + pcietlp_decode(stlp, nentry->rtlp, sizeof(nentry->rtlp)); + + pciehw_barrw_notify(PCIESVC_EV_MEMWR_NOTIFY, + port, phwdev, stlp, info, spmt); + + pciehwdev_put(phwdev, 
CLEAN); + pciesvc_spmt_put(spmt, CLEAN); +} + +void +pciehw_barrd_indirect(const int port, indirect_entry_t *ientry) +{ + const tlpauxinfo_t *info = &ientry->info; + const pciehw_spmt_t *spmt = pciesvc_spmt_get(info->pmti); + pciehwdev_t *phwdev = pciehwdev_get(spmt->owner); + const pciehwbar_t *phwbar = pciehw_bar_get(phwdev, spmt->cfgidx); + + switch (phwbar->hnd) { + + case PCIEHW_BARHND_SERIAL: { + pcie_stlp_t stlpbuf, *stlp = &stlpbuf; + u_int64_t baroff; + + pcietlp_decode(stlp, ientry->rtlp, sizeof(ientry->rtlp)); + baroff = stlp->addr - phwbar->addr; + ientry->data[0] = serial_barrd(phwdev, baroff, info->direct_size); + break; + } + + case PCIEHW_BARHND_VIRTIO: { + pcie_stlp_t stlpbuf, *stlp = &stlpbuf; + u_int64_t baroff; + u_int8_t do_notify = 0; + + pcietlp_decode(stlp, ientry->rtlp, sizeof(ientry->rtlp)); + baroff = stlp->addr - phwbar->addr; + ientry->data[0] = virtio_barrd(phwdev, info->direct_addr, baroff, + info->direct_size, &do_notify); + + stlp->data = ientry->data[0]; // HACK so logging shows real value + + if (do_notify) { + pciehw_barrw_notify(PCIESVC_EV_MEMRD_NOTIFY, + port, phwdev, stlp, info, spmt); + } + + break; + } + + default: { + u_int64_t pa = info->direct_addr; + size_t sz = info->direct_size; + + pciesvc_mem_rd(pa, ientry->data, sz); + break; + } + } + pciehwdev_put(phwdev, CLEAN); + pciesvc_spmt_put(spmt, CLEAN); + + pciehw_indirect_complete(ientry); +} + +void +pciehw_barwr_indirect(const int port, indirect_entry_t *ientry) +{ + const tlpauxinfo_t *info = &ientry->info; + const pciehw_spmt_t *spmt = pciesvc_spmt_get(info->pmti); + pciehwdev_t *phwdev = pciehwdev_get(spmt->owner); + const pciehwbar_t *phwbar = pciehw_bar_get(phwdev, spmt->cfgidx); + pcie_stlp_t stlpbuf, *stlp = &stlpbuf; + + pcietlp_decode(stlp, ientry->rtlp, sizeof(ientry->rtlp)); + + switch (phwbar->hnd) { + + case PCIEHW_BARHND_SERIAL: { + const u_int64_t baroff = stlp->addr - phwbar->addr; + const u_int32_t size = info->direct_size; + + serial_barwr(phwdev, baroff, size, stlp->data); + break; + } + + case PCIEHW_BARHND_VIRTIO: { + const u_int64_t baroff = stlp->addr - phwbar->addr; + const u_int32_t size = info->direct_size; + u_int8_t do_notify = 0; + + virtio_barwr(phwdev, info->direct_addr, baroff, size, stlp->data, + &do_notify); + + if (do_notify) { + pciehw_barrw_notify(PCIESVC_EV_MEMWR_NOTIFY, + port, phwdev, stlp, info, spmt); + } + + break; + } + + default: { + u_int64_t pa = info->direct_addr; + size_t sz = info->direct_size; + + pciesvc_mem_wr(pa, &stlp->data, sz); + break; + } + } + pciehwdev_put(phwdev, CLEAN); + pciesvc_spmt_put(spmt, CLEAN); + + pciehw_indirect_complete(ientry); +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/bdf.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/bdf.h new file mode 100644 index 0000000000..1646e875e9 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/bdf.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017,2021, Pensando Systems Inc. 
+ */ + +#ifndef __BDF_H__ +#define __BDF_H__ + +static inline int +bdf_to_bus(const int bdf) +{ + return (bdf >> 8) & 0xff; +} + +static inline int +bdf_to_dev(const int bdf) +{ + return (bdf >> 3) & 0x1f; +} + +static inline int +bdf_to_fnc(const int bdf) +{ + return bdf & 0x7; +} + +static inline int +bdf_make(const int b, const int d, const int f) +{ + return ((b & 0xff) << 8) | ((d & 0x1f) << 3) | (f & 0x7); +} + +static inline char * +bdf_to_buf(const int bdf, char *buf, size_t bufsz) +{ + const int b = bdf_to_bus(bdf); + const int d = bdf_to_dev(bdf); + const int f = bdf_to_fnc(bdf); + pciesvc_snprintf(buf, bufsz, "%02x:%02x.%d", b, d, f); + return buf; +} + +static inline char * +bdf_to_str(const int bdf) +{ +#define NBUFS 8 + static char buf[NBUFS][16]; + static int bufi; + return bdf_to_buf(bdf, buf[bufi++ % NBUFS], sizeof(buf[0])); +} + +#endif /* __BDF_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cfg.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cfg.c new file mode 100644 index 0000000000..2ec811a03c --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cfg.c @@ -0,0 +1,1072 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2019,2021-2022, Pensando Systems Inc. + */ + +#include "pciesvc_impl.h" +#include "pcietlp.h" +#include "portcfg.h" +#include "cfgspace.h" +#include "bdf.h" +#include "intr.h" +#include "indirect.h" +#include "notify.h" +#include "hdrt.h" +#include "vpd.h" +#include "reset.h" + +typedef struct handler_ctx_s { + pcie_stlp_t stlp; + int port; + pciehwdevh_t hwdevh; + uint32_t retval; + indirect_entry_t *ientry; + notify_entry_t *nentry; +} handler_ctx_t; + +/* + * Detect these overlaps: + * + * regaddr regsize + * v v + * +--------------+ + * +--------------+ + * ^ ^ + * tlpaddr tlpsize + * + * regaddr regsize + * v v + * +--------------+ + * +--------------+ + * ^ ^ + * tlpaddr tlpsize + * + * regaddr regsize + * v v + * +--------------+ + * +--------------+ + * ^ ^ + * tlpaddr tlpsize + */ +static int +stlp_overlap(const pcie_stlp_t *stlp, + const u_int32_t regaddr, const u_int32_t regsize) +{ + const u_int32_t tlpaddr = stlp->addr; + const u_int32_t tlpsize = stlp->size; + + return tlpaddr < regaddr + regsize && tlpaddr + tlpsize > regaddr; +} + +/* + * The "info->vfid" parameter is scaled by the vfstride entry + * to compute the target config space physical address. We + * use the hardware target "cfgpa" to determine the target + * hwdev that is being addressed. This makes us independent + * of the vfstride scaling of "info->vfid" to find the target device. + * + * We could record the vfid scale factor in the spmt and then + * shift the "info->vfid" as the hw would do, but using the + * "info->direct_addr" provided by hw is easier and gives us + * the same answer. + * + * We could also lookup based on stlp->bdf that comes from the + * decode of the rawtlp so we know it is accurate. Right now + * our bdf lookup is not very efficient so "cfgpa" is faster. + * + * Sometimes we get called with a "cfgpa" that is outside the + * device cfgcur region. This happens when we have indirect_catchall + * and the cfgpa is in zerospa. For this case we return 0 + * which is an unused handle so config space is all 0's so we'll + * end up reading a 0 for all values. 
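 *
 * Concretely: if PCIEHW_CFGSHIFT were 12 (4 KiB of shadow config
 * space per device -- an illustrative value; the real constant lives
 * in the pciehw headers), a cfgpa of cfgcurpa + 0x5000 gives cfgoff
 * 0x5000 and hwdevh 0x5000 >> 12 == 5.  Handle 0 is the unused
 * "no device" slot, which is why the out-of-range case below returns
 * 0 and behaves as all-zeros config space.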
+ */ +static pciehwdevh_t +cfgpa_to_hwdevh(const u_int64_t cfgpa) +{ +#define CFGCURSZ sizeof(((pciehw_mem_t *)0L)->cfgcur) + const u_int64_t cfgcurpa = pciesvc_cfgcur_pa(); + + if (cfgpa >= cfgcurpa && cfgpa < cfgcurpa + CFGCURSZ) { + const u_int64_t cfgoff = cfgpa - cfgcurpa; + return cfgoff >> PCIEHW_CFGSHIFT; + } + return 0; +} + +void +pciehw_cfg_load(pciehwdev_t *phwdev) +{ + pciehw_pmt_load_cfg(phwdev); +} + +void +pciehw_cfg_unload(pciehwdev_t *phwdev) +{ + pciehw_pmt_unload_cfg(phwdev); +} + +/***************************************************************** + * cfgrd handlers + */ + +static void +pciehw_cfgrd_delay(handler_ctx_t *hctx) +{ + pciesvc_debug_cmd(&hctx->retval); +} + +/***************************************************************** + * cfgwr handlers + */ + +static u_int32_t +cfg_bar32(cfgspace_t *cs, const u_int32_t cfgoff) +{ + u_int32_t baraddr = cfgspace_readd(cs, cfgoff); + return baraddr; +} + +static u_int64_t +cfg_bar64(cfgspace_t *cs, const u_int32_t cfgoff) +{ + u_int32_t barlo, barhi; + + barlo = cfgspace_readd(cs, cfgoff + 0); + barhi = cfgspace_readd(cs, cfgoff + 4); + + return ((u_int64_t)barhi << 32) | barlo; +} + +static u_int64_t +cfg_baraddr(cfgspace_t *cs, const u_int32_t cfgoff, const u_int32_t barlen) +{ + u_int64_t baraddr; + + if (barlen == 8) { + baraddr = cfg_bar64(cs, cfgoff); + } else { + baraddr = cfg_bar32(cs, cfgoff); + } + return baraddr; +} + +static void +pciehw_cfg_bars_enable(pciehwdev_t *phwdev, const u_int16_t cmd) +{ + const int io_en = (cmd & PCI_COMMAND_IO) != 0; + const int mem_en = (cmd & PCI_COMMAND_MEMORY) != 0; + pciehwbar_t *phwbar; + int i; + +#ifdef PCIEMGR_DEBUG + if (!phwdev->vf) { + pciesvc_logdebug("bars_enable: %s mem%c io%c\n", + pciehwdev_get_name(phwdev), + mem_en ? '+' : '-', + io_en ? '+' : '-'); + } +#endif + + for (phwbar = phwdev->bar, i = 0; i < PCIEHW_NBAR; i++, phwbar++) { + if (!phwbar->valid) continue; + + if ((phwbar->type == PCIEHWBARTYPE_MEM || + phwbar->type == PCIEHWBARTYPE_MEM64)) { + pciehw_bar_enable(phwdev, phwbar, mem_en); + } else if (phwbar->type == PCIEHWBARTYPE_IO) { + pciehw_bar_enable(phwdev, phwbar, io_en); + } + } +} + +/* + * rombar is enabled iff CMD.memory_space_en && ROMBAR.en. + */ +static void +pciehw_cfg_rombar_enable(pciehwdev_t *phwdev, cfgspace_t *cs) +{ + pciehwbar_t *phwbar = &phwdev->rombar; + + if (phwbar->valid) { + const int mem_en = (cfgspace_readw(cs, PCI_COMMAND) & 0x2) != 0; + const int rom_en = (cfgspace_readd(cs, PCI_ROM_ADDRESS) & 0x1) != 0; + + pciehw_bar_enable(phwdev, phwbar, mem_en && rom_en); + } +} + +static void +pciehw_cfg_busmaster_enable(pciehwdev_t *phwdev, const int on) +{ +#ifdef PCIEMGR_DEBUG + if (!phwdev->vf) { + pciesvc_logdebug("busmaster_enable: %s %s\n", + pciehwdev_get_name(phwdev), on ? "on" : "off"); + } +#endif + if (on) { + pciehw_hdrt_load(phwdev->lifb, phwdev->lifc, phwdev->bdf); + } else { + pciehw_hdrt_unload(phwdev->lifb, phwdev->lifc); + } +} + +static void +pciehw_cfg_cmd(pciehwdev_t *phwdev, cfgspace_t *cs, const u_int16_t cmd) +{ + u_int16_t msixcap, msixctl; + + /* + * PF check cmd reg for bar enables. + * VF bar enables come from PF sriov capability (see cfgwr_sriov()). 
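 *
 * For reference, the standard PCI_COMMAND bits consumed here are
 * bit 0 (PCI_COMMAND_IO), bit 1 (PCI_COMMAND_MEMORY), bit 2
 * (PCI_COMMAND_MASTER) and bit 10 (PCI_COMMAND_INTX_DISABLE).
 * A worked example: a host write of cmd = 0x0406 enables memory
 * decode and bus mastering, leaves I/O decode off, and sets
 * intx_disable, so the code below loads the memory bars, loads the
 * hdrt entries for dma, and leaves legacy INTx masked (fmask).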
+ */ + if (!phwdev->vf) { + /* bar control */ + pciehw_cfg_bars_enable(phwdev, cmd); + /* cmd.mem_enable might have enabled rombar */ + pciehw_cfg_rombar_enable(phwdev, cs); + + msixcap = cfgspace_findcap(cs, PCI_CAP_ID_MSIX); + if (msixcap) { + msixctl = cfgspace_readw(cs, msixcap + PCI_MSIX_FLAGS); + } else { + msixctl = 0; + } + + /* intx_disable */ + if ((msixctl & PCI_MSIX_FLAGS_ENABLE) == 0) { + const int legacy = 1; + const int fmask = (cmd & PCI_COMMAND_INTX_DISABLE) != 0; + pciehw_intr_config(phwdev, legacy, fmask); + } + } + + pciehw_cfg_busmaster_enable(phwdev, (cmd & PCI_COMMAND_MASTER) != 0); +} + +static void +pciehw_cfgwr_cmd(const handler_ctx_t *hctx) +{ + pciehwdev_t *phwdev; + cfgspace_t cs; + u_int16_t cmd; + + phwdev = pciehwdev_get(hctx->hwdevh); + pciesvc_cfgspace_get(hctx->hwdevh, &cs); + cmd = cfgspace_readw(&cs, PCI_COMMAND); + pciehw_cfg_cmd(phwdev, &cs, cmd); + pciesvc_cfgspace_put(&cs, CLEAN); + pciehwdev_put(phwdev, DIRTY); /* updated bars[].bdf */ +} + +static void +pciehw_cfgwr_bars(pciehwdev_t *phwdev, + const pcie_stlp_t *stlp, + cfgspace_t *cs, + const int cfgbase) +{ + pciehwbar_t *phwbar; + int i; + + for (phwbar = phwdev->bar, i = 0; i < PCIEHW_NBAR; i++, phwbar++) { + if (phwbar->valid) { + const int cfgoff = cfgbase + phwbar->cfgidx * 4; + const int barlen = phwbar->type == PCIEHWBARTYPE_MEM64 ? 8 : 4; + if (stlp_overlap(stlp, cfgoff, barlen)) { + const u_int64_t vfbaroff = (pciehw_bar_getsize(phwbar) * + phwdev->vfidx); + u_int64_t baraddr = cfg_baraddr(cs, cfgoff, barlen); + u_int64_t addr; + + if (phwbar->type == PCIEHWBARTYPE_IO) { + baraddr &= ~0x3ULL; + } else { + baraddr &= ~0xfULL; + } + addr = baraddr + vfbaroff; +#ifdef PCIEMGR_DEBUG + pciesvc_logdebug("%s: bar %d pmt %d setaddr 0x%" PRIx64 "\n", + pciehwdev_get_name(phwdev), + phwbar->cfgidx, phwbar->pmtb, addr); +#endif + pciehw_bar_setaddr(phwbar, addr); + } + } + } +} + +static void +pciehw_cfgwr_dev_bars(const handler_ctx_t *hctx) +{ + const int cfgbase = 0x10; + pciehwdev_t *phwdev; + cfgspace_t cs; + + phwdev = pciehwdev_get(hctx->hwdevh); + pciesvc_cfgspace_get(hctx->hwdevh, &cs); + pciehw_cfgwr_bars(phwdev, &hctx->stlp, &cs, cfgbase); + pciesvc_cfgspace_put(&cs, CLEAN); + pciehwdev_put(phwdev, DIRTY); /* updated phwdev->bars[] bdf,addr */ + +} + +static void +pciehw_cfgwr_rom_bar(const handler_ctx_t *hctx) +{ + pciehwdev_t *phwdev; + pciehwbar_t *phwbar; + cfgspace_t cs; + u_int32_t baraddr; + + phwdev = pciehwdev_get(hctx->hwdevh); + pciesvc_cfgspace_get(hctx->hwdevh, &cs); + baraddr = cfgspace_readd(&cs, PCI_ROM_ADDRESS); + baraddr &= ~0x1; /* mask enable bit */ + phwbar = &phwdev->rombar; + pciehw_bar_setaddr(phwbar, baraddr); + pciehw_cfg_rombar_enable(phwdev, &cs); + pciesvc_cfgspace_put(&cs, CLEAN); + pciehwdev_put(phwdev, DIRTY); /* updated phwdev->bars[] bdf,addr */ +} + +static void +pciehw_mgmtchg_event(const pciehwdev_t *phwdev) +{ + pciesvc_eventdata_t evd; + + pciesvc_memset(&evd, 0, sizeof(evd)); + evd.evtype = PCIESVC_EV_MGMTCHG; + evd.port = phwdev->port; + evd.lif = phwdev->lifb; + pciesvc_event_handler(&evd, sizeof(evd)); +} + +/* + * Set a new device bus identity for this device. + * This happens when a bridge secondary bus is written. + * We also load the cfg entries into the pmt tcam if requested. 
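 *
 * A worked example of the bstart delta handling in the loop below:
 * if the device's first pmt entry was built with bstart 0x05 and a
 * later entry (vfs spilling onto the next bus) with bstart 0x06,
 * then busbase is 0x05, the later entry's busdelta is 1, and a new
 * bus of 0xb5 programs the two entries with buses 0xb5 and 0xb6.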
+ */ +static void +pciehw_cfg_set_bus(pciehwdev_t *phwdev, const u_int8_t bus, const int load) +{ + u_int8_t busbase, busdelta; + u_int32_t pmti; + + busbase = 0; + for (pmti = phwdev->pmtb; pmti < phwdev->pmtb + phwdev->pmtc; pmti++) { + pciehw_spmt_t *spmt = pciesvc_spmt_get(pmti); + pmt_t *pmt = &spmt->pmt; + const pmr_cfg_entry_t *pmr = &pmt->pmre.cfg; + + /* + * If we have >255 vfs then some pmt entries will have a + * different bus. We'll keep track of the (pre-adjusted) bstart + * from the first entry and apply any delta to the new bus. + */ + if (pmti == phwdev->pmtb) { + busbase = pmr->bstart; + } + busdelta = pmr->bstart - busbase; + + pmt_cfg_set_bus(pmt, bus + busdelta); + if (load) { + pmt_set(pmti, pmt); + spmt->loaded = 1; + } + pciesvc_spmt_put(spmt, DIRTY); /* pmt, loaded */ + } + if (!phwdev->cfgloaded && load) { + phwdev->cfgloaded = 1; + } +} + +/* + * Device captures a new bus. We do this when our parent bridge + * gets a new secondary bus number assigned. + * If new bus == 0 then unload the cfg entries. Host software + * will set bus == 0 during the bus walk step as the number of + * required buses due to ari/sriov is determined; only a single + * bus at a time gets a bus number assigned. + * + * For example, during bios scan + * (on UCS C220 Bios Version C220M5.3.1.3c.0307181404, and + * Dell R6525 BIOS Version 2.2.5) we see: + * [2000-06-21 09:18:28.829918] bridgedn0: hwbus 0xb4 secbus 0xb5 adjbus 0x01 + * [2000-06-21 09:18:28.833321] bridgedn0: hwbus 0xb4 secbus 0x00 adjbus 0x00 + * [2000-06-21 09:18:28.837797] bridgedn1: hwbus 0xb4 secbus 0xb5 adjbus 0x01 + * [2000-06-21 09:18:28.841914] bridgedn1: hwbus 0xb4 secbus 0x00 adjbus 0x00 + * [2000-06-21 09:18:28.845558] bridgedn2: hwbus 0xb4 secbus 0xb5 adjbus 0x01 + * [2000-06-21 09:18:28.849805] bridgedn2: hwbus 0xb4 secbus 0x00 adjbus 0x00 + * + * Then, the bus requirements are determined, and the final config is set: + * [2000-06-21 09:18:31.920035] bridgedn0: hwbus 0xb4 secbus 0xb5 adjbus 0x01 + * [2000-06-21 09:18:31.946122] bridgedn1: hwbus 0xb4 secbus 0xb6 adjbus 0x02 + * [2000-06-21 09:18:31.968468] bridgedn2: hwbus 0xb4 secbus 0xb7 adjbus 0x03 + */ +static void +pciehw_capture_bus(pciehwdev_t *phwdev, const u_int8_t bus, const int load) +{ + if (!phwdev->vf) { + pciesvc_loginfo("capture_bus: %s bdf 0x%04x new bus 0x%02x\n", + pciehwdev_get_name(phwdev), phwdev->bdf, bus); + } + if (bus) { + const uint8_t vfbusoff = phwdev->vf ? 
bdf_to_bus(phwdev->vfidx+1) : 0; + const uint16_t newbdf = bdf_make(bus + vfbusoff, + bdf_to_dev(phwdev->bdf), + bdf_to_fnc(phwdev->bdf)); + if (phwdev->bdf != newbdf) { + phwdev->bdf = newbdf; + /* event for mgmteth bdf change */ + if (phwdev->type == PCIEHDEVICE_MGMTETH) { + pciehw_mgmtchg_event(phwdev); + } + } + pciehw_cfg_set_bus(phwdev, bus, load); + } else { + pciehw_cfg_unload(phwdev); + } +} + +static void +pciehw_assign_bus(pciehwdevh_t hwdevh, const u_int8_t bus, const int load) +{ + while (hwdevh) { + pciehwdev_t *phwdev = pciehwdev_get(hwdevh); + const pciehwdevh_t childh = phwdev->childh; + const pciehwdevh_t peerh = phwdev->peerh; + + pciehw_capture_bus(phwdev, bus, load); + pciehwdev_put(phwdev, DIRTY); /* bdf */ + + /* also assign bus to vfs if any */ + pciehw_assign_bus(childh, bus, load); + + hwdevh = peerh; + } +} + +static void +pciehw_bridge_secbus(pciehwdev_t *phwdev) +{ + cfgspace_t cs; + u_int8_t hwbus, secbus, adjbus; + pciehwdevh_t childh; + + /* + * Note that our bridge PRIMARY_BUS is the same + * as hwbus, but pribus is optional in pcie and + * some systems (UCS bios) don't set bridge pribus + * during the initial bus scan, so we get the + * secbus of the hwbridge as a reliable bus. + */ + portcfg_read_bus(phwdev->port, NULL, &hwbus, NULL); + + pciesvc_cfgspace_get(phwdev->hwdevh, &cs); + secbus = cfgspace_get_secbus(&cs); + pciesvc_cfgspace_put(&cs, CLEAN); + + /* + * The bridge secbus is a physical bus number. + * The hardware usually deals with "adjusted" bus numbers, + * i.e. bus numbers relative to the secondary bus of the hw bridge. + * Here we perform the bus adjustment the hw will do to our + * secondary bus by subtracting the hw bridge secondary bus + * from the configured secbus to get the + * adjusted bus to assign to our devices. + */ + adjbus = (secbus && hwbus != 0xff) ? 
secbus - hwbus : 0; + + pciesvc_loginfo("%s: hwbus 0x%02x secbus 0x%02x adjbus 0x%02x\n", + pciehwdev_get_name(phwdev), hwbus, secbus, adjbus); + childh = phwdev->childh; + + pciehw_assign_bus(childh, adjbus, 1); +} + +static void +pciehw_cfgwr_bridge_bus(const handler_ctx_t *hctx) +{ + if (stlp_overlap(&hctx->stlp, PCI_SECONDARY_BUS, sizeof(uint8_t))) { + pciehwdev_t *phwdev = pciehwdev_get(hctx->hwdevh); + + pciehw_bridge_secbus(phwdev); + + pciehwdev_put(phwdev, CLEAN); + } +} + +static void +pciehw_cfgwr_bridgectl(const handler_ctx_t *hctx) +{ + cfgspace_t cs; + u_int16_t brctl; + u_int8_t secbus; + + pciesvc_cfgspace_get(hctx->hwdevh, &cs); + brctl = cfgspace_readw(&cs, PCI_BRIDGE_CONTROL); + secbus = cfgspace_get_secbus(&cs); + pciesvc_cfgspace_put(&cs, CLEAN); + + if (brctl & PCI_BRIDGE_CTL_BUS_RESET) { + pciehwdev_t *phwdev = pciehwdev_get(hctx->hwdevh); + pciehw_reset_bus(phwdev, secbus); + pciehwdev_put(phwdev, CLEAN); + } +} + +static void +pciehw_cfgwr_msix(const handler_ctx_t *hctx) +{ + const u_int16_t reg = hctx->stlp.addr; + const u_int16_t regdw = reg >> 2; + pciehwdev_t *phwdev; + cfgspace_t cs; + u_int16_t msixctl, cmd; + int msix_en, msix_mask, fmask, legacy; + + pciesvc_cfgspace_get(hctx->hwdevh, &cs); + msixctl = cfgspace_readw(&cs, (regdw << 2) + 2); + msix_en = (msixctl & PCI_MSIX_FLAGS_ENABLE) != 0; + msix_mask = (msixctl & PCI_MSIX_FLAGS_MASKALL) != 0; + + phwdev = pciehwdev_get(hctx->hwdevh); + + if (msix_en) { + /* msix mode */ + legacy = 0; + fmask = msix_mask; + } else if (phwdev->vf) { + /* sriov vf disabled */ + legacy = 0; + fmask = 1; + } else { + /* intx mode */ + legacy = 1; + cmd = cfgspace_readw(&cs, PCI_COMMAND); + fmask = phwdev->vf || (cmd & PCI_COMMAND_INTX_DISABLE) != 0; + } + pciesvc_cfgspace_put(&cs, CLEAN); + + pciehw_intr_config(phwdev, legacy, fmask); + pciehwdev_put(phwdev, CLEAN); +} + +static void +pciehw_cfgwr_vpd(const handler_ctx_t *hctx) +{ + cfgspace_t cs; + u_int16_t vpdcap, addr, f; + u_int32_t data; + + pciesvc_cfgspace_get(hctx->hwdevh, &cs); + vpdcap = cfgspace_findcap(&cs, PCI_CAP_ID_VPD); + addr = cfgspace_readw(&cs, vpdcap + PCI_VPD_ADDR); + f = addr & PCI_VPD_ADDR_F; + addr &= PCI_VPD_ADDR_MASK; + + /* + * Flag set indicates write data, clear flag when complete. + * Flag clear indicates read data, set flag when complete. 
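 *
 * This is the device half of the standard PCI VPD handshake.  For
 * contrast, the host half might look like this sketch (cfg_readw,
 * cfg_writew and cfg_readd stand in for the host's config space
 * accessors, and a real implementation would bound the poll loop):
 *
 *   uint32_t host_vpd_read(uint16_t vpdcap, uint16_t off) {
 *       // start a read: write the address with PCI_VPD_ADDR_F clear
 *       cfg_writew(vpdcap + PCI_VPD_ADDR, off & PCI_VPD_ADDR_MASK);
 *       // the device sets the flag when the data register is valid
 *       while (!(cfg_readw(vpdcap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F))
 *           ;
 *       return cfg_readd(vpdcap + PCI_VPD_DATA);
 *   }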
+ */ + if (f) { + /* vpd write */ + data = cfgspace_readd(&cs, vpdcap + PCI_VPD_DATA); + pciehw_vpd_write(hctx->hwdevh, addr, data); + cfgspace_writew(&cs, vpdcap + PCI_VPD_ADDR, addr); + } else { + /* vpd read */ + data = pciehw_vpd_read(hctx->hwdevh, addr); + cfgspace_writed(&cs, vpdcap + PCI_VPD_DATA, data); + pciesvc_mem_barrier(); /* data lands *before* we set ADDR_F */ + cfgspace_writew(&cs, vpdcap + PCI_VPD_ADDR, addr | PCI_VPD_ADDR_F); + } + pciesvc_cfgspace_put(&cs, DIRTY); /* VPD_DATA,VPD_ADDR */ +} + +static void +pciehw_cfgwr_pcie_devctl(const handler_ctx_t *hctx) +{ + cfgspace_t cs; + u_int16_t pciecap, devctl; + + pciesvc_cfgspace_get(hctx->hwdevh, &cs); + pciecap = cfgspace_findcap(&cs, PCI_CAP_ID_EXP); + devctl = cfgspace_readw(&cs, pciecap + PCI_EXP_DEVCTL); + pciesvc_cfgspace_put(&cs, CLEAN); + + if (stlp_overlap(&hctx->stlp, pciecap + 0x8, sizeof(u_int16_t))) { + if (devctl & PCI_EXP_DEVCTL_BCR_FLR) { + pciehwdev_t *phwdev = pciehwdev_get(hctx->hwdevh); + pciehw_reset_flr(phwdev); + pciehwdev_put(phwdev, CLEAN); + } + } +} + +static void +pciehw_sriov_numvfs_event(pciehwdev_t *phwdev, const u_int16_t numvfs) +{ + pciesvc_eventdata_t evd; + pciesvc_sriov_numvfs_t *sriov_numvfs; + + pciesvc_memset(&evd, 0, sizeof(evd)); + evd.evtype = PCIESVC_EV_SRIOV_NUMVFS; + evd.port = phwdev->port; + evd.lif = phwdev->lifb; + sriov_numvfs = &evd.sriov_numvfs; + sriov_numvfs->numvfs = numvfs; + pciesvc_event_handler(&evd, sizeof(evd)); +} + +/* + * Some of the bar entries of vf0 cover the bars for all vfs. + * Apply the "numvfs" limit to these vf0 bars. + */ +static int +pciehw_sriov_adjust_vf0(pciehwdev_t *vfhwdev, const int numvfs) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + pciehwbar_t *phwbar; + int i, r, do_log; + + r = 0; + for (phwbar = vfhwdev->bar, i = 0; i < PCIEHW_NBAR; i++, phwbar++) { + pciehw_spmt_t *spmt, *spmte; + if (!phwbar->valid) continue; + do_log = 1; /* log adjust_vf0 for first pmt of bar */ + spmt = &pshmem->spmt[phwbar->pmtb]; + spmte = spmt + phwbar->pmtc; + for ( ; spmt < spmte; spmt++) { + if (spmt->vf0) { + const u_int64_t pmtaddr = phwbar->addr + spmt->baroff; + r = pciehw_pmt_adjust_vf0(spmt, pmtaddr, numvfs, do_log); + if (r < 0) goto out; +#ifndef PCIEMGR_DEBUG + do_log = 0; +#endif + } + } + } + out: + return r; +} + +/* + * Enable this VF. Make it visible on the PCIe bus in cfg space, + * and enable bars too if Memory Space Enable (mse) is set. + */ +static void +pciehw_sriov_enable_vf(pciehwdev_t *vfhwdev, const int mse) +{ + u_int16_t cmd; + + /* XXX handle vfe load/unload cfg space */ + /* refactor and call pciehw_cfg_load(vfhwdev) */ + + /* load/unload the bars */ + cmd = mse ? 
PCI_COMMAND_MEMORY : 0; + pciehw_cfg_bars_enable(vfhwdev, cmd); +} + +static void +pciehw_sriov_enable_vfs(pciehwdev_t *phwdev, const int numvfs, const int mse) +{ + pciehwdev_t *vfhwdev; + int vfidx, r; + + vfhwdev = pciehwdev_vfdev_get(phwdev, 0); + r = pciehw_sriov_adjust_vf0(vfhwdev, numvfs); + pciehwdev_vfdev_put(vfhwdev, CLEAN); + if (r < 0) { + pciesvc_logerror("%s: adjust_vf0 failed\n", + pciehwdev_get_name(phwdev)); + return; + } + + for (vfidx = 0; vfidx < numvfs; vfidx++) { + vfhwdev = pciehwdev_vfdev_get(phwdev, vfidx); + pciehw_sriov_enable_vf(vfhwdev, mse); + pciehwdev_vfdev_put(vfhwdev, DIRTY); /* bdf */ + } +} + +static void +pciehw_sriov_disable_vf(pciehwdev_t *vfhwdev) +{ + const u_int16_t cmd = 0; + pciehw_cfg_bars_enable(vfhwdev, cmd); + + /* XXX handle vfe load/unload cfg space */ + /* refactor and call pciehw_cfg_unload(vfhwdev) */ +} + +/* + * Disable VFs. Unload the bars and clear bus master enable. + * We'll reset cfg space for the disabled VFs, which clears bus master enable. + */ +static void +pciehw_sriov_disable_vfs(pciehwdev_t *phwdev, const int vfb, const int vfc) +{ + int vfidx; + + for (vfidx = vfb; vfidx < vfb + vfc; vfidx++) { + pciehwdev_t *vfhwdev = pciehwdev_vfdev_get(phwdev, vfidx); + pciehw_sriov_disable_vf(vfhwdev); + pciehwdev_vfdev_put(vfhwdev, DIRTY); + } + /* Park disabled VFs in reset state. */ + pciehw_reset_vfs(phwdev, vfb, vfc); +} + +/* + * If VF Enable (vfe) is set, then enable VFs and possibly enable bars + * if Memory Space Enable (mse) is also set. + * + * If VF Enable (vfe) is clear, then disable VFs (mse is ignored). + */ +static void +pciehw_sriov_ctrl_numvfs(pciehwdev_t *phwdev, + const u_int16_t ctrl, const u_int16_t numvfs) +{ + const int vfe = (ctrl & PCI_SRIOV_CTRL_VFE) != 0; /* VF Enable */ + const int mse = (ctrl & PCI_SRIOV_CTRL_MSE) != 0; /* Memory Space Enable */ + + if (vfe) { + /* + * VF Enable set, first disable any enabled VFs greater than numvfs, + * then enable [0-numvfs) range. + */ + if (phwdev->enabledvfs > numvfs) { + pciehw_sriov_disable_vfs(phwdev, + numvfs, phwdev->enabledvfs - numvfs); + } + pciehw_sriov_enable_vfs(phwdev, numvfs, mse); + phwdev->enabledvfs = numvfs; + + } else { + /* + * VF Enable clear, disable all enabled VFs. + */ + if (phwdev->enabledvfs) { + pciehw_sriov_disable_vfs(phwdev, 0, phwdev->enabledvfs); + phwdev->enabledvfs = 0; + } + } + + /* + * Generate an event for numvfs change. + */ + if (phwdev->numvfs != numvfs) { + pciehw_sriov_numvfs_event(phwdev, numvfs); + phwdev->numvfs = numvfs; + } +} + +void +pciehw_sriov_ctrl(pciehwdev_t *phwdev, + const u_int16_t ctrl, const u_int16_t numvfs) +{ + if (phwdev->sriovctrl != ctrl) { +#ifdef __aarch64__ + pciesvc_loginfo("%s " + "sriov_ctrl 0x%04x vfe%c mse%c ari%c numvfs %d\n", + pciehwdev_get_name(phwdev), + ctrl, + ctrl & PCI_SRIOV_CTRL_VFE ? '+' : '-', + ctrl & PCI_SRIOV_CTRL_MSE ? '+' : '-', + ctrl & PCI_SRIOV_CTRL_ARI ? 
'+' : '-', + numvfs); +#endif + pciehw_sriov_ctrl_numvfs(phwdev, ctrl, numvfs); + phwdev->sriovctrl = ctrl; + } +} + +static void +pciehw_cfgwr_sriov_ctrl(const handler_ctx_t *hctx) +{ + pciehwdev_t *phwdev; + cfgspace_t cs; + u_int16_t sriovcap, sriovctrl, numvfs; + + phwdev = pciehwdev_get(hctx->hwdevh); + pciesvc_cfgspace_get(hctx->hwdevh, &cs); + sriovcap = cfgspace_findextcap(&cs, PCI_EXT_CAP_ID_SRIOV); + sriovctrl = cfgspace_readw(&cs, sriovcap + PCI_SRIOV_CTRL); + + numvfs = cfgspace_readw(&cs, sriovcap + PCI_SRIOV_NUM_VF); + if (numvfs > phwdev->totalvfs) numvfs = phwdev->totalvfs; + + pciesvc_cfgspace_put(&cs, CLEAN); + + /* + * If we're running as an indirect transaction then we'll have ientry + * set. If indirect, complete the transaction now before we go do + * the potentially long work of resetting a bunch of VFs. + */ + if (hctx->ientry) { + pciehw_indirect_complete(hctx->ientry); + } + + pciehw_sriov_ctrl(phwdev, sriovctrl, numvfs); + pciehwdev_put(phwdev, DIRTY); /* set sriovctrl,enabledvs */ +} + +static void +pciehw_cfgwr_sriov_bars(const handler_ctx_t *hctx) +{ + pciehwdev_t *phwdev, *vfhwdev; + cfgspace_t pfcs; + int vfidx, sriovcap; + + phwdev = pciehwdev_get(hctx->hwdevh); + pciesvc_cfgspace_get(hctx->hwdevh, &pfcs); + sriovcap = cfgspace_findextcap(&pfcs, PCI_EXT_CAP_ID_SRIOV); + + /* + * Distribute the new bar address to all the VFs. + * Each VF will compute its own offset within + * the bar for its VF sliced region. + */ + for (vfidx = 0; vfidx < phwdev->totalvfs; vfidx++) { + vfhwdev = pciehwdev_vfdev_get(phwdev, vfidx); + pciehw_cfgwr_bars(vfhwdev, &hctx->stlp, &pfcs, sriovcap + 0x24); + pciehwdev_vfdev_put(vfhwdev, DIRTY); /* vfhwdev->bars[] bdf,addr */ + } + pciesvc_cfgspace_put(&pfcs, CLEAN); + pciehwdev_put(phwdev, CLEAN); +} + +/***************************************************************** + * cfg handlers + */ + +static void +pciehw_cfgrd_handler(handler_ctx_t *hctx) +{ + const u_int16_t reg = hctx->stlp.addr; + const u_int16_t regdw = reg >> 2; + pciehw_cfghnd_t hnd = PCIEHW_CFGHND_NONE; + + if (regdw < PCIEHW_CFGHNDSZ) { + pciehwdev_t *phwdev = pciehwdev_get(hctx->hwdevh); + hnd = phwdev->cfghnd[regdw]; + pciehwdev_put(phwdev, CLEAN); + } + switch (hnd) { + default: + case PCIEHW_CFGHND_NONE: + break; + case PCIEHW_CFGHND_DBG_DELAY: + pciehw_cfgrd_delay(hctx); + break; + } +} + +static void +pciehw_cfgwr_handler(const handler_ctx_t *hctx) +{ + const u_int16_t reg = hctx->stlp.addr; + const u_int16_t regdw = reg >> 2; + pciehw_cfghnd_t hnd = PCIEHW_CFGHND_NONE; + + if (regdw < PCIEHW_CFGHNDSZ) { + pciehwdev_t *phwdev = pciehwdev_get(hctx->hwdevh); + hnd = phwdev->cfghnd[regdw]; + pciehwdev_put(phwdev, CLEAN); + } + switch (hnd) { + default: + case PCIEHW_CFGHND_NONE: + break; + case PCIEHW_CFGHND_CMD: + pciehw_cfgwr_cmd(hctx); + break; + case PCIEHW_CFGHND_DEV_BARS: + pciehw_cfgwr_dev_bars(hctx); + break; + case PCIEHW_CFGHND_ROM_BAR: + pciehw_cfgwr_rom_bar(hctx); + break; + case PCIEHW_CFGHND_BRIDGE_BUS: + pciehw_cfgwr_bridge_bus(hctx); + break; + case PCIEHW_CFGHND_BRIDGECTL: + pciehw_cfgwr_bridgectl(hctx); + break; + case PCIEHW_CFGHND_MSIX: + pciehw_cfgwr_msix(hctx); + break; + case PCIEHW_CFGHND_VPD: + pciehw_cfgwr_vpd(hctx); + break; + case PCIEHW_CFGHND_PCIE_DEVCTL: + pciehw_cfgwr_pcie_devctl(hctx); + break; + case PCIEHW_CFGHND_SRIOV_CTRL: + pciehw_cfgwr_sriov_ctrl(hctx); + break; + case PCIEHW_CFGHND_SRIOV_BARS: + pciehw_cfgwr_sriov_bars(hctx); + break; + } +} + +/***************************************************************** + * notify 
 handlers + */ + +void +pciehw_cfgrd_notify(const int port, notify_entry_t *nentry) +{ + handler_ctx_t hctx; + + pciesvc_memset(&hctx, 0, sizeof(hctx)); + hctx.port = port; + hctx.nentry = nentry; + hctx.hwdevh = cfgpa_to_hwdevh(nentry->info.direct_addr); + pcietlp_decode(&hctx.stlp, nentry->rtlp, sizeof(nentry->rtlp)); + + pciehw_cfgrd_handler(&hctx); +} + +void +pciehw_cfgwr_notify(const int port, notify_entry_t *nentry) +{ + handler_ctx_t hctx; + + pciesvc_memset(&hctx, 0, sizeof(hctx)); + hctx.port = port; + hctx.nentry = nentry; + hctx.hwdevh = cfgpa_to_hwdevh(nentry->info.direct_addr); + pcietlp_decode(&hctx.stlp, nentry->rtlp, sizeof(nentry->rtlp)); + + pciehw_cfgwr_handler(&hctx); +} + +/***************************************************************** + * indirect handlers + */ + +void +pciehw_cfgrd_indirect(const int port, indirect_entry_t *ientry) +{ + handler_ctx_t hctx; + cfgspace_t cs; + + pciesvc_memset(&hctx, 0, sizeof(hctx)); + hctx.port = port; + hctx.ientry = ientry; + hctx.hwdevh = cfgpa_to_hwdevh(ientry->info.direct_addr); + pcietlp_decode(&hctx.stlp, ientry->rtlp, sizeof(ientry->rtlp)); + + /* + * For indirect reads, read the current value at the target addr + * and put it in retval. The handler has a chance to modify + * retval if desired. + */ + pciesvc_cfgspace_get(hctx.hwdevh, &cs); + cfgspace_read(&cs, hctx.stlp.addr, hctx.stlp.size, &hctx.retval); + pciesvc_cfgspace_put(&cs, CLEAN); + + pciehw_cfgrd_handler(&hctx); + + ientry->data[0] = hctx.retval; + pciehw_indirect_complete(ientry); + +#ifdef PCIEMGR_DEBUG + pciesvc_logdebug("cfgrd_indirect: " + "hwdevh %d vfid %d rd 0x%lx sz %d data 0x%x\n", + hctx.hwdevh, ientry->info.vfid, + hctx.stlp.addr, hctx.stlp.size, ientry->data[0]); +#endif +} + +void +pciehw_cfgwr_indirect(const int port, indirect_entry_t *ientry) +{ + handler_ctx_t hctx; + cfgspace_t cs; + int r; + + pciesvc_memset(&hctx, 0, sizeof(hctx)); + hctx.port = port; + hctx.ientry = ientry; + hctx.hwdevh = cfgpa_to_hwdevh(ientry->info.direct_addr); + pcietlp_decode(&hctx.stlp, ientry->rtlp, sizeof(ientry->rtlp)); + +#ifdef PCIEMGR_DEBUG + pciesvc_logdebug("cfgwr_indirect: " + "hwdevh %d vfid %d wr 0x%lx sz %d data 0x%lx\n", + hctx.hwdevh, ientry->info.vfid, + hctx.stlp.addr, hctx.stlp.size, hctx.stlp.data); +#endif + + /* + * For indirect writes, write the data first, + * then let the handler run with the updated data. + */ + pciesvc_cfgspace_get(hctx.hwdevh, &cs); + r = cfgspace_write(&cs, hctx.stlp.addr, hctx.stlp.size, hctx.stlp.data); + pciesvc_cfgspace_put(&cs, DIRTY); + + if (r < 0) { + ientry->cpl = PCIECPL_CA; + } + pciehw_cfgwr_handler(&hctx); + pciehw_indirect_complete(ientry); +} + +/***************************************************************** + * reset + */ + +void +pciehw_cfg_reset(pciehwdev_t *phwdev, const pciesvc_rsttype_t rsttype) +{ + cfgspace_t cs; + u_int16_t cfgsz, cmd; + + pciesvc_cfgspace_get(pciehwdev_geth(phwdev), &cs); + cfgsz = cfgspace_size(&cs); + + /***************** + * reset cfg space + */ + pciesvc_memcpy_toio(cs.cur, cs.rst, cfgsz); + + /* Read reset value for cmd */ + cmd = cfgspace_readw(&cs, PCI_COMMAND); + pciehw_cfg_cmd(phwdev, &cs, cmd); + /* XXX Reset bar addrs? */ + + /* bridge just reset secbus to reset value=0 */ + if (cfgspace_get_headertype(&cs) == 0x1) { + pciehw_bridge_secbus(phwdev); + } + + if (phwdev->pf) { + u_int16_t sriovcap, sriovctrl, numvfs; + + /* Read reset values for sriovctrl, numvfs. 
*/ + sriovcap = cfgspace_findextcap(&cs, PCI_EXT_CAP_ID_SRIOV); + sriovctrl = cfgspace_readw(&cs, sriovcap + PCI_SRIOV_CTRL); + numvfs = cfgspace_readw(&cs, sriovcap + PCI_SRIOV_NUM_VF); + if (numvfs > phwdev->totalvfs) numvfs = phwdev->totalvfs; + + /* ARI-Capable bit preserved across FLR reset */ + if (rsttype == PCIESVC_RSTTYPE_FLR) { + sriovctrl |= (phwdev->sriovctrl & PCI_SRIOV_CTRL_ARI); + cfgspace_writew(&cs, sriovcap + PCI_SRIOV_CTRL, sriovctrl); + } + + /* release our cfgspace before resetting vfs */ + pciesvc_cfgspace_put(&cs, DIRTY); + + pciehw_sriov_ctrl(phwdev, sriovctrl, numvfs); + /* XXX Reset VF bar addrs? */ + } else { + pciesvc_cfgspace_put(&cs, DIRTY); + } +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cfgspace.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cfgspace.c new file mode 100644 index 0000000000..ecc9e24cf3 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cfgspace.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017,2021, Pensando Systems Inc. + */ + +#include "pciesvc_impl.h" +#include "cfgspace.h" + +/* + * These functions do the actual work of reading/writing + * the configuration space and associated mask region. + * These functions understand the implementation details + * and should not be called directly by external clients. + * + * Note that the config space memory region (in cfg->cur[]) is the + * actual representation of config space for devices exposed across + * the PCIe bus to the host. PCIe config space is little-endian. + * These functions are implemented to be endian-agnostic to run on + * either big- or little-endian cpus. + */ + +static inline u_int8_t +_cfgspace_getb_fld(u_int8_t *fld, const u_int16_t offset) +{ + return fld[offset]; +} + +static inline u_int16_t +_cfgspace_getw_fld(u_int8_t *fld, const u_int16_t offset) +{ + u_int16_t val; + + val = (((u_int16_t)fld[offset + 1] << 8) | + ((u_int16_t)fld[offset + 0] << 0)); + return val; +} + +static inline u_int32_t +_cfgspace_getd_fld(u_int8_t *fld, const u_int16_t offset) +{ + u_int32_t val; + + val = (((u_int32_t)fld[offset + 3] << 24) | + ((u_int32_t)fld[offset + 2] << 16) | + ((u_int32_t)fld[offset + 1] << 8) | + ((u_int32_t)fld[offset + 0] << 0)); + return val; +} + +static inline void +_cfgspace_setb_fld(u_int8_t *fld, const u_int16_t offset, const u_int8_t val) +{ + fld[offset] = val; +} + +static inline void +_cfgspace_setw_fld(u_int8_t *fld, const u_int16_t offset, const u_int16_t val) +{ + fld[offset + 0] = val; + fld[offset + 1] = val >> 8; +} + +static inline void +_cfgspace_setd_fld(u_int8_t *fld, const u_int16_t offset, const u_int32_t val) +{ + fld[offset + 0] = val; + fld[offset + 1] = val >> 8; + fld[offset + 2] = val >> 16; + fld[offset + 3] = val >> 24; +} + +static inline u_int8_t +cfgspace_getb_cur(cfgspace_t *cs, const u_int16_t offset) +{ + return _cfgspace_getb_fld(cs->cur, offset); +} + +static inline u_int16_t +cfgspace_getw_cur(cfgspace_t *cs, const u_int16_t offset) +{ + return _cfgspace_getw_fld(cs->cur, offset); +} + +static inline u_int32_t +cfgspace_getd_cur(cfgspace_t *cs, const u_int16_t offset) +{ + return _cfgspace_getd_fld(cs->cur, offset); +} + +static inline void +cfgspace_setb_cur(cfgspace_t *cs, const u_int16_t offset, const u_int8_t val) +{ + _cfgspace_setb_fld(cs->cur, offset, val); +} + +static inline void +cfgspace_setw_cur(cfgspace_t *cs, const u_int16_t offset, const u_int16_t val) +{ + _cfgspace_setw_fld(cs->cur, 
offset, val); +} + +static inline void +cfgspace_setd_cur(cfgspace_t *cs, const u_int16_t offset, const u_int32_t val) +{ + _cfgspace_setd_fld(cs->cur, offset, val); +} + +static inline u_int8_t +cfgspace_getb_msk(cfgspace_t *cs, const u_int16_t offset) +{ + return _cfgspace_getb_fld(cs->msk, offset); +} + +static inline u_int16_t +cfgspace_getw_msk(cfgspace_t *cs, const u_int16_t offset) +{ + return _cfgspace_getw_fld(cs->msk, offset); +} + +static inline u_int32_t +cfgspace_getd_msk(cfgspace_t *cs, const u_int16_t offset) +{ + return _cfgspace_getd_fld(cs->msk, offset); +} + +static inline void +cfgspace_setb_msk(cfgspace_t *cs, const u_int16_t offset, const u_int8_t val) +{ + _cfgspace_setb_fld(cs->msk, offset, val); +} + +static inline void +cfgspace_setw_msk(cfgspace_t *cs, const u_int16_t offset, const u_int16_t val) +{ + _cfgspace_setw_fld(cs->msk, offset, val); +} + +static inline void +cfgspace_setd_msk(cfgspace_t *cs, const u_int16_t offset, const u_int32_t val) +{ + _cfgspace_setd_fld(cs->msk, offset, val); +} + +/*****************************************************************/ + +/* + * Low-level config space initialization operations. + */ + +static u_int8_t +cfgspace_getb(cfgspace_t *cs, u_int16_t offset) +{ + if (offset < cfgspace_size(cs)) { + return cfgspace_getb_cur(cs, offset); + } + /* + * Any read between end of implementation and + * end of PCIe Spec size returns 0's. + */ + if (offset < 4096) { + return 0; + } + /* + * Shouldn't get any access beyond end of PCIe Spec size, + * but if so, return all 0xff's. + */ + return 0xff; +} + +static u_int16_t +cfgspace_getw(cfgspace_t *cs, u_int16_t offset) +{ + if (offset + 1 < cfgspace_size(cs)) { + return cfgspace_getw_cur(cs, offset); + } + /* + * Any read between end of implementation and + * end of PCIe Spec size returns 0's. + */ + if (offset + 1 < 4096) { + return 0; + } + /* + * Shouldn't get any access beyond end of PCIe Spec size, + * but if so, return all 0xff's. + */ + return 0xffff; +} + +static u_int32_t +cfgspace_getd(cfgspace_t *cs, u_int16_t offset) +{ + if (offset + 3 < cfgspace_size(cs)) { + return cfgspace_getd_cur(cs, offset); + } + /* + * Any read between end of implementation and + * end of PCIe Spec size returns 0's. + */ + if (offset + 3 < 4096) { + return 0; + } + /* + * Shouldn't get any access beyond end of PCIe Spec size, + * but if so, return all 0xff's. + */ + return 0xffffffff; +} + +u_int8_t +cfgspace_readb(cfgspace_t *cs, const u_int16_t offset) +{ + return cfgspace_getb(cs, offset); +} + +u_int16_t +cfgspace_readw(cfgspace_t *cs, const u_int16_t offset) +{ + return cfgspace_getw(cs, offset); +} + +u_int32_t +cfgspace_readd(cfgspace_t *cs, const u_int16_t offset) +{ + return cfgspace_getd(cs, offset); +} + +int +cfgspace_read(cfgspace_t *cs, + const u_int16_t offset, + const u_int8_t size, + u_int32_t *valp) +{ + switch (size) { + case 1: *valp = cfgspace_getb(cs, offset); break; + case 2: *valp = cfgspace_getw(cs, offset); break; + case 4: *valp = cfgspace_getd(cs, offset); break; + default: + return -1; + } + return 0; +} + +/*****************************************************************/ + +/* + * Config space writes. Normally config space is initialized + * with the cfgspace_set* functions. Once initialized, write + * accesses come through these APIs where we implement the write-mask + * fields (initialized with cfgspace_set[bwd]m() functions). + * A bit set in the write-mask indicates that bit is writeable + * by these operations. 
Read-only fields in the current value are + * merged with writeable fields from the new written value and the + * current contents of config space are replaced with the result. + */ + +void +cfgspace_writeb(cfgspace_t *cs, const u_int16_t offset, const u_int8_t val) +{ + const u_int8_t oval = cfgspace_getb_cur(cs, offset); + const u_int8_t wmsk = cfgspace_getb_msk(cs, offset); + const u_int8_t nval = (oval & ~wmsk) | (val & wmsk); + cfgspace_setb_cur(cs, offset, nval); +} + +void +cfgspace_writew(cfgspace_t *cs, const u_int16_t offset, const u_int16_t val) +{ + const u_int16_t oval = cfgspace_getw_cur(cs, offset); + const u_int16_t wmsk = cfgspace_getw_msk(cs, offset); + const u_int16_t nval = (oval & ~wmsk) | (val & wmsk); + cfgspace_setw_cur(cs, offset, nval); +} + +void +cfgspace_writed(cfgspace_t *cs, const u_int16_t offset, const u_int32_t val) +{ + const u_int32_t oval = cfgspace_getd_cur(cs, offset); + const u_int32_t wmsk = cfgspace_getd_msk(cs, offset); + const u_int32_t nval = (oval & ~wmsk) | (val & wmsk); + cfgspace_setd_cur(cs, offset, nval); +} + +int +cfgspace_write(cfgspace_t *cs, + const u_int16_t offset, + const u_int8_t size, + const u_int32_t val) +{ + switch (size) { + case 1: cfgspace_writeb(cs, offset, val); break; + case 2: cfgspace_writew(cs, offset, val); break; + case 4: cfgspace_writed(cs, offset, val); break; + default: + return -1; + } + return 0; +} + +/*****************************************************************/ + +u_int16_t +cfgspace_get_status(cfgspace_t *cs) +{ + return cfgspace_getw(cs, 0x6); +} + +u_int8_t +cfgspace_get_headertype(cfgspace_t *cs) +{ + return cfgspace_getb(cs, 0xe); +} + +u_int8_t +cfgspace_get_cap(cfgspace_t *cs) +{ + return cfgspace_getb(cs, 0x34); +} + +u_int8_t +cfgspace_get_pribus(cfgspace_t *cs) +{ + return cfgspace_getb(cs, 0x18); +} + +u_int8_t +cfgspace_get_secbus(cfgspace_t *cs) +{ + return cfgspace_getb(cs, 0x19); +} + +u_int8_t +cfgspace_get_subbus(cfgspace_t *cs) +{ + return cfgspace_getb(cs, 0x1a); +} + +/* + * Find capability header with id "capid" in the linked list of + * capability headers and return the config space address of it. + * Return 0 if "capid" is not found in the list. + */ +u_int8_t +cfgspace_findcap(cfgspace_t *cs, const u_int8_t capid) +{ + u_int16_t status = cfgspace_get_status(cs); + + /* check Capability List bit in status reg */ + if (status & (1 << 4)) { + int loops = 256 / 4; /* max-capspace / min-cap-size */ + u_int8_t capaddr; + + for (capaddr = cfgspace_get_cap(cs) & ~0x3; + loops && capaddr != 0; + capaddr = cfgspace_getb(cs, capaddr + 0x1) & ~0x3, loops--) { + const u_int8_t id = cfgspace_getb(cs, capaddr); + if (id == capid) { + return capaddr; /* found capid at capaddr */ + } + } + } + return 0; /* not found */ +} + +/*****************************************************************/ + +static u_int16_t +extcap_get_id(u_int32_t caphdr) +{ + return caphdr & 0x0000ffff; +} + +static u_int16_t +extcap_get_next(u_int32_t caphdr) +{ + return (caphdr >> 20) & 0xffc; +} + +/* + * Find extended capability header with id "capid" in the linked list of + * extended capability headers and return the config space address of it. + * Return 0 if "capid" is not found in the list. 
+ */ +u_int16_t +cfgspace_findextcap(cfgspace_t *cs, const u_int16_t capid) +{ + u_int16_t cap; + u_int32_t caphdr; + int loops = cfgspace_size(cs) / 4; /* (config size) / (min cap size) */ + + cap = 0x100; + do { + caphdr = cfgspace_getd(cs, cap); + if (extcap_get_id(caphdr) == capid) { + return cap; + } + cap = extcap_get_next(caphdr); + } while (cap && --loops); + + return 0; /* not found */ +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cfgspace.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cfgspace.h new file mode 100644 index 0000000000..fd8934916e --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cfgspace.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2019,2021, Pensando Systems Inc. + */ + +#ifndef __PCIESVC_CFGSPACE_H__ +#define __PCIESVC_CFGSPACE_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +typedef struct cfgspace_s { + u_int8_t *cur; + u_int8_t *msk; + u_int8_t *rst; + u_int16_t size; +} cfgspace_t; + +static inline u_int16_t +cfgspace_size(cfgspace_t *cs) +{ + return cs->size; +} + +/* rename these to avoid static link dups */ +#define cfgspace_get_status _pciesvc_cfgspace_get_status +#define cfgspace_get_cap _pciesvc_cfgspace_get_cap +#define cfgspace_get_pribus _pciesvc_cfgspace_get_pribus +#define cfgspace_get_secbus _pciesvc_cfgspace_get_secbus +#define cfgspace_get_subbus _pciesvc_cfgspace_get_subbus +#define cfgspace_get_headertype _pciesvc_cfgspace_get_headertype +#define cfgspace_findcap _pciesvc_cfgspace_findcap +#define cfgspace_findextcap _pciesvc_cfgspace_findextcap +#define cfgspace_readb _pciesvc_cfgspace_readb +#define cfgspace_readw _pciesvc_cfgspace_readw +#define cfgspace_readd _pciesvc_cfgspace_readd +#define cfgspace_read _pciesvc_cfgspace_read +#define cfgspace_writeb _pciesvc_cfgspace_writeb +#define cfgspace_writew _pciesvc_cfgspace_writew +#define cfgspace_writed _pciesvc_cfgspace_writed +#define cfgspace_write _pciesvc_cfgspace_write + +/* + * Access specific config space registers. + */ +u_int8_t cfgspace_get_pribus(cfgspace_t *cs); +u_int8_t cfgspace_get_secbus(cfgspace_t *cs); +u_int8_t cfgspace_get_subbus(cfgspace_t *cs); +u_int8_t cfgspace_get_headertype(cfgspace_t *cs); + +/* + * Capabilities. + */ +u_int8_t cfgspace_findcap(cfgspace_t *cs, const u_int8_t capid); + +/* + * Extended Capabilities. + */ +u_int16_t cfgspace_findextcap(cfgspace_t *cs, const u_int16_t capid); + +/* + * Config space operational accessors. + * + * Reads return current values, writes apply the write-mask to + * implement read-only fields. 
+ */
+u_int8_t cfgspace_readb(cfgspace_t *cs, const u_int16_t offset);
+u_int16_t cfgspace_readw(cfgspace_t *cs, const u_int16_t offset);
+u_int32_t cfgspace_readd(cfgspace_t *cs, const u_int16_t offset);
+int cfgspace_read(cfgspace_t *cs,
+                  const u_int16_t offset,
+                  const u_int8_t size,
+                  u_int32_t *valp);
+
+void cfgspace_writeb(cfgspace_t *cs,
+                     const u_int16_t offset, const u_int8_t val);
+void cfgspace_writew(cfgspace_t *cs,
+                     const u_int16_t offset, const u_int16_t val);
+void cfgspace_writed(cfgspace_t *cs,
+                     const u_int16_t offset, const u_int32_t val);
+int cfgspace_write(cfgspace_t *cs,
+                   const u_int16_t offset,
+                   const u_int8_t size,
+                   const u_int32_t val);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __PCIESVC_CFGSPACE_H__ */
diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cmd.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cmd.c
new file mode 100644
index 0000000000..ec236d7f68
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/cmd.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022, Advanced Micro Devices Inc.
+ */
+
+#include "pciesvc_impl.h"
+
+static pciesvc_cmdres_t resbuf;
+
+static int
+cmd_nop(const pciesvc_cmd_nop_t *cmd,
+        pciesvc_cmdres_nop_t *res)
+{
+    res->status = 0;
+    return 0;
+}
+
+static int
+cmd_set_log_level(const pciesvc_cmd_set_log_level_t *cmd,
+                  pciesvc_cmdres_set_log_level_t *res)
+{
+    res->old_level = pciesvc_log_level;
+    pciesvc_log_level = cmd->log_level;
+    res->status = 0;
+    return 0;
+}
+
+int
+pciesvc_cmd_read(char *buf, const long int off, const size_t count)
+{
+    int n;
+
+    if (off < 0 || off > sizeof(resbuf)) {
+        return -1;
+    }
+    if (off + count > sizeof(resbuf)) {
+        /* clamp read size to remainder of resbuf */
+        n = sizeof(resbuf) - off;
+    } else {
+        n = count;
+    }
+    pciesvc_memcpy(buf, ((char *)&resbuf) + off, n);
+    return n;
+}
+
+int
+pciesvc_cmd_write(const char *buf, const long int off, const size_t count)
+{
+    pciesvc_cmd_t *cmd;
+    pciesvc_cmdres_t *res = &resbuf;
+    int r;
+
+    if (off != 0 || count < sizeof(pciesvc_cmd_t)) {
+        return -1;
+    }
+
+    cmd = (pciesvc_cmd_t *)buf;
+    pciesvc_memset(res, 0, sizeof(*res));
+
+    switch (cmd->cmd) {
+    case PCIESVC_CMD_NOP:
+        r = cmd_nop(&cmd->nop, &res->nop);
+        break;
+    case PCIESVC_CMD_SET_LOG_LEVEL:
+        r = cmd_set_log_level(&cmd->set_log_level, &res->set_log_level);
+        break;
+    default:
+        res->status = PCIESVC_CMDSTATUS_UNKNOWN_CMD;
+        r = 0;    /* cmd_write "succeeded" */
+        break;
+    }
+
+    return r < 0 ? r : count;
+}
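Editor's note: a minimal caller-side sketch of this command protocol, not part of the patch. It writes a complete command at offset 0 and then reads the result back from the response buffer, using only the types and constants referenced above; the function name example_set_log_level is hypothetical.

static int
example_set_log_level(void)
{
    pciesvc_cmd_t cmd;
    pciesvc_cmdres_t res;

    pciesvc_memset(&cmd, 0, sizeof(cmd));
    cmd.cmd = PCIESVC_CMD_SET_LOG_LEVEL;
    cmd.set_log_level.log_level = PCIESVC_LOGPRI_DEBUG;

    /* writes must start at offset 0 and cover a full command */
    if (pciesvc_cmd_write((char *)&cmd, 0, sizeof(cmd)) < 0) {
        return -1;
    }
    /* the result of the last command is then readable from resbuf */
    if (pciesvc_cmd_read((char *)&res, 0, sizeof(res)) < 0) {
        return -1;
    }
    return res.set_log_level.status;    /* 0 on success */
}

diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/hdrt.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/hdrt.c
new file mode 100644
index 0000000000..5ca81f6c2b
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/hdrt.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017,2021, Pensando Systems Inc.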
+ */ + +#include "pciesvc_impl.h" +#include "hdrt.h" + +#define HDRT_BASE PXB_(DHS_ITR_PCIHDRT) +#define HDRT_COUNT ASIC_(PXB_CSR_DHS_ITR_PCIHDRT_ENTRIES) +#define HDRT_STRIDE ASIC_(PXB_CSR_DHS_ITR_PCIHDRT_ENTRY_BYTE_SIZE) +#define HDRT_NWORDS 3 + +static int +hdrt_size(void) +{ + return HDRT_COUNT; +} + +static u_int64_t +hdrt_addr(const u_int32_t lif) +{ + pciesvc_assert(lif < hdrt_size()); + return HDRT_BASE + (lif * HDRT_STRIDE); +} + +static void +hdrt_set(const u_int32_t lif, const hdrt_t *hdrt) +{ + pciesvc_reg_wr32w(hdrt_addr(lif), (u_int32_t *)hdrt, HDRT_NWORDS); +} + +static void +hdrt_set_itr(const u_int32_t lif, const u_int16_t bdf) +{ + hdrt_t h = { 0 }; + + h.valid = 1; + h.bdf = bdf; + h.attr2_1_rd = 0x1; /* reads get Relaxed Ordering */ + hdrt_set(lif, &h); +} + +/****************************************************************** + * apis + */ + +int +pciehw_hdrt_load(const u_int32_t lifb, + const u_int32_t lifc, + const u_int16_t bdf) +{ + u_int32_t lif; + + for (lif = lifb; lif < lifb + lifc; lif++) { + hdrt_set_itr(lif, bdf); + } + return 0; +} + +int +pciehw_hdrt_unload(const u_int32_t lifb, const u_int32_t lifc) +{ + const hdrt_t h0 = { 0 }; + u_int32_t lif; + + for (lif = lifb; lif < lifb + lifc; lif++) { + hdrt_set(lif, &h0); + } + return 0; +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/hdrt.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/hdrt.h new file mode 100644 index 0000000000..7d74613c8e --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/hdrt.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017,2021, Pensando Systems Inc. + */ + +#ifndef __HDRT_H__ +#define __HDRT_H__ + +typedef struct { + u_int64_t valid :1; + u_int64_t bdf :16; + u_int64_t td :1; + u_int64_t pasid_en :1; + u_int64_t pasid_sel :2; + u_int64_t pasid :20; + u_int64_t pasid_exe :1; + u_int64_t pasid_priv:1; + u_int64_t attr2_1_rd:2; + u_int64_t attr2_1_wr:2; + u_int64_t rc_cfg1 :1; + u_int64_t attr0_rd :1; + u_int64_t attr0_wr :1; + u_int64_t ats_at_wr :1; + u_int64_t ats_at_rd :1; + u_int64_t tc :3; + u_int64_t ln_wr :1; + u_int64_t ln_rd :1; + u_int64_t rsrv :13; + u_int64_t ecc :8; + u_int64_t _pad :16; +} __attribute__((packed)) hdrt_t; + +void pciehw_hdrt_init(void); +int pciehw_hdrt_load(const u_int32_t lifb, + const u_int32_t lifc, + const u_int16_t bdf); +int pciehw_hdrt_unload(const u_int32_t lifb, + const u_int32_t lifc); + +#endif /* __HDRT_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/indirect.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/indirect.c new file mode 100644 index 0000000000..bcd13e21f1 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/indirect.c @@ -0,0 +1,331 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018,2021-2022, Pensando Systems Inc. 
+ */ + +#include "pciesvc_impl.h" +#include "pcietlp.h" +#include "req_int.h" +#include "indirect.h" + +#define IND_INFO_BASE PXB_(STA_TGT_IND_INFO) +#define IND_INFO_NWORDS 1 +#define IND_INFO_STRIDE 4 + +static u_int64_t +ind_info_addr(const int port) +{ + return IND_INFO_BASE + (port * IND_INFO_STRIDE); +} + +static u_int64_t +indirect_int_addr(void) +{ + return PXB_(CFG_TGT_REQ_INDIRECT_INT); +} + +/***************************************************************** + * aximst rams + */ +#define AXIMST_BASE PXB_(DHS_TGT_AXIMST0) +#define AXIMST_STRIDE \ + (ASIC_(PXB_CSR_DHS_TGT_AXIMST1_BYTE_ADDRESS) - \ + ASIC_(PXB_CSR_DHS_TGT_AXIMST0_BYTE_ADDRESS)) + +#define AXIMST_NWORDS 4 +#define AXIMST_ENTRY_STRIDE 32 +#define AXIMST_ENTRIES_PER_PORT 16 +#define AXIMST_PORTS_PER_ROW 8 +#define AXIMST_PORT_STRIDE (AXIMST_ENTRY_STRIDE * AXIMST_ENTRIES_PER_PORT) + +static u_int64_t +aximst_addr(const unsigned int port, + const unsigned int idx, + const unsigned int entry) +{ + return (AXIMST_BASE + + ((u_int64_t)idx * AXIMST_STRIDE) + + ((u_int64_t)port * AXIMST_PORT_STRIDE) + + ((u_int64_t)entry * AXIMST_ENTRY_STRIDE)); +} + +static void +read_aximst(const unsigned int port, + const unsigned int idx, + const unsigned int entry, + u_int32_t *buf) +{ + const u_int64_t pa = aximst_addr(port, idx, entry); + + pciesvc_reg_rd32w(pa, buf, AXIMST_NWORDS); +} + +/* + * Indirect info tlp format is reversed in srams: + * + * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 + * -------------------------------------------------- + * 0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * 1: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * 2: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 <= tlp[16] + * 3: 00 00 00 00 60 02 00 3d 0f 00 00 3a 01 00 00 05 <= tlp[0] + * 4: 24 48 00 00 00 04 c0 bc 05 78 02 00 40 0e 41 c4 <= indirect info + */ +static void +decode_indirect_info(u_int8_t *info, indirect_entry_t *ientry) +{ + u_int8_t *p; + int i; + + /* copy the raw tlp data */ + p = (u_int8_t *)&ientry->rtlp; + for (i = 0; i < sizeof(ientry->rtlp); i++) { + p[i] = info[63 - i]; + } + + /* copy the tlp aux info */ + p = (u_int8_t *)&ientry->info; + for (i = 0; i < sizeof(ientry->info); i++) { + p[i] = info[64 + i]; + } +} + +static void +read_ind_info(const unsigned int port, int *entryp, int *pendingp) +{ + union { + struct { + u_int32_t pending:1; + u_int32_t entry:4; + u_int32_t port:3; + } __attribute__((packed)); + u_int32_t w[IND_INFO_NWORDS]; + } ind_info; + + pciesvc_reg_rd32w(ind_info_addr(port), ind_info.w, IND_INFO_NWORDS); + + /* pciesvc_assert(ind_info.port == port); */ + + if (entryp) *entryp = ind_info.entry; + if (pendingp) *pendingp = ind_info.pending; +} + +/* + * Fill the buffer with the raw indirect info data + * from the aximst srams. 
+ */ +static void +read_indirect_info(const unsigned int port, + const unsigned int entry, + u_int8_t *buf) +{ + u_int8_t *bp; + int i; + + for (bp = buf, i = 0; i < 5; i++, bp += 16) { + read_aximst(port, i, entry, (u_int32_t *)bp); + } +} + +static void +read_indirect_entry(const unsigned int port, + const unsigned int entry, + indirect_entry_t *ientry) +{ + u_int8_t buf[80]; + + read_indirect_info(port, entry, buf); + decode_indirect_info(buf, ientry); + ientry->port = port; +} + +static int +read_pending_indirect_entry(const unsigned int port, + indirect_entry_t *ientry) +{ + int entry, pending; + + read_ind_info(port, &entry, &pending); + read_indirect_entry(port, entry, ientry); + return pending; +} + +void +pciehw_indirect_complete(indirect_entry_t *ientry) +{ +#define IND_RSP_ADDR PXB_(DHS_TGT_IND_RSP_ENTRY) +#define IND_RSP_NWORDS 5 + union { + struct { + u_int32_t data0; + u_int32_t data1; + u_int32_t data2; + u_int32_t data3; + u_int32_t cpl_stat:3; + u_int32_t port_id:3; + u_int32_t axi_id:7; + u_int32_t fetch_rsp:1; + } __attribute__((packed)); + u_int32_t w[IND_RSP_NWORDS]; + } ind_rsp; + const u_int64_t pa = ientry->info.direct_addr; + const size_t sz = ientry->info.direct_size; + + if (ientry->completed) return; + + /* + * This indirect transaction was handled by software. + * We might have written some memory that will be read + * by subsequent direct transactions handled in hw. + * Insert barrier here to be sure all memory writes have + * landed so hw will always see the data we wrote. + */ + pciesvc_mem_barrier(); + + if (sz < 4 && (pa & 0x3)) { + /* + * If sub-dword read, shift return data to the correct + * byte lanes expected for this transaction. + * + * data0 = data0 << (address-dword-offset * 8); + */ + ind_rsp.data0 = ientry->data[0] << ((pa & 0x3) << 3); + } else { + ind_rsp.data0 = ientry->data[0]; + ind_rsp.data1 = ientry->data[1]; + ind_rsp.data2 = ientry->data[2]; + ind_rsp.data3 = ientry->data[3]; + } + ind_rsp.cpl_stat = ientry->cpl; + ind_rsp.port_id = ientry->port; + ind_rsp.axi_id = ientry->info.context_id; + ind_rsp.fetch_rsp = 0; + + pciesvc_reg_wr32w(IND_RSP_ADDR, ind_rsp.w, IND_RSP_NWORDS); + + ientry->completed = 1; +} + +static void +handle_indirect(const int port, pciehw_port_t *p, indirect_entry_t *ientry) +{ + const u_int32_t pmti = ientry->info.pmti; + pciehw_spmt_t *spmt = pciesvc_spmt_get(pmti); + const pcie_tlp_common_hdr_t *hdr = (void *)ientry->rtlp; + const u_int8_t tlp_type = hdr->type; + + switch (tlp_type) { + case PCIE_TLP_TYPE_CFGRD0: + case PCIE_TLP_TYPE_CFGRD1: + pciehw_cfgrd_indirect(port, ientry); + spmt->swrd++; + p->stats.ind_cfgrd++; + break; + case PCIE_TLP_TYPE_CFGWR0: + case PCIE_TLP_TYPE_CFGWR1: + pciehw_cfgwr_indirect(port, ientry); + spmt->swwr++; + p->stats.ind_cfgwr++; + break; + case PCIE_TLP_TYPE_MEMRD: + case PCIE_TLP_TYPE_MEMRD64: + pciehw_barrd_indirect(port, ientry); + spmt->swrd++; + p->stats.ind_memrd++; + break; + case PCIE_TLP_TYPE_MEMWR: + case PCIE_TLP_TYPE_MEMWR64: + pciehw_barwr_indirect(port, ientry); + spmt->swwr++; + p->stats.ind_memwr++; + break; + case PCIE_TLP_TYPE_IORD: + pciehw_barrd_indirect(port, ientry); + spmt->swrd++; + p->stats.ind_iord++; + break; + case PCIE_TLP_TYPE_IOWR: + pciehw_barwr_indirect(port, ientry); + spmt->swwr++; + p->stats.ind_iowr++; + break; + default: + ientry->cpl = PCIECPL_UR; + pciehw_indirect_complete(ientry); + p->stats.ind_unknown++; + break; + } + + pciesvc_spmt_put(spmt, DIRTY); +} + +/****************************************************************** + * apis + 
*/
+
+int
+pciehw_indirect_intr_init(const int port,
+                          const u_int64_t msgaddr, const u_int32_t msgdata)
+{
+    return req_int_init(indirect_int_addr(), port,
+                        msgaddr, msgdata | MSGDATA_ADD_PORT);
+}
+
+static int
+pciehw_indirect_handle(const int port, const int polled)
+{
+    pciehw_port_t *p = pciesvc_port_get(port);
+    indirect_entry_t ientrybuf, *ientry = &ientrybuf;
+    int pending;
+    int r = 0;
+
+    pciesvc_memset(ientry, 0, sizeof(*ientry));
+    pending = read_pending_indirect_entry(port, ientry);
+
+    p->stats.ind_intr++;
+    if (polled) p->stats.ind_polled++;
+    if (!pending) {
+        p->stats.ind_spurious++;
+        goto out;
+    }
+
+    ientry->cpl = PCIECPL_SC;   /* assume success */
+    handle_indirect(port, p, ientry);
+    r = 1;
+
+ out:
+    pciesvc_port_put(p, DIRTY);
+    return r;
+}
+
+int
+pciehw_indirect_intr(const int port)
+{
+    return pciehw_indirect_handle(port, 0);
+}
+
+/*
+ * Arrange to have the indirect interrupt written to memory,
+ * then we can poll memory locations to see if there is work to do.
+ */
+int
+pciehw_indirect_poll_init(const int port)
+{
+    const u_int64_t msgaddr = pciesvc_indirect_intr_dest_pa(port);
+    const u_int32_t msgdata = 1;
+
+    return req_int_init(indirect_int_addr(), port, msgaddr, msgdata);
+}
+
+int
+pciehw_indirect_poll(const int port)
+{
+    int pending;
+    int r = 0;
+
+    read_ind_info(port, NULL, &pending);
+    if (pending) {
+        r = pciehw_indirect_handle(port, 1);
+    }
+    return r;
+}
diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/indirect.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/indirect.h
new file mode 100644
index 0000000000..6e08b6ecef
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/indirect.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018,2021, Pensando Systems Inc.
+ */
+
+#ifndef __INDIRECT_H__
+#define __INDIRECT_H__
+
+#include "indirect_entry.h"
+
+int pciehw_indirect_poll_init(const int port);
+int pciehw_indirect_poll(const int port);
+int pciehw_indirect_intr_init(const int port,
+                              u_int64_t msgaddr, u_int32_t msgdata);
+int pciehw_indirect_intr(const int port);
+
+void pciehw_indirect_complete(indirect_entry_t *ientry);
+
+#endif /* __INDIRECT_H__ */
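Editor's note: a minimal polling-mode sketch, not part of the patch. After pciehw_indirect_poll_init() redirects the indirect "interrupt" to a memory location, a caller-owned thread can poll for pending work; the loop structure, back-off interval, and function name example_indirect_poll_thread are hypothetical.

static void
example_indirect_poll_thread(const int port)
{
    if (pciehw_indirect_poll_init(port) < 0) {
        return;
    }
    for (;;) {
        /* returns 1 if a pending entry was handled, 0 if idle */
        if (pciehw_indirect_poll(port) == 0) {
            pciesvc_usleep(1000);   /* idle: back off briefly */
        }
    }
}

diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intr.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intr.c
new file mode 100644
index 0000000000..b1186525cf
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intr.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018,2021, Pensando Systems Inc.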
+ */ + +#include "pciesvc_impl.h" +#include "intr.h" +#include "intrutils.h" + +static void +intr_config(const u_int32_t intrb, + const u_int32_t intrc, + const int legacy, + const int fmask) +{ + u_int32_t intr; + + for (intr = intrb; intr < intrb + intrc; intr++) { + intr_fwcfg_mode(intr, legacy, fmask); + } +} + +void +pciehw_intr_config(pciehwdev_t *phwdev, const int legacy, const int fmask) +{ + int i; + + if (phwdev->novrdintr) { + for (i = 0; i < phwdev->novrdintr; i++) { + const u_int32_t intrb = phwdev->ovrdintr[i].intrb; + const u_int32_t intrc = phwdev->ovrdintr[i].intrc; + + intr_config(intrb, intrc, legacy, fmask); + } + } else { + intr_config(phwdev->intrb, phwdev->intrc, legacy, fmask); + } +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intr.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intr.h new file mode 100644 index 0000000000..2b5cd5b9b7 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intr.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018,2021, Pensando Systems Inc. + */ + +#ifndef __INTR_H__ +#define __INTR_H__ + +union pciehwdev_u; typedef union pciehwdev_u pciehwdev_t; + +void pciehw_intr_config(pciehwdev_t *phwdev, + const int legacy, const int fmask); + +#endif /* __INTR_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intrutils.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intrutils.c new file mode 100644 index 0000000000..c4ff5e3b7d --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intrutils.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2022, Pensando Systems Inc. + */ + +#include "pciesvc_impl.h" +#include "intrutils.h" + +#define INTR_BASE ASIC_(ADDR_BASE_INTR_INTR_OFFSET) +#define INTR_COUNT ASIC_(INTR_CSR_DHS_INTR_ASSERT_ENTRY_ARRAY_COUNT) + +#define INTR_MSIXCFG_OFFSET ASIC_(INTR_CSR_DHS_INTR_MSIXCFG_BYTE_OFFSET) +#define INTR_MSIXCFG_BASE (INTR_BASE + INTR_MSIXCFG_OFFSET) +#define INTR_MSIXCFG_STRIDE 0x10 + +#define INTR_FWCFG_OFFSET ASIC_(INTR_CSR_DHS_INTR_FWCFG_BYTE_OFFSET) +#define INTR_FWCFG_BASE (INTR_BASE + INTR_FWCFG_OFFSET) +#define INTR_FWCFG_STRIDE 0x8 + +#define INTR_DRVCFG_OFFSET ASIC_(INTR_CSR_DHS_INTR_DRVCFG_BYTE_OFFSET) +#define INTR_DRVCFG_BASE (INTR_BASE + INTR_DRVCFG_OFFSET) +#define INTR_DRVCFG_STRIDE 0x20 + +#define INTR_ASSERT_OFFSET ASIC_(INTR_CSR_DHS_INTR_ASSERT_BYTE_OFFSET) +#define INTR_ASSERT_BASE (INTR_BASE + INTR_ASSERT_OFFSET) +#define INTR_ASSERT_STRIDE 0x4 +#define INTR_ASSERT_DATA 0x00000001 /* in little-endian */ + +#define INTR_STATE_OFFSET ASIC_(INTR_CSR_DHS_INTR_STATE_BYTE_OFFSET) +#define INTR_STATE_BASE (INTR_BASE + INTR_STATE_OFFSET) +#define INTR_STATE_STRIDE 0x10 + +#define NWORDS(a) (sizeof(a) / sizeof(u_int32_t)) + +static u_int64_t +intr_msixcfg_addr(const int intrb) +{ + pciesvc_assert(intrb < INTR_COUNT); + return INTR_MSIXCFG_BASE + (intrb * INTR_MSIXCFG_STRIDE); +} + +static u_int64_t +intr_fwcfg_addr(const int intrb) +{ + pciesvc_assert(intrb < INTR_COUNT); + return INTR_FWCFG_BASE + (intrb * INTR_FWCFG_STRIDE); +} + +static u_int64_t +intr_drvcfg_addr(const int intrb) +{ + pciesvc_assert(intrb < INTR_COUNT); + return INTR_DRVCFG_BASE + (intrb * INTR_DRVCFG_STRIDE); +} + +/* + * Set the drvcfg_mask for this interrupt resource. + * Return the previous value of the mask so caller can + * restore to previous value if desired. 
+ */ +int +intr_drvcfg_mask(const int intr, const int on) +{ + const u_int64_t pa = intr_drvcfg_addr(intr); + const int omask = pciesvc_reg_rd32(pa + offsetof(intr_drvcfg_t, mask)); + + pciesvc_reg_wr32(pa + offsetof(intr_drvcfg_t, mask), on); + return omask; +} + +static void +intr_msixcfg(const int intr, + const u_int64_t msgaddr, const u_int32_t msgdata, const int vctrl) +{ + const u_int64_t pa = intr_msixcfg_addr(intr); +#define MSG_ADDR_OFF 0 +#define MSG_DATA_OFF 8 +#define VECTOR_CTRL_OFF 12 + + pciesvc_reg_wr64(pa + MSG_ADDR_OFF, msgaddr); + pciesvc_reg_wr32(pa + MSG_DATA_OFF, msgdata); + pciesvc_reg_wr32(pa + VECTOR_CTRL_OFF, vctrl); +} + +static void +intr_fwcfg_set_function_mask(const int intr, const int on) +{ + const u_int64_t pa = intr_fwcfg_addr(intr); + pciesvc_reg_wr32(pa, on); +} + +static u_int64_t +intr_assert_addr(const int intr) +{ + pciesvc_assert(intr < INTR_COUNT); + return INTR_ASSERT_BASE + (intr * INTR_ASSERT_STRIDE); +} + +static u_int32_t +intr_assert_data(void) +{ + return INTR_ASSERT_DATA; +} + +void +intr_assert(const int intr) +{ + const u_int64_t pa = intr_assert_addr(intr); + const u_int32_t data = intr_assert_data(); + + pciesvc_reg_wr32(pa, data); +} + +/* + * Change the mode of the interrupt between legacy and msi mode. + * + * Note: We are careful to make config changes to fwcfg only with + * the function_mask set. Masking the interrupt will deassert the + * interrupt if asserted in legacy mode, then we change any config, + * then re-enable with the new config. If necessary the interrupt + * will re-assert with the new config. + */ +void +intr_fwcfg_mode(const int intr, const int legacy, const int fmask) +{ + const u_int64_t pa = intr_fwcfg_addr(intr); + intr_fwcfg_t v; + + /* mask via function_mask while making changes */ + intr_fwcfg_set_function_mask(intr, 1); + { + pciesvc_reg_rd32w(pa, v.w, NWORDS(v.w)); + v.legacy = legacy; + pciesvc_reg_wr32w(pa, v.w, NWORDS(v.w)); + } + if (!fmask) { + intr_fwcfg_set_function_mask(intr, fmask); + } +} + +/***************************************************************** + * Reset section + */ + +/***************** + * pba + */ + +/* + * Reset this interrupt's contribution to the interrupt status + * Pending Bit Array (PBA). We clear the PBA bit for this interrupt + * resource by returning all the "credits" for the interrupt. + * + * The driver interface to return credits is drvcfg.int_credits, + * but that register has special semantics where the value written + * to this register is atomically subtracted from the current value. + * We could use this interface to read the value X then write X back + * to the register to X - X = 0. This works even for negative values + * since (-X) - (-X) = 0. + */ +static void +intr_pba_clear(const int intr) +{ + const u_int64_t pa = intr_drvcfg_addr(intr); + u_int32_t credits; + + credits = pciesvc_reg_rd32(pa + offsetof(intr_drvcfg_t, int_credits)); + if (credits) { + pciesvc_reg_wr32(pa + offsetof(intr_drvcfg_t, int_credits), credits); + } +} + +void +intr_deassert(const int intr) +{ + intr_pba_clear(intr); +} + +/***************** + * msixcfg + */ + +/* + * Reset the msix control register group. This group is usually + * owned by the host OS and the behavior, including these reset values, + * are specified by the PCIe spec. 
+ */ +static void +reset_msixcfg(const int intr) +{ + /* clear msg addr/data, vector_ctrl mask=1 */ + intr_msixcfg(intr, 0, 0, 1); +} + +static void +intr_reset_msixcfg(const int intrb, const int intrc) +{ + int intr; + + for (intr = intrb; intr < intrb + intrc; intr++) { + reset_msixcfg(intr); + } +} + +/***************** + * intr mode + */ + +/* + * Reset the interrupt "mode" to "legacy". + */ +static void +reset_mode(const int intr) +{ + /* reset to legacy mode, no fmask (CMD.int_disable == 0) */ + intr_fwcfg_mode(intr, 1, 0); +} + +static void +intr_reset_mode(const int intrb, const int intrc) +{ + int intr; + + for (intr = intrb; intr < intrb + intrc; intr++) { + reset_mode(intr); + } +} + +/***************** + * external reset apis + */ + +void +intr_reset_pci(const int intrb, const int intrc, const int dmask) +{ + intr_reset_msixcfg(intrb, intrc); + intr_reset_mode(intrb, intrc); +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intrutils.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intrutils.h new file mode 100644 index 0000000000..2d1362f393 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/intrutils.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019,2022, Pensando Systems Inc. + */ + +#ifndef __INTRUTILS_H__ +#define __INTRUTILS_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +typedef struct intr_drvcfg_s { + u_int32_t coal_init; + u_int32_t mask; + u_int32_t int_credits; + u_int32_t mask_on_assert; + u_int32_t coal_curr; +} __attribute__((packed)) intr_drvcfg_t; + +typedef struct intr_msixcfg_s { + u_int64_t msgaddr; + u_int32_t msgdata; + u_int32_t vector_ctrl; +} __attribute__((packed)) intr_msixcfg_t; + +typedef union intr_fwcfg_u { + struct { + u_int32_t function_mask:1; + u_int32_t rsrv:31; + u_int32_t lif:11; + u_int32_t port_id:3; + u_int32_t local_int:1; + u_int32_t legacy:1; + u_int32_t int_pin:2; + u_int32_t rsrv2:14; + } __attribute__((packed)); + u_int32_t w[2]; +} intr_fwcfg_t; + +typedef union intr_state_s { + struct { + u_int64_t msixcfg_msg_addr_51_2:50; + u_int64_t msixcfg_msg_data:32; + u_int64_t msixcfg_vector_ctrl:1; + u_int64_t fwcfg_function_mask:1; + u_int64_t fwcfg_lif:11; + u_int64_t fwcfg_local_int:1; + u_int64_t fwcfg_legacy_int:1; + u_int64_t fwcfg_legacy_pin:2; + u_int64_t drvcfg_mask:1; + int64_t drvcfg_int_credits:16; /* signed */ + u_int64_t drvcfg_mask_on_assert:1; + u_int64_t fwcfg_port_id:3; + } __attribute__((packed)); + u_int32_t w[4]; +} intr_state_t; + +/* override these to avoid static link dups */ +#define intr_assert _pciesvc_intr_assert +#define intr_deassert _pciesvc_intr_deassert +#define intr_drvcfg_mask _pciesvc_intr_drvcfg_mask +#define intr_fwcfg_mode _pciesvc_intr_fwcfg_mode +#define intr_reset_pci _pciesvc_intr_reset_pci + +void intr_assert(const int intr); +void intr_deassert(const int intr); +int intr_drvcfg_mask(const int intr, const int on); +void intr_fwcfg_mode(const int intr, const int legacy, const int fmask); + +/* + * intr_reset_pci() - reset the pcie managed register groups to default values, + * use for pcie block resets (FLR, bus reset). 
+ */ +void intr_reset_pci(const int intrb, const int intrc, const int dmask); + +#ifdef __cplusplus +} +#endif + +#endif /* __INTRUTILS_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/log.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/log.c new file mode 100644 index 0000000000..ed611a9f88 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/log.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2019,2021-2022, Pensando Systems Inc. + */ + +#include "pciesvc_impl.h" +#include "log.h" + +#ifdef PCIESVC_SYSTEM_EXTERN + +static void +logv(pciesvc_logpri_t pri, const char *fmt, va_list ap) +{ + pciesvc_eventdata_t evd; + pciesvc_logmsg_t *logmsg; + char buf[80]; + + if (pri < pciesvc_log_level) { + return; + } + + pciesvc_vsnprintf(buf, sizeof(buf), fmt, ap); + + pciesvc_memset(&evd, 0, sizeof(evd)); + evd.evtype = PCIESVC_EV_LOGMSG; + logmsg = &evd.logmsg; + logmsg->pri = pri; + pciesvc_memcpy(logmsg->msg, buf, sizeof(logmsg->msg)); + pciesvc_event_handler(&evd, sizeof(evd)); +} + +void +pciesvc_logdebug(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + logv(PCIESVC_LOGPRI_DEBUG, fmt, ap); + va_end(ap); +} + +void +pciesvc_loginfo(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + logv(PCIESVC_LOGPRI_INFO, fmt, ap); + va_end(ap); +} + +void +pciesvc_logwarn(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + logv(PCIESVC_LOGPRI_WARN, fmt, ap); + va_end(ap); +} + +void +pciesvc_logerror(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + logv(PCIESVC_LOGPRI_ERROR, fmt, ap); + va_end(ap); +} + +void +pciesvc_loglocal(const char *fmt, ...) +{ + char buf[80]; + va_list ap; + + va_start(ap, fmt); + pciesvc_vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + pciesvc_log(buf); +} + +#endif diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/log.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/log.h new file mode 100644 index 0000000000..3ceede4992 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/log.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018,2021-2022, Pensando Systems Inc. + */ + +#ifndef __LOG_H__ +#define __LOG_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +#ifdef PCIESVC_SYSTEM_EXTERN + +void pciesvc_loglocal(const char *fmt, ...) + __attribute__((format (printf, 1, 2))); +void pciesvc_logdebug(const char *fmt, ...) + __attribute__((format (printf, 1, 2))); +void pciesvc_loginfo(const char *fmt, ...) + __attribute__((format (printf, 1, 2))); +void pciesvc_logwarn(const char *fmt, ...) + __attribute__((format (printf, 1, 2))); +void pciesvc_logerror(const char *fmt, ...) + __attribute__((format (printf, 1, 2))); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __LOG_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/notify.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/notify.c new file mode 100644 index 0000000000..cbedefb1e1 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/notify.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018,2021, Pensando Systems Inc. 
+ */
+
+#include "pciesvc_impl.h"
+#include "pcietlp.h"
+#include "req_int.h"
+#include "notify.h"
+
+#define NOTIFY_EN        PXB_(CFG_TGT_NOTIFY_EN)
+#define NOTIFY_RING_SIZE PXB_(CFG_TGT_REQ_NOTIFY_RING_SIZE)
+
+#define NOTIFY_BASE      PXB_(DHS_TGT_NOTIFY)
+#define NOTIFY_STRIDE    4
+
+static u_int64_t
+notify_addr(const int port)
+{
+    return NOTIFY_BASE + (port * NOTIFY_STRIDE);
+}
+
+static u_int64_t
+notify_int_addr(void)
+{
+    return PXB_(CFG_TGT_REQ_NOTIFY_INT);
+}
+
+static void
+notify_get_pici(const int port, int *pip, int *cip)
+{
+    const u_int32_t pici = pciesvc_reg_rd32(notify_addr(port));
+
+    *pip = pici & 0xffff;
+    *cip = pici >> 16;
+}
+
+static void
+notify_get_masked_pici(const int port, int *pip, int *cip,
+                       const u_int32_t ring_mask)
+{
+    int pi, ci;
+
+    notify_get_pici(port, &pi, &ci);
+    *pip = pi & ring_mask;
+    *cip = ci & ring_mask;
+}
+
+/*
+ * NOTE: The hw doesn't allow sw to write to PI;
+ * when we write to the NOTIFY register only the CI is updated.
+ * To reset to an empty ring, set CI = PI.
+ */
+static void
+notify_set_ci(const int port, const int ci)
+{
+    const u_int32_t pici = (ci << 16);
+    pciesvc_reg_wr32(notify_addr(port), pici);
+}
+
+static u_int32_t
+notify_pici_delta(const int pi, const int ci, const u_int32_t ring_mask)
+{
+    if (pi > ci) {
+        return pi - ci;
+    } else {
+        return pi + ring_mask + 1 - ci;
+    }
+}
+
+static void
+notify_set_enable(const u_int32_t mask)
+{
+    union {
+        struct {
+            u_int32_t msg:1;
+            u_int32_t pmv:1;
+            u_int32_t db_pmv:1;
+            u_int32_t unsupp:1;
+            u_int32_t atomic:1;
+            u_int32_t pmt_miss:1;
+            u_int32_t pmr_invalid:1;
+            u_int32_t prt_invalid:1;
+            u_int32_t rc_vfid_miss:1;
+            u_int32_t prt_oor:1;
+            u_int32_t vfid_oor:1;
+            u_int32_t cfg_bdf_oor:1;
+            u_int32_t pmr_ecc_err:1;
+            u_int32_t prt_ecc_err:1;
+        } __attribute__((packed));
+        u_int32_t w;
+    } en;
+
+    en.w = pciesvc_reg_rd32(NOTIFY_EN);
+    en.w = mask;
+    pciesvc_reg_wr32(NOTIFY_EN, en.w);
+}
+
+static void
+notify_enable(void)
+{
+    notify_set_enable(0x3fff);  /* enable all sources */
+}
+
+static int
+notify_ring_inc(const int idx, const int inc, const u_int32_t ring_mask)
+{
+    return (idx + inc) & ring_mask;
+}
+
+static void
+handle_notify(const int port, pciehw_port_t *p, notify_entry_t *nentry)
+{
+    const tlpauxinfo_t *info = &nentry->info;
+
+    /*
+     * info->indirect_reason == 0 means we hit an entry we installed
+     * in the PMT for indirect handling.  Go process the transaction.
+     *
+     * If info->indirect_reason != 0 then perhaps
+     * this is an exception or error.  Track reason code stats.
+ */ + if (info->indirect_reason == 0) { + const u_int32_t pmti = info->pmti; + pciehw_spmt_t *spmt = pciesvc_spmt_get(pmti); + const pcie_tlp_common_hdr_t *hdr = (void *)nentry->rtlp; + const u_int8_t tlp_type = hdr->type; + + switch (tlp_type) { + case PCIE_TLP_TYPE_CFGRD0: + case PCIE_TLP_TYPE_CFGRD1: + pciehw_cfgrd_notify(port, nentry); + spmt->swrd++; + p->stats.not_cfgrd++; + break; + case PCIE_TLP_TYPE_CFGWR0: + case PCIE_TLP_TYPE_CFGWR1: + pciehw_cfgwr_notify(port, nentry); + spmt->swwr++; + p->stats.not_cfgwr++; + break; + case PCIE_TLP_TYPE_MEMRD: + case PCIE_TLP_TYPE_MEMRD64: + pciehw_barrd_notify(port, nentry); + spmt->swrd++; + p->stats.not_memrd++; + break; + case PCIE_TLP_TYPE_MEMWR: + case PCIE_TLP_TYPE_MEMWR64: + pciehw_barwr_notify(port, nentry); + spmt->swwr++; + p->stats.not_memwr++; + break; + case PCIE_TLP_TYPE_IORD: + pciehw_barrd_notify(port, nentry); + spmt->swrd++; + p->stats.not_iord++; + break; + case PCIE_TLP_TYPE_IOWR: + pciehw_barwr_notify(port, nentry); + spmt->swwr++; + p->stats.not_iowr++; + break; + default: + p->stats.not_unknown++; + break; + } + pciesvc_spmt_put(spmt, DIRTY); + } else { + uint64_t *notify_reasons = &p->stats.notify_reason_stats; + notify_reasons[info->indirect_reason]++; + } +} + +/****************************************************************** + * apis + */ + +/* + * CFG_TGT_REQ_NOTIFY_INT + */ +int +pciehw_notify_intr_init(const int port, u_int64_t msgaddr, u_int32_t msgdata) +{ + notify_enable(); + return req_int_init(notify_int_addr(), port, + msgaddr, msgdata | MSGDATA_ADD_PORT); +} + +static int +pciehw_notify_handle(const int port, const int polled) +{ + pciehw_port_t *p = pciesvc_port_get(port); + const u_int32_t ring_mask = pciesvc_notify_ring_mask(port); + int r, pi, ci, i, endidx; + u_int32_t pici_delta; + + p->stats.not_intr++; + if (polled) p->stats.not_polled++; + + notify_get_masked_pici(port, &pi, &ci, ring_mask); + if (ci == pi) { + p->stats.not_spurious++; + r = 0; /* not our intr */ + goto out; + } + + pici_delta = notify_pici_delta(pi, ci, ring_mask); + + p->stats.not_cnt += pici_delta; + if (pici_delta > p->stats.not_max) { + p->stats.not_max = pici_delta; + } + + endidx = notify_ring_inc(pi, 1, ring_mask); + for (i = notify_ring_inc(ci, 1, ring_mask); + i != endidx; + i = notify_ring_inc(i, 1, ring_mask)) { + notify_entry_t *nentry; + + nentry = pciesvc_notify_ring_get(port, i); + handle_notify(port, p, nentry); + pciesvc_notify_ring_put(nentry); + + /* return some slots occasionally while processing */ + if ((i & 0xff) == 0) { + notify_set_ci(port, i); + } + } + + /* we consumed these, adjust ci */ + notify_set_ci(port, pi); + r = 1; /* handled intr */ + + out: + pciesvc_port_put(p, DIRTY); + return r; +} + +int +pciehw_notify_intr(const int port) +{ + return pciehw_notify_handle(port, 0); +} + +/* + * Arrange to have the notify interrupt written to memory, + * then we can poll memory locations to see if there is work to do. 
+ */
+int
+pciehw_notify_poll_init(const int port)
+{
+    const u_int64_t msgaddr = pciesvc_notify_intr_dest_pa(port);
+    const u_int32_t msgdata = 1;
+
+    notify_enable();
+    return req_int_init(notify_int_addr(), port, msgaddr, msgdata);
+}
+
+int
+pciehw_notify_poll(const int port)
+{
+    const u_int32_t ring_mask = pciesvc_notify_ring_mask(port);
+    int pi, ci;
+    int r = 0;
+
+    notify_get_masked_pici(port, &pi, &ci, ring_mask);
+    if (ci != pi) {
+        r = pciehw_notify_handle(port, 1);
+    }
+    return r;
+}
diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/notify.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/notify.h
new file mode 100644
index 0000000000..5096ba67f3
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/notify.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, Pensando Systems Inc.
+ */
+
+#ifndef __NOTIFY_H__
+#define __NOTIFY_H__
+
+#include "notify_entry.h"
+
+int pciehw_notify_poll_init(const int port);
+int pciehw_notify_poll(const int port);
+int pciehw_notify_intr_init(const int port,
+                            u_int64_t msgaddr, u_int32_t msgdata);
+int pciehw_notify_intr(const int port);
+
+#endif /* __NOTIFY_H__ */
diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciehwdev.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciehwdev.c
new file mode 100644
index 0000000000..0f6800c2c0
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciehwdev.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2022, Pensando Systems Inc.
+ */
+
+#include "pciesvc_impl.h"
+#include "bdf.h"
+
+u_int16_t
+pciehwdev_get_hostbdf(const pciehwdev_t *phwdev)
+{
+    pciehw_port_t *p;
+    u_int8_t secbus;
+    u_int16_t bdf;
+
+    p = pciesvc_port_get(phwdev->port);
+    secbus = p->secbus;
+    pciesvc_port_put(p, CLEAN);
+
+    /*
+     * If we have a parent, map our local bdf based on the root secbus.
+     * Otherwise there is no parent: this is the root, which has no
+     * local bdf, so construct a bdf based on the primary bus,
+     * also known as (secbus - 1).
+     * If secbus is not yet set because there was no bios scan, use 0.
+     */
+    if (phwdev->parenth) {
+        bdf = bdf_make(bdf_to_bus(phwdev->bdf) + secbus,
+                       bdf_to_dev(phwdev->bdf),
+                       bdf_to_fnc(phwdev->bdf));
+    } else {
+        const u_int8_t bus = secbus ? secbus - 1 : 0;
+        bdf = bdf_make(bus, 0, 0);
+    }
+    return bdf;
+}
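Editor's note: a worked example of the mapping above, not part of the patch. The numbers are illustrative and assume the conventional bus:dev.fn packing provided by bdf.h.

/* With the root port secbus programmed to 0x05, a child device whose
 * local bdf is 02:00.1 is reported to the host as 07:00.1, i.e.
 * (local bus 0x02 + secbus 0x05):00.1.  The root itself reports
 * (secbus - 1):00.0 = 04:00.0; if secbus is still 0 because no bios
 * scan has happened yet, the root reports 00:00.0. */

diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc.c
new file mode 100644
index 0000000000..0ac9d11fee
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022, Pensando Systems Inc.
+ */
+
+#include "pciesvc_impl.h"
+#include "indirect.h"
+#include "notify.h"
+
+int pciesvc_version_major = PCIESVC_VERSION_MAJ;
+int pciesvc_version_minor = PCIESVC_VERSION_MIN;
+
+pciesvc_logpri_t pciesvc_log_level = PCIESVC_LOGPRI_INFO;
+
+/* local sanitized version of our params.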
+ */
+typedef struct pciesvc_lparams_s {
+    int port;                   /* port */
+    uint32_t valid:1;           /* initialized */
+    uint32_t ind_poll:1;        /* indirect poll for work */
+    uint32_t ind_intr:1;        /* indirect intr for work */
+    uint32_t not_poll:1;        /* notify poll for work */
+    uint32_t not_intr:1;        /* notify intr for work */
+    uint32_t mac_poll:1;        /* mac poll */
+    uint32_t mac_intr:1;        /* mac intr */
+    uint64_t ind_msgaddr;       /* ind_intr=1: intr msg addr */
+    uint32_t ind_msgdata;       /* ind_intr=1: intr msg data */
+    uint64_t not_msgaddr;       /* not_intr=1: intr msg addr */
+    uint32_t not_msgdata;       /* not_intr=1: intr msg data */
+} pciesvc_lparams_t;
+
+static pciesvc_lparams_t lparams[PCIEHW_NPORTS];
+
+static pciesvc_lparams_t *
+params_v0_to_lparams(pciesvc_params_v0_t *p)
+{
+    pciesvc_lparams_t *lp;
+
+    if (p->port < 0 || p->port >= PCIEHW_NPORTS) {
+        pciesvc_loglocal("pciesvc params invalid port %d\n", p->port);
+        return NULL;
+    }
+
+    lp = &lparams[p->port];
+    pciesvc_memset(lp, 0, sizeof(*lp));
+    lp->port = p->port;
+
+    /* poll *or* intr */
+    if (p->ind_poll && p->ind_intr) {
+        pciesvc_loglocal("pciesvc params indirect poll and intr\n");
+        return NULL;
+    }
+    if (p->not_poll && p->not_intr) {
+        pciesvc_loglocal("pciesvc params notify poll and intr\n");
+        return NULL;
+    }
+    if (p->mac_poll && p->mac_intr) {
+        pciesvc_loglocal("pciesvc params mac poll and intr\n");
+        return NULL;
+    }
+
+    /* XXX don't mac handle yet */
+    if (p->mac_poll || p->mac_intr) {
+        pciesvc_loglocal("pciesvc params mac poll/intr not implemented\n");
+        return NULL;
+    }
+
+    /* intr requires msgaddr */
+    if (p->ind_intr && p->ind_msgaddr == 0) {
+        pciesvc_loglocal("pciesvc params no indirect msgaddr\n");
+        return NULL;
+    }
+    if (p->not_intr && p->not_msgaddr == 0) {
+        pciesvc_loglocal("pciesvc params no notify msgaddr\n");
+        return NULL;
+    }
+
+    lp->ind_poll = p->ind_poll;
+    lp->ind_intr = p->ind_intr;
+    lp->not_poll = p->not_poll;
+    lp->not_intr = p->not_intr;
+    lp->mac_poll = p->mac_poll;
+    lp->mac_intr = p->mac_intr;
+    lp->ind_msgaddr = p->ind_msgaddr;
+    lp->ind_msgdata = p->ind_msgdata;
+    lp->not_msgaddr = p->not_msgaddr;
+    lp->not_msgdata = p->not_msgdata;
+    lp->valid = 1;
+    return lp;
+}
+
+static pciesvc_lparams_t *
+params_to_lparams(pciesvc_params_t *params)
+{
+    pciesvc_lparams_t *lp = NULL;
+
+    switch (params->version) {
+    case 0:
+        lp = params_v0_to_lparams(&params->params_v0);
+        break;
+    default:
+        lp = NULL;
+        break;
+    }
+    return lp;
+}
+
+static int
+params_port(pciesvc_params_t *params)
+{
+    int port = -2;
+
+    switch (params->version) {
+    case 0:
+        port = params->params_v0.port;
+        break;
+    default:
+        port = -2;
+        break;
+    }
+    return port;
+}
+
+int
+pciesvc_init(pciesvc_params_t *params)
+{
+    int r;
+    pciesvc_lparams_t *lp;
+
+    /* if kpcimgr active_ports is unset (0) we get -1 here */
+    if (params_port(params) == -1) {
+        pciesvc_loglocal("pciesvc_init: no active ports\n");
+        return 0;
+    }
+
+    lp = params_to_lparams(params);
+    if (lp == NULL) goto err_out;
+
+    if (lp->ind_poll) {
+        if ((r = pciesvc_indirect_poll_init(lp->port)) < 0) {
+            pciesvc_loglocal("indirect_poll_init failed: %d\n", r);
+            goto err_out;
+        }
+    } else if (lp->ind_intr) {
+        if ((r = pciesvc_indirect_intr_init(lp->port,
+                                            lp->ind_msgaddr,
+                                            lp->ind_msgdata)) < 0) {
+            pciesvc_loglocal("indirect_intr_init failed: %d\n", r);
+            goto err_out;
+        }
+    }
+
+    if (lp->not_poll) {
+        if ((r = pciesvc_notify_poll_init(lp->port)) < 0) {
+            pciesvc_loglocal("notify_poll_init failed: %d\n", r);
+            goto err_out;
+        }
+    } else if (lp->not_intr) {
+        if ((r = pciesvc_notify_intr_init(lp->port,
+                                          lp->not_msgaddr,
+                                          lp->not_msgdata)) < 0) {
+            pciesvc_loglocal("notify_intr_init failed: %d\n", r);
+            goto err_out;
+        }
+    }
+
+    return 0;
+
+ err_out:
+    if (lp) lp->valid = 0;
+    return -1;
+}
+
+void
+pciesvc_shut(const int port)
+{
+    pciesvc_lparams_t *lp;
+
+    if (port < 0 || port >= PCIEHW_NPORTS) return;
+
+    lp = &lparams[port];
+    if (!lp->valid) return;
+
+    /*
+     * Shutdown any interrupts.
+     * Hardware doesn't have an interrupt disable setting.
+     * For now init for poll, then don't poll anymore.
+     */
+    if (lp->ind_intr) {
+        pciesvc_indirect_poll_init(port);
+    }
+    if (lp->not_intr) {
+        pciesvc_notify_poll_init(port);
+    }
+
+    lp->valid = 0;
+}
+
+int
+pciesvc_poll(const int port)
+{
+    pciesvc_lparams_t *lp;
+    int ind_poll = 0;
+    int not_poll = 0;
+
+    if (port < 0 || port >= PCIEHW_NPORTS) return -1;
+
+    lp = &lparams[port];
+    if (!lp->valid) return -1;
+
+    ind_poll = pciesvc_indirect_poll(lp->port);
+    not_poll = pciesvc_notify_poll(lp->port);
+
+    return (ind_poll || not_poll);
+}
+
+void
+pciesvc_get_version(int *maj, int *min)
+{
+    *maj = pciesvc_version_major;
+    *min = pciesvc_version_minor;
+}
+
+/******************************************************************
+ * indirect
+ */
+
+int
+pciesvc_indirect_poll_init(const int port)
+{
+    return pciehw_indirect_poll_init(port);
+}
+
+int
+pciesvc_indirect_poll(const int port)
+{
+    return pciehw_indirect_poll(port);
+}
+
+int
+pciesvc_indirect_intr_init(const int port,
+                           u_int64_t msgaddr, u_int32_t msgdata)
+{
+    return pciehw_indirect_intr_init(port, msgaddr, msgdata);
+}
+
+int
+pciesvc_indirect_intr(const int port)
+{
+    return pciehw_indirect_intr(port);
+}
+
+/******************************************************************
+ * notify
+ */
+
+int
+pciesvc_notify_poll_init(const int port)
+{
+    return pciehw_notify_poll_init(port);
+}
+
+int
+pciesvc_notify_poll(const int port)
+{
+    return pciehw_notify_poll(port);
+}
+
+int
+pciesvc_notify_intr_init(const int port,
+                         u_int64_t msgaddr, u_int32_t msgdata)
+{
+    return pciehw_notify_intr_init(port, msgaddr, msgdata);
+}
+
+int
+pciesvc_notify_intr(const int port)
+{
+    return pciehw_notify_intr(port);
+}
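Editor's note: a minimal embedding sketch, not part of the patch. It brings the service up in pure polling mode on port 0 and services it from a caller-owned loop, assuming the pciesvc_params_t layout validated by params_v0_to_lparams() above; the function name example_start_polled and the iteration count are hypothetical.

static int
example_start_polled(void)
{
    pciesvc_params_t params;
    int i;

    pciesvc_memset(&params, 0, sizeof(params));
    params.version = 0;
    params.params_v0.port = 0;
    params.params_v0.ind_poll = 1;    /* poll for indirect work */
    params.params_v0.not_poll = 1;    /* poll for notify work */

    if (pciesvc_init(&params) < 0) {
        return -1;
    }
    for (i = 0; i < 1000; i++) {
        /* 1 = work was handled, 0 = idle, -1 = port not initialized */
        if (pciesvc_poll(0) < 0) {
            break;
        }
    }
    pciesvc_shut(0);
    return 0;
}

diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc_impl.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc_impl.h
new file mode 100644
index 0000000000..948cb62012
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc_impl.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021-2022, Pensando Systems Inc.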
+ */ + +#ifndef __PCIESVC_IMPL_H__ +#define __PCIESVC_IMPL_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +#include "pciesvc_system.h" +#include "pciesvc.h" +#include "notify_entry.h" +#include "cfgspace.h" +#include "portcfg.h" +#include "log.h" +#include "asic_regs.h" +#include "pmt.h" + +struct indirect_entry_s; typedef struct indirect_entry_s indirect_entry_t; +struct notify_entry_s; typedef struct notify_entry_s notify_entry_t; + +void pciehw_cfgrd_indirect(const int port, indirect_entry_t *ientry); +void pciehw_cfgwr_indirect(const int port, indirect_entry_t *ientry); +void pciehw_barrd_indirect(const int port, indirect_entry_t *ientry); +void pciehw_barwr_indirect(const int port, indirect_entry_t *ientry); + +void pciehw_cfgrd_notify(const int port, notify_entry_t *nentry); +void pciehw_cfgwr_notify(const int port, notify_entry_t *nentry); +void pciehw_barrd_notify(const int port, notify_entry_t *nentry); +void pciehw_barwr_notify(const int port, notify_entry_t *nentry); + +enum pciesvc_rsttype_e; typedef enum pciesvc_rsttype_e pciesvc_rsttype_t; +void pciehw_cfg_reset(pciehwdev_t *phwdev, const pciesvc_rsttype_t rsttype); + +u_int64_t pciehw_bar_getsize(pciehwbar_t *phwbar); +void pciehw_bar_setaddr(pciehwbar_t *phwbar, const u_int64_t addr); +void pciehw_bar_load(pciehwdev_t *phwdev, pciehwbar_t *phwbar); +void pciehw_bar_enable(pciehwdev_t *phwdev, pciehwbar_t *phwbar, const int on); + +u_int16_t pciehwdev_get_hostbdf(const pciehwdev_t *phwdev); + +#define CLEAN 0 +#define DIRTY 1 + +static inline void +pciesvc_reg_rd32w(const uint64_t pa, uint32_t *w, const uint32_t nw) +{ + int i; + + for (i = 0; i < nw; i++) { + w[i] = pciesvc_reg_rd32(pa + (i * 4)); + } +} + +static inline void +pciesvc_reg_wr32w(const uint64_t pa, const uint32_t *w, const uint32_t nw) +{ + int i; + + for (i = 0; i < nw; i++) { + pciesvc_reg_wr32(pa + (i * 4), w[i]); + } +} + +static inline uint64_t +pciesvc_reg_rd64(const uint64_t pa) +{ + uint64_t val; + uint32_t *w = (uint32_t *)&val; + + pciesvc_reg_rd32w(pa, w, 2); + return val; +} + +static inline void +pciesvc_reg_wr64(const uint64_t pa, const uint64_t val) +{ + const uint32_t *w = (const uint32_t *)&val; + + pciesvc_reg_wr32w(pa, w, 2); +} + +static inline uint64_t +pciesvc_indirect_intr_dest_pa(const int port) +{ + static uint64_t intr_dest_pa[PCIEHW_NPORTS]; + + pciesvc_assert(port >= 0 && port < PCIEHW_NPORTS); + if (intr_dest_pa[port] == 0) { + pciehw_mem_t *phwmem = pciesvc_hwmem_get(); + intr_dest_pa[port] = + pciesvc_vtop(&phwmem->indirect_intr_dest[port]); + } + return intr_dest_pa[port]; +} + +static inline uint64_t +pciesvc_notify_intr_dest_pa(const int port) +{ + static uint64_t intr_dest_pa[PCIEHW_NPORTS]; + + pciesvc_assert(port >= 0 && port < PCIEHW_NPORTS); + if (intr_dest_pa[port] == 0) { + pciehw_mem_t *phwmem = pciesvc_hwmem_get(); + intr_dest_pa[port] = + pciesvc_vtop(&phwmem->notify_intr_dest[port]); + } + return intr_dest_pa[port]; +} + +static inline uint64_t +pciesvc_cfgcur_pa(void) +{ + static uint64_t cfgcur_pa; + + if (cfgcur_pa == 0) { + pciehw_mem_t *phwmem = pciesvc_hwmem_get(); + cfgcur_pa = pciesvc_vtop(phwmem->cfgcur); + } + return cfgcur_pa; +} + +static inline uint64_t +pciesvc_notify_ring_mask(const int port) +{ + static uint64_t ring_mask; + + if (ring_mask == 0) { + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + ring_mask = pshmem->notify_ring_mask; + } + return ring_mask; +} + +static inline notify_entry_t * +pciesvc_notify_ring_get(const int port, const 
int idx) +{ + pciehw_mem_t *phwmem = pciesvc_hwmem_get(); + notify_entry_t *notify_ring; + + notify_ring = (notify_entry_t *)phwmem->notify_area[port]; + return &notify_ring[idx]; +} + +static inline void +pciesvc_notify_ring_put(const notify_entry_t *nentry) +{ + /* nop */ +} + +static inline pciehw_port_t * +pciesvc_port_get(const int port) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + + pciesvc_assert(port >= 0 && port < PCIEHW_NPORTS); + return &pshmem->port[port]; +} + +static inline void +pciesvc_port_put(const pciehw_port_t *p, const int dirty) +{ + /* nop */ +} + +static inline pciehwdev_t * +pciesvc_dev_get(const pciehwdevh_t hwdevh) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + + return hwdevh > 0 && hwdevh < PCIEHW_NDEVS ? &pshmem->dev[hwdevh] : NULL; +} + +static inline void +pciesvc_dev_put(const pciehwdev_t *phwdev, const int dirty) +{ + /* nop */ +} + +static inline void +pciesvc_cfgspace_get(const pciehwdevh_t hwdevh, cfgspace_t *cs) +{ + pciehw_mem_t *phwmem = pciesvc_hwmem_get(); + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + + cs->cur = phwmem->cfgcur[hwdevh]; + cs->msk = pshmem->cfgmsk[hwdevh]; + cs->rst = pshmem->cfgrst[hwdevh]; + cs->size = PCIEHW_CFGSZ; +} + +static inline void +pciesvc_cfgspace_put(const cfgspace_t *cs, const int dirty) +{ + /* nop */ +} + +static inline pciehw_spmt_t * +pciesvc_spmt_get(const int idx) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + + return &pshmem->spmt[idx]; +} + +static inline void +pciesvc_spmt_put(const pciehw_spmt_t *spmt, const int dirty) +{ + /* nop */ +} + +static inline pciehw_sprt_t * +pciesvc_sprt_get(const int idx) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + + return &pshmem->sprt[idx]; +} + +static inline void +pciesvc_sprt_put(const pciehw_sprt_t *sprt, const int dirty) +{ + /* nop */ +} + +static inline void * +pciesvc_vpd_get(const pciehwdevh_t hwdevh) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + + return &pshmem->vpddata[hwdevh]; +} + +static inline void +pciesvc_vpd_put(const void *vpddata, const int dirty) +{ + /* nop */ +} + +static inline pciehwdev_t * +pciehwdev_get(const pciehwdevh_t hwdevh) +{ + pciehwdev_t *phwdev = pciesvc_dev_get(hwdevh); + + /* older fw versions didn't init hwdevh, set it now */ + if (phwdev && phwdev->hwdevh != hwdevh) { + phwdev->hwdevh = hwdevh; + } + return phwdev; +} + +static inline void +pciehwdev_put(const pciehwdev_t *phwdev, const int dirty) +{ + pciesvc_dev_put(phwdev, dirty); +} + +static inline pciehwdevh_t +pciehwdev_geth(const pciehwdev_t *phwdev) +{ + return phwdev ? phwdev->hwdevh : 0; +} + +static inline const char * +pciehwdev_get_name(const pciehwdev_t *phwdev) +{ + return phwdev->name; +} + +static inline pciehwdev_t * +pciehwdev_vfdev_get(const pciehwdev_t *phwdev, const int vfidx) +{ + pciesvc_assert(vfidx >= 0 && vfidx < phwdev->totalvfs); + return pciehwdev_get(phwdev->childh + vfidx); +} + +static inline void +pciehwdev_vfdev_put(const pciehwdev_t *phwdev, const int dirty) +{ + pciehwdev_put(phwdev, dirty); +} + +/* + * roundup_power2 - Round up to next power of 2.
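+ * e.g. roundup_power2(0x900) == 0x1000; a power of 2 is returned unchanged.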
+ */ +static inline u_int64_t +roundup_power2(u_int64_t n) +{ + while (n & (n - 1)) { + n = (n | (n - 1)) + 1; + } + return n; +} + +static inline u_int64_t +rounddn_power2(u_int64_t n) +{ + return roundup_power2(n + 1) >> 1; +} + +#ifdef __cplusplus +} +#endif + +#endif /* __PCIESVC_IMPL_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc_system.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc_system.h new file mode 100644 index 0000000000..a8f29bca55 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc_system.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021-2022, Pensando Systems Inc. + */ + +#ifndef __PCIESVC_SYSTEM_H__ +#define __PCIESVC_SYSTEM_H__ + +/* + * Some build environments bring a customized version of these + * "system" functions (e.g. Linux kernel). If building for + * one of these environments build with -DPCIESVC_SYSTEM_EXTERN + * and provide a matching "pciesvc_system_extern.h", + * otherwise we pick up the local "system" functions. + */ +#ifdef PCIESVC_SYSTEM_EXTERN +#include "pciesvc_system_extern.h" +#else +#include "pciesvc_system_local.h" +#endif + +#endif /* __PCIESVC_SYSTEM_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc_system_local.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc_system_local.h new file mode 100644 index 0000000000..a191772644 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pciesvc_system_local.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021-2022, Pensando Systems Inc. + */ + +#ifndef __PCIESVC_SYSTEM_LOCAL_H__ +#define __PCIESVC_SYSTEM_LOCAL_H__ + +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <stddef.h> +#include <stdarg.h> +#define __USE_GNU +#include <string.h> +#include <strings.h> +#include <assert.h> +#include <unistd.h> +#include <endian.h> +#include <inttypes.h> + +#include "platform/pal/include/pal.h" +#include "platform/pciemgr/include/pciemgr.h" +#include "platform/pciesvc/include/pciesvc.h" + +#define pciesvc_shmem_get pciehw_get_shmem +#define pciesvc_hwmem_get pciehw_get_hwmem +#define pciesvc_vtop pal_mem_vtop +#define pciesvc_reg_rd32 pal_reg_rd32 +#define pciesvc_reg_wr32 pal_reg_wr32 +#define pciesvc_pciepreg_rd32 pal_pciepreg_rd32 +#define pciesvc_pciepreg_wr32 pal_pciepreg_wr32 +#define pciesvc_mem_barrier PAL_barrier +#define pciesvc_memset memset +#define pciesvc_memcpy memcpy +#define pciesvc_memcpy_toio memcpy +#define pciesvc_assert assert +#define pciesvc_usleep usleep +#define pciesvc_ffs ffs +#define pciesvc_ffsll ffsll +#define pciesvc_snprintf snprintf +#define pciesvc_vsnprintf vsnprintf + +#define pciesvc_htobe32 htobe32 +#define pciesvc_be32toh be32toh +#define pciesvc_htobe16 htobe16 +#define pciesvc_be16toh be16toh +#define pciesvc_htole32 htole32 +#define pciesvc_le32toh le32toh + +#define pciesvc_loglocal pciesys_loginfo + +#define pciesvc_logdebug(args...) do { \ + if (PCIESVC_LOGPRI_DEBUG >= pciesvc_log_level) { \ + pciesys_logdebug(args); \ + } } while(0) + +#define pciesvc_loginfo(args...) do { \ + if (PCIESVC_LOGPRI_INFO >= pciesvc_log_level) { \ + pciesys_loginfo(args); \ + } } while(0) + +#define pciesvc_logwarn(args...) do { \ + if (PCIESVC_LOGPRI_WARN >= pciesvc_log_level) { \ + pciesys_logwarn(args); \ + } } while(0) + +#define pciesvc_logerror(args...)
do { \ + if (PCIESVC_LOGPRI_ERROR >= pciesvc_log_level) { \ + pciesys_logerror(args); \ + } } while(0) + +typedef union { + u_int32_t l; + u_int16_t h[2]; + u_int8_t b[4]; +} iodata_t; + +static inline int +pciesvc_mem_rd(const uint64_t pa, void *buf, const size_t sz) +{ + uint64_t pa_aligned; + uint8_t idx; + iodata_t v; + + switch (sz) { + case 1: + pa_aligned = pa & ~0x3; + idx = pa & 0x3; + v.l = pal_reg_rd32(pa_aligned); + *(uint8_t *)buf = v.b[idx]; + break; + case 2: + pa_aligned = pa & ~0x3; + idx = (pa & 0x3) >> 1; + v.l = pal_reg_rd32(pa_aligned); + *(uint16_t *)buf = v.h[idx]; + break; + case 4: + case 8: + pal_reg_rd32w(pa, (uint32_t *)buf, sz >> 2); + break; + default: + return -1; + } + return 0; +} + +static inline void +pciesvc_mem_wr(const uint64_t pa, const void *buf, const size_t sz) +{ + uint64_t pa_aligned; + uint8_t idx; + iodata_t v; + + switch (sz) { + case 1: + pa_aligned = pa & ~0x3; + idx = pa & 0x3; + v.l = pal_reg_rd32(pa_aligned); + v.b[idx] = *(uint8_t *)buf; + pal_reg_wr32(pa_aligned, v.l); + break; + case 2: + pa_aligned = pa & ~0x3; + idx = (pa & 0x3) >> 1; + v.l = pal_reg_rd32(pa_aligned); + v.h[idx] = *(uint16_t *)buf; + pal_reg_wr32(pa_aligned, v.l); + break; + case 4: + case 8: + pal_reg_wr32w(pa, (uint32_t *)buf, sz >> 2); + break; + default: + break; + } +} + +static inline int +pciesvc_event_handler(const void *evdata, const size_t evsize) +{ + pciehdev_event(evdata); + return 0; +} + +static inline void +pciesvc_debug_cmd(uint32_t *valp) +{ + uint32_t delayus = *valp; + + if (delayus) { + pciesvc_logdebug("cfgrd delay %uus\n", delayus); + pciesvc_usleep(delayus); + } +} + +#endif /* __PCIESVC_SYSTEM_LOCAL_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pcietlp.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pcietlp.c new file mode 100644 index 0000000000..1c931644a1 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pcietlp.c @@ -0,0 +1,789 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018,2021-2022, Pensando Systems Inc. + */ + +#include "pciesvc_impl.h" +#include "pcietlp.h" +#include "bdf.h" + +typedef struct pcietlp_info_s { + unsigned int error:1; + char error_str[80]; +} pcietlp_info_t; + +static pcietlp_info_t pcietlp_info; + +static inline int +bitcount(u_int32_t n) +{ + int count = 0; + + while (n) { + count++; + n &= ~(-n); /* clear low order 1 bit */ + } + return count; +} + +static int pcietlp_set_error(const char *fmt, ...) + __attribute__((format (printf, 1, 2))); +static int pcietlp_set_error(const char *fmt, ...) 
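+/* only the first error is recorded; it is kept until pcietlp_clr_error() */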
+{ + pcietlp_info_t *pi = &pcietlp_info; + + if (pi->error == 0) { + va_list ap; + + va_start(ap, fmt); + pciesvc_vsnprintf(pi->error_str, sizeof(pi->error_str), fmt, ap); + va_end(ap); + pi->error = 1; + } + return -1; +} + +static void +pcietlp_clr_error(void) +{ + pcietlp_info_t *pi = &pcietlp_info; + pi->error_str[0] = '\0'; + pi->error = 0; +} + +static int +pcietlp_is_error(void) +{ + pcietlp_info_t *pi = &pcietlp_info; + return pi->error; +} + +char * +pcietlp_get_error(void) +{ + pcietlp_info_t *pi = &pcietlp_info; + return pi->error_str; +} + +static u_int32_t +stlp_dw(const pcie_stlp_t *stlp) +{ + const u_int64_t dw_start = stlp->addr >> 2; + const u_int64_t dw_end = (stlp->addr + stlp->size + 3) >> 2; + + return dw_end - dw_start; +} + +static u_int32_t +stlp_fbe(const pcie_stlp_t *stlp) +{ + static const u_int8_t betab[4] = { 0xf, 0xe, 0xc, 0x8 }; + + if (stlp_dw(stlp) <= 1) { + u_int8_t fbe = (1 << stlp->size) - 1; + return fbe << (stlp->addr & 0x3); + } + return betab[stlp->addr & 0x3]; +} + +static u_int32_t +stlp_lbe(const pcie_stlp_t *stlp) +{ + static const u_int8_t betab[4] = { 0xf, 0x8, 0xc, 0xe }; + + /* ndw == 1 all encoded in fbe, no lbe bits */ + if (stlp_dw(stlp) <= 1) return 0; + + return betab[(stlp->addr + stlp->size) & 0x3]; +} + +/******************************************************************/ + +static void +encode_addr32(const pcie_stlp_t *stlp, u_int32_t *addrp) +{ + addrp[0] = pciesvc_htobe32(stlp->addr & ~0x3); /* DW-align addr */ +} + +static void +decode_addr32(pcie_stlp_t *stlp, const u_int32_t addr) +{ + stlp->addr += pciesvc_be32toh(addr); +} + +/******************************************************************/ + +static void +encode_addr64(const pcie_stlp_t *stlp, u_int32_t *addrp) +{ + addrp[0] = pciesvc_htobe32(stlp->addr >> 32); + addrp[1] = pciesvc_htobe32(stlp->addr & ~0x3); /* DW-align addr */ +} + +static void +decode_addr64(pcie_stlp_t *stlp, const u_int32_t *addrp) +{ + stlp->addr += ((u_int64_t)pciesvc_be32toh(addrp[0]) << 32) | + pciesvc_be32toh(addrp[1]); +} + +/******************************************************************/ + +static void +encode_data32(const pcie_stlp_t *stlp, u_int32_t *datap) +{ + u_int32_t v = stlp->data; + + /* shift data over to byte lanes based on addr */ + v <<= (stlp->addr & 0x3) * 8; + + datap[0] = pciesvc_htole32(v); +} + +static void +decode_data32(pcie_stlp_t *stlp, const u_int32_t *datap) +{ + const u_int32_t v = pciesvc_le32toh(*datap); + + stlp->data = v >> ((stlp->addr & 0x3) * 8); + + /* mask off unused byte lanes */ + if (stlp->size < 4) { + const u_int32_t datamask = (1 << stlp->size * 8) - 1; + stlp->data &= datamask; + } +} + +/******************************************************************/ + +static void +encode_data64(const pcie_stlp_t *stlp, u_int32_t *datap) +{ + u_int64_t v = stlp->data; + + /* shift data over to byte lanes based on addr */ + v <<= (stlp->addr & 0x3) * 8; + + datap[0] = pciesvc_htole32(v); + datap[1] = pciesvc_htole32(v >> 32); +} + +static void +decode_data64(pcie_stlp_t *stlp, const u_int32_t *datap) +{ + const u_int64_t v = (pciesvc_le32toh(datap[0]) | + (u_int64_t)pciesvc_le32toh(datap[1]) << 32); + + stlp->data = v >> ((stlp->addr & 0x3) * 8); + + /* mask off unused byte lanes */ + if (stlp->size < 8) { + const u_int64_t datamask = (1ULL << stlp->size * 8) - 1; + stlp->data &= datamask; + } +} + +/******************************************************************/ + +static void +encode_data(const pcie_stlp_t *stlp, u_int32_t *datap) +{ + if (stlp_dw(stlp) 
<= 1) { + encode_data32(stlp, datap); + } else { + encode_data64(stlp, datap); + } +} + +static void +decode_data(pcie_stlp_t *stlp, const u_int32_t *datap) +{ + if (stlp_dw(stlp) <= 1) { + decode_data32(stlp, datap); + } else { + decode_data64(stlp, datap); + } +} + +/******************************************************************/ + +static void +encode_cmn_hdr(const pcie_stlp_t *stlp, const u_int8_t type, void *rtlp) +{ + pcie_tlp_common_hdr_t *hdr = rtlp; + u_int16_t ndw = stlp_dw(stlp); + + hdr->type = type; + + if (ndw == 0) { + /* 0-length transaction is allowed, ndw=1 and be=0 */ + ndw = 1; + } else if (ndw == 0x400) { + /* 0x400 dw encoded as len=0 */ + ndw = 0; + } else if (ndw > 0x400) { + /* can't encode > 0x400 */ + pcietlp_set_error("encode_cmn_hdr: ndw %d > 0x400", ndw); + return; + } + hdr->len_lo = ndw; + hdr->len_hi = ndw >> 8; + hdr->reqid = pciesvc_htobe16(stlp->reqid); + hdr->tag = stlp->tag; + hdr->t8 = stlp->tag >> 8; + hdr->t9 = stlp->tag >> 9; + hdr->fbe = stlp_fbe(stlp); + hdr->lbe = stlp_lbe(stlp); +} + +static void +decode_cmn_hdr(pcie_stlp_t *stlp, const void *rtlp) +{ + const pcie_tlp_common_hdr_t *hdr = rtlp; + const u_int8_t be_dw = (hdr->fbe > 0) + (hdr->lbe > 0); + const u_int8_t be_bits = bitcount(hdr->fbe) + bitcount(hdr->lbe); + const u_int8_t ffbe = pciesvc_ffs(hdr->fbe); + u_int16_t ndw = (hdr->len_hi << 8) | hdr->len_lo; + + /* ndw=0 indicates max 0x400 */ + if (ndw == 0) ndw = 0x400; + + /* Compute size. Start with ndw, then adjust for the Byte Enable bits. */ + if (ndw == 1 && !be_bits) { + stlp->size = 0; + } else { + stlp->size = ((ndw - be_dw) << 2) + be_bits; + } + + /* addr start depends on first First Byte Enable bit position.*/ + stlp->addr = ffbe ? ffbe - 1 : 0; + + stlp->reqid = pciesvc_be16toh(hdr->reqid); + stlp->tag = (hdr->t9 << 9) | (hdr->t8 << 8) | hdr->tag; +} + +/******************************************************************/ + +static void +encode_cfg_hdr(const pcie_stlp_t *stlp, const u_int8_t type, void *rtlp) +{ + pcie_tlp_cfg_t *cfg = rtlp; + + encode_cmn_hdr(stlp, type, cfg); + cfg->bdf = pciesvc_htobe16(stlp->bdf); + cfg->reg = stlp->addr & ~0x3; /* DW-aligned reg */ + cfg->extreg = stlp->addr >> 8; +} + +static void +decode_cfg_hdr(pcie_stlp_t *stlp, const void *rtlp) +{ + const pcie_tlp_cfg_t *cfg = rtlp; + + decode_cmn_hdr(stlp, cfg); + stlp->bdf = pciesvc_be16toh(cfg->bdf); + stlp->addr += (cfg->extreg << 8) | cfg->reg; +} + +/******************************************************************/ + +static void +encode_mem32_hdr(const pcie_stlp_t *stlp, const u_int8_t type, void *rtlp) +{ + pcie_tlp_mem32_t *mem = rtlp; + + encode_cmn_hdr(stlp, type, mem); + encode_addr32(stlp, &mem->addr); +} + +static void +decode_mem32_hdr(pcie_stlp_t *stlp, const void *rtlp) +{ + const pcie_tlp_mem32_t *mem = rtlp; + + decode_cmn_hdr(stlp, mem); + decode_addr32(stlp, mem->addr); +} + +/******************************************************************/ + +static void +encode_mem64_hdr(const pcie_stlp_t *stlp, const u_int8_t type, void *rtlp) +{ + pcie_tlp_mem64_t *mem = rtlp; + + encode_cmn_hdr(stlp, type, mem); + encode_addr64(stlp, &mem->addr_hi); +} + +static void +decode_mem64_hdr(pcie_stlp_t *stlp, const void *rtlp) +{ + const pcie_tlp_mem64_t *mem = rtlp; + + decode_cmn_hdr(stlp, mem); + decode_addr64(stlp, &mem->addr_hi); +} + +/****************************************************************** + * CFG + */ + +static int +encode_cfgrd(const pcie_stlp_t *stlp, void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 12; + + 
if (rtlpsz < tlpsz) { + return pcietlp_set_error("cfgrd: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + if (stlp->size > 4) { + return pcietlp_set_error("cfgrd: size %d > 4", stlp->size); + } + + encode_cfg_hdr(stlp, PCIE_TLP_TYPE_CFGRD0, rtlp); + return tlpsz; +} + +static int +decode_cfgrd(pcie_stlp_t *stlp, const void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 12; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("cfgrd: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + decode_cfg_hdr(stlp, rtlp); + return tlpsz; +} + +/******************************************************************/ + +static int +encode_cfgwr(const pcie_stlp_t *stlp, void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 16; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("cfgwr: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + encode_cfg_hdr(stlp, PCIE_TLP_TYPE_CFGWR0, rtlp); + encode_data32(stlp, rtlp + 12); + return tlpsz; +} + +static int +decode_cfgwr(pcie_stlp_t *stlp, const void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 16; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("cfgwr: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + decode_cfg_hdr(stlp, rtlp); + decode_data32(stlp, rtlp + 12); + return tlpsz; +} + +/****************************************************************** + * MEM + */ + +static int +encode_memrd(const pcie_stlp_t *stlp, void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 12; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("memrd: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + encode_mem32_hdr(stlp, PCIE_TLP_TYPE_MEMRD, rtlp); + return tlpsz; +} + +static int +decode_memrd(pcie_stlp_t *stlp, const void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 12; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("memrd: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + decode_mem32_hdr(stlp, rtlp); + return tlpsz; +} + +/******************************************************************/ + +static int +encode_memwr(const pcie_stlp_t *stlp, void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 12 + stlp_dw(stlp) * 4; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("memwr: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + if (stlp->size > 8) { + /* stlp data is only 8 bytes */ + return pcietlp_set_error("memwr: size %d > 8", stlp->size); + } + + encode_mem32_hdr(stlp, PCIE_TLP_TYPE_MEMWR, rtlp); + encode_data(stlp, rtlp + 12); + return tlpsz; +} + +static int +decode_memwr(pcie_stlp_t *stlp, const void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 12; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("memwr: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + decode_mem32_hdr(stlp, rtlp); + + if (rtlpsz < tlpsz + stlp->size) { + return pcietlp_set_error("memwr: rtlpsz want %d got %ld", + tlpsz + stlp->size, rtlpsz); + } + + decode_data(stlp, rtlp + 12); + return tlpsz + stlp->size; +} + +/****************************************************************** + * MEM 64 + */ + +static int +encode_memrd64(const pcie_stlp_t *stlp, void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 16; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("memrd64: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + encode_mem64_hdr(stlp, PCIE_TLP_TYPE_MEMRD64, rtlp); + return tlpsz; +} + +static int +decode_memrd64(pcie_stlp_t *stlp, const void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 16; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("memrd64: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + 
decode_mem64_hdr(stlp, rtlp); + return tlpsz; +} + +/******************************************************************/ + +static int +encode_memwr64(const pcie_stlp_t *stlp, void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 16 + stlp_dw(stlp) * 4; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("memwr64: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + if (stlp->size > 8) { + /* stlp data is only 8 bytes */ + return pcietlp_set_error("memwr64: size %d > 8", stlp->size); + } + + encode_mem64_hdr(stlp, PCIE_TLP_TYPE_MEMWR64, rtlp); + encode_data(stlp, rtlp + 16); + return tlpsz; +} + +static int +decode_memwr64(pcie_stlp_t *stlp, const void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 16; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("memwr64: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + decode_mem64_hdr(stlp, rtlp); + + if (rtlpsz < tlpsz + stlp->size) { + return pcietlp_set_error("memwr64: rtlpsz want %d got %ld", + tlpsz + stlp->size, rtlpsz); + } + + decode_data(stlp, rtlp + 16); + return tlpsz + stlp->size; +} + +/****************************************************************** + * IO + */ + +static int +encode_iord(const pcie_stlp_t *stlp, void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 12; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("iord: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + if (stlp->size > 4) { + return pcietlp_set_error("iord: size %d > 4", stlp->size); + } + + encode_mem32_hdr(stlp, PCIE_TLP_TYPE_IORD, rtlp); + return tlpsz; +} + +static int +decode_iord(pcie_stlp_t *stlp, const void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 12; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("iord: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + decode_mem32_hdr(stlp, rtlp); + return tlpsz; +} + +/******************************************************************/ + +static int +encode_iowr(const pcie_stlp_t *stlp, void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 16; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("iowr: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + if (stlp->size > 4) { + return pcietlp_set_error("iowr: size %d > 4", stlp->size); + } + + encode_mem32_hdr(stlp, PCIE_TLP_TYPE_IOWR, rtlp); + encode_data(stlp, rtlp + 12); + return tlpsz; +} + +static int +decode_iowr(pcie_stlp_t *stlp, const void *rtlp, const size_t rtlpsz) +{ + const int tlpsz = 16; + + if (rtlpsz < tlpsz) { + return pcietlp_set_error("iowr: rtlpsz want %d got %ld", + tlpsz, rtlpsz); + } + + decode_mem32_hdr(stlp, rtlp); + decode_data(stlp, rtlp + 12); + return tlpsz; +} + +/******************************************************************/ + +int +pcietlp_encode(const pcie_stlp_t *stlp, void *rtlp, const size_t rtlpsz) +{ + int n; + + pciesvc_memset(rtlp, 0, rtlpsz); + pcietlp_clr_error(); + switch (stlp->type) { + case PCIE_STLP_CFGRD: + n = encode_cfgrd(stlp, rtlp, rtlpsz); + break; + case PCIE_STLP_CFGWR: + n = encode_cfgwr(stlp, rtlp, rtlpsz); + break; + case PCIE_STLP_MEMRD: + n = encode_memrd(stlp, rtlp, rtlpsz); + break; + case PCIE_STLP_MEMWR: + n = encode_memwr(stlp, rtlp, rtlpsz); + break; + case PCIE_STLP_MEMRD64: + n = encode_memrd64(stlp, rtlp, rtlpsz); + break; + case PCIE_STLP_MEMWR64: + n = encode_memwr64(stlp, rtlp, rtlpsz); + break; + case PCIE_STLP_IORD: + n = encode_iord(stlp, rtlp, rtlpsz); + break; + case PCIE_STLP_IOWR: + n = encode_iowr(stlp, rtlp, rtlpsz); + break; + default: + pcietlp_set_error("encode: unhandled type 0x%x", stlp->type); + n = -1; + break; + } + return pcietlp_is_error() ? 
-1 : n; +} + +int +pcietlp_decode(pcie_stlp_t *stlp, const void *rtlp, const size_t rtlpsz) +{ + const pcie_tlp_common_hdr_t *hdr = rtlp; + int n; + + pcietlp_clr_error(); + switch (hdr->type) { + case PCIE_TLP_TYPE_MEMRD: + stlp->type = PCIE_STLP_MEMRD; + n = decode_memrd(stlp, rtlp, rtlpsz); + break; + case PCIE_TLP_TYPE_MEMRD64: + stlp->type = PCIE_STLP_MEMRD64; + n = decode_memrd64(stlp, rtlp, rtlpsz); + break; + case PCIE_TLP_TYPE_MEMWR: + stlp->type = PCIE_STLP_MEMWR; + n = decode_memwr(stlp, rtlp, rtlpsz); + break; + case PCIE_TLP_TYPE_MEMWR64: + stlp->type = PCIE_STLP_MEMWR64; + n = decode_memwr64(stlp, rtlp, rtlpsz); + break; + case PCIE_TLP_TYPE_IORD: + stlp->type = PCIE_STLP_IORD; + n = decode_iord(stlp, rtlp, rtlpsz); + break; + case PCIE_TLP_TYPE_IOWR: + stlp->type = PCIE_STLP_IOWR; + n = decode_iowr(stlp, rtlp, rtlpsz); + break; + case PCIE_TLP_TYPE_CFGRD0: + stlp->type = PCIE_STLP_CFGRD; + n = decode_cfgrd(stlp, rtlp, rtlpsz); + break; + case PCIE_TLP_TYPE_CFGWR0: + stlp->type = PCIE_STLP_CFGWR; + n = decode_cfgwr(stlp, rtlp, rtlpsz); + break; + case PCIE_TLP_TYPE_CFGRD1: + stlp->type = PCIE_STLP_CFGRD1; + n = decode_cfgrd(stlp, rtlp, rtlpsz); + break; + case PCIE_TLP_TYPE_CFGWR1: + stlp->type = PCIE_STLP_CFGWR1; + n = decode_cfgwr(stlp, rtlp, rtlpsz); + break; + default: + pcietlp_set_error("decode: unhandled type 0x%x\n", hdr->type); + n = -1; + break; + } + return pcietlp_is_error() ? -1 : n; +} + +/******************************************************************/ + +char * +pcietlp_buf(const pcie_stlp_t *stlp, void *buf, const size_t bufsz) +{ + switch (stlp->type) { + case PCIE_STLP_CFGRD: + pciesvc_snprintf(buf, bufsz, + "CFGRD %s 0x%08"PRIx64" size %d tag %d", + bdf_to_str(stlp->bdf), stlp->addr, stlp->size, stlp->tag); + break; + case PCIE_STLP_CFGWR: + pciesvc_snprintf(buf, bufsz, + "CFGWR %s 0x%08"PRIx64" size %d tag %d 0x%0*"PRIx64"", + bdf_to_str(stlp->bdf), stlp->addr, stlp->size, stlp->tag, + stlp->size * 2, stlp->data); + break; + case PCIE_STLP_CFGRD1: + pciesvc_snprintf(buf, bufsz, + "CFGRD1 %s 0x%08"PRIx64" size %d tag %d", + bdf_to_str(stlp->bdf), stlp->addr, stlp->size, stlp->tag); + break; + case PCIE_STLP_CFGWR1: + pciesvc_snprintf(buf, bufsz, + "CFGWR1 %s 0x%08"PRIx64" size %d tag %d 0x%0*"PRIx64"", + bdf_to_str(stlp->bdf), stlp->addr, stlp->size, stlp->tag, + stlp->size * 2, stlp->data); + break; + case PCIE_STLP_MEMRD: + pciesvc_snprintf(buf, bufsz, + "MEMRD 0x%08"PRIx64" size %d tag %d", + stlp->addr, stlp->size, stlp->tag); + break; + case PCIE_STLP_MEMWR: + pciesvc_snprintf(buf, bufsz, + "MEMWR 0x%08"PRIx64" size %d tag %d 0x%0*"PRIx64"", + stlp->addr, stlp->size, stlp->tag, + stlp->size * 2, stlp->data); + break; + case PCIE_STLP_MEMRD64: + pciesvc_snprintf(buf, bufsz, + "MEMRD64 0x%08"PRIx64" size %d tag %d", + stlp->addr, stlp->size, stlp->tag); + break; + case PCIE_STLP_MEMWR64: + pciesvc_snprintf(buf, bufsz, + "MEMWR64 0x%08"PRIx64" size %d tag %d 0x%0*"PRIx64"", + stlp->addr, stlp->size, stlp->tag, + stlp->size * 2, stlp->data); + break; + case PCIE_STLP_IORD: + pciesvc_snprintf(buf, bufsz, + "IORD 0x%08"PRIx64" size %d tag %d", + stlp->addr, stlp->size, stlp->tag); + break; + case PCIE_STLP_IOWR: + pciesvc_snprintf(buf, bufsz, + "IOWR 0x%08"PRIx64" size %d tag %d 0x%0*"PRIx64"", + stlp->addr, stlp->size, stlp->tag, + stlp->size * 2, stlp->data); + break; + case PCIE_STLP_MALFORMED: + pciesvc_snprintf(buf, bufsz, "MALFORMED"); + break; + default: + pciesvc_snprintf(buf, bufsz, "UNKNOWN type %d", stlp->type); + break; + } + return buf; +} + 
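+/*
+ * Illustrative sketch, not upstream code: a minimal round-trip through
+ * pcietlp_encode()/pcietlp_decode() for a DW-aligned 4-byte MEMWR.
+ * The address and data values here are hypothetical.
+ */
+static inline void
+pcietlp_example_roundtrip(void)
+{
+    pcie_stlp_t stlp, out;
+    u_int32_t rtlp[8];
+    int n;
+
+    pciesvc_memset(&stlp, 0, sizeof(stlp));
+    stlp.type = PCIE_STLP_MEMWR;
+    stlp.addr = 0x10001000;     /* DW-aligned, so fbe = 0xf, no lbe */
+    stlp.size = 4;              /* one DW of payload */
+    stlp.data = 0x12345678;
+
+    n = pcietlp_encode(&stlp, rtlp, sizeof(rtlp));  /* 16 = 12B hdr + 1 DW */
+    if (n > 0) {
+        pciesvc_memset(&out, 0, sizeof(out));
+        n = pcietlp_decode(&out, rtlp, n);
+        /* out.addr == 0x10001000, out.size == 4, out.data == 0x12345678 */
+    }
+}
+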
+char * +pcietlp_str(const pcie_stlp_t *stlp) +{ + static char buf[80]; + + return pcietlp_buf(stlp, buf, sizeof(buf)); +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pmt.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pmt.c new file mode 100644 index 0000000000..8eebb4c781 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pmt.c @@ -0,0 +1,947 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2018,2021-2022, Pensando Systems Inc. + */ + +#include "pciesvc_impl.h" +#include "bdf.h" +#include "prt.h" +#include "pmt.h" + +#define PMT_BASE PXB_(DHS_TGT_PMT) +#define PMT_STRIDE \ + (ASIC_(PXB_CSR_DHS_TGT_PMT_ENTRY_ARRAY_ELEMENT_SIZE) * 4 * 8) +#define PMT_GRST PXB_(CFG_TGT_PMT_GRST) +#define PMR_BASE PXB_(DHS_TGT_PMR) +#define PMR_STRIDE ASIC_(PXB_CSR_DHS_TGT_PMR_ENTRY_BYTE_SIZE) + +static int +pmt_count(void) +{ + return PMT_COUNT; +} + +static void +assert_pmti_in_range(const int pmti) +{ + pciesvc_assert(pmti >= 0 && pmti < pmt_count()); +} + +static void +assert_pmts_in_range(const int pmtb, const int pmtc) +{ + if (pmtc > 0) { + assert_pmti_in_range(pmtb); + assert_pmti_in_range(pmtb + pmtc - 1); + } +} + +static u_int64_t +pmt_addr(const int pmti) +{ + assert_pmti_in_range(pmti); + return PMT_BASE + (pmti * PMT_STRIDE); +} + +static u_int64_t +pmr_addr(const int pmti) +{ + assert_pmti_in_range(pmti); + return PMR_BASE + (pmti * PMR_STRIDE); +} + +static int +pmt_alloc_high(const int n) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + pciehw_spmt_t *spmt; + int pmti = -1; + + if (n == 1 && pshmem->freepmt_high != PMT_INVALID) { + /* alloc a single entry from free list */ + pmti = pshmem->freepmt_high; + spmt = pciesvc_spmt_get(pmti); + pshmem->freepmt_high = spmt->next; + spmt->next = PMT_INVALID; + pciesvc_spmt_put(spmt, DIRTY); + } else if (pshmem->allocpmt_high + n <= pshmem->allocpmt_low) { + /* alloc multiple entries from sequential block */ + pmti = pshmem->allocpmt_high; + pshmem->allocpmt_high += n; + } + return pmti; +} + +static int +pmt_alloc_low(const int n) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + pciehw_spmt_t *spmt; + int pmti = -1; + + if (n == 1 && pshmem->freepmt_low != PMT_INVALID) { + /* alloc a single entry from free list */ + pmti = pshmem->freepmt_low; + spmt = pciesvc_spmt_get(pmti); + pshmem->freepmt_low = spmt->next; + spmt->next = PMT_INVALID; + pciesvc_spmt_put(spmt, DIRTY); + } else if (pshmem->allocpmt_low - n >= pshmem->allocpmt_high) { + /* alloc multiple entries from sequential block */ + pshmem->allocpmt_low -= n; + pmti = pshmem->allocpmt_low; + } + return pmti; +} + +static int +pmt_alloc_vf0adj(const int n) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + int pmti = -1; + + /* if no reserved vf0adj region alloc from high pri */ + if (pshmem->allocpmt_vf0adj == -1) { + pmti = pmt_alloc_high(n); + } else if (pshmem->allocpmt_vf0adj + n <= pmt_count()) { + pmti = pshmem->allocpmt_vf0adj; + pshmem->allocpmt_vf0adj += n; + } + return pmti; +} + +/* + * pmt_alloc - supports multiple priority regions in the tcam. + * We want both PMTPRI_HIGH and PMTPRI_LOW regions to + * be able to grow, LOW to support adding flexvfs and + * HIGH to support adding overrides. + * + * Once the base config is configured and all pmts allocated + * then only HIGH grows down to allow the max number of overrides. 
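+ *
+ * pmt_alloc(n, pri) returns the base index of "n" consecutive entries,
+ * or -1 if the requested region is exhausted.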
+ * + * +----------------+ <== 0 + * | PMTPRI_HIGH | <== pshmem->allocpmt_high + * | grows down | + * +----------------+ + * | PMTPRI_LOW | + * | grows up | <== pshmem->allocpmt_low + * +----------------+ + * | PMTPRI_VF0ADJ | <== pshmem->allocpmt_vf0adj + * | grows down | + * +----------------+ + * <== pmt_count() + * + * PMTPRI_HIGH - config, PF bars, VF flexvf overrides + * + * PMTPRI_LOW - VF flexvf base entries in this region + * + * PMTPRI_VF0ADJ - if we have flexvf then adjust_vf0 entries come + * from this region. We want the priority to be lower + * than LOW so entries are lower than the flexvf base entries. + * This region grows down to meet the expectations of the + * user but is low priority so entries can be overridden + * by flexvf overrides in the HIGH region. + */ +int +pmt_alloc(const int n, const int pri) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + int pmti = -1; + + pciesvc_assert(n > 0); + pciesvc_assert(n <= pmt_count()); + + if (!pshmem->pmtpri) { + pshmem->allocpmt_low = pmt_count(); + pshmem->freepmt_high = PMT_INVALID; + pshmem->freepmt_low = PMT_INVALID; + pshmem->allocpmt_vf0adj = -1; + pshmem->freeprt_slab = PRT_INVALID; + pshmem->pmtpri = 1; + } + + switch (pri) { + case PMTPRI_HIGH: + pmti = pmt_alloc_high(n); + break; + case PMTPRI_LOW: + pmti = pmt_alloc_low(n); + break; + case PMTPRI_VF0ADJ: + pmti = pmt_alloc_vf0adj(n); + break; + default: + pciesvc_logerror("pmt_alloc: unknown pri %d\n", pri); + pciesvc_assert(0); + break; + } + + return pmti; +} + +/* + * Reserve a contiguous range from PMTPRI_LOW to be used for + * the vf0adjust range. PMTPRI_LOW grows up but we want + * PMTPRI_VF0ADJ to be lower than LOW priority and grow down. + */ +int +pmt_reserve_vf0adj(const int n) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + + pshmem->allocpmt_vf0adj = pmt_alloc(n, PMTPRI_LOW); + return pshmem->allocpmt_vf0adj; +} + +static int +pmt_to_pri(const int pmti) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + int pmtpri = -1; + + if (pmti >= 0 && pmti <= pshmem->allocpmt_high) { + pmtpri = PMTPRI_HIGH; + } else if (pmti >= pshmem->allocpmt_low && pmti < pmt_count()) { + pmtpri = PMTPRI_LOW; + } + return pmtpri; +} + +static int +spmt_to_pmti(const pciehw_spmt_t *spmt) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + return spmt - pshmem->spmt; +} + +void +pmt_free(const int pmtb, const int pmtc) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + pciehw_spmt_t *spmt; + int pmti; + + assert_pmts_in_range(pmtb, pmtc); + + if (pmt_to_pri(pmtb + pmtc) == PMTPRI_HIGH) { + /* free high pri */ + for (pmti = pmtb; pmti < pmtb + pmtc; pmti++) { + spmt = pciesvc_spmt_get(pmti); + spmt->next = pshmem->freepmt_high; + pciesvc_spmt_put(spmt, DIRTY); + pshmem->freepmt_high = pmti; + } + } else if (pmt_to_pri(pmtb) == PMTPRI_LOW) { + /* free low pri */ + for (pmti = pmtb; pmti < pmtb + pmtc; pmti++) { + spmt = pciesvc_spmt_get(pmti); + spmt->next = pshmem->freepmt_low; + pciesvc_spmt_put(spmt, DIRTY); + pshmem->freepmt_low = pmti; + } + } else { + /* outside of both alloc ranges?
*/ + pciesvc_logerror("pmt_free: leak pmt %d (%d), " + "allocpmt_low %u allocpmt_high %u\n", + pmtb, pmtc, + pshmem->allocpmt_low, + pshmem->allocpmt_high); + } +} + +static void +pmt_get_entry(const int pmti, pmt_entry_t *pmte) +{ + pciesvc_reg_rd32w(pmt_addr(pmti), pmte->w, PMT_NWORDS); +} + +static void +pmt_set_entry(const int pmti, const pmt_entry_t *pmte) +{ + pciesvc_reg_wr32w(pmt_addr(pmti), pmte->w, PMT_NWORDS); +} + +static void +pmr_get_entry(const int pmti, pmr_entry_t *pmre) +{ + pciesvc_reg_rd32w(pmr_addr(pmti), pmre->w, PMR_NWORDS); +} + +static void +pmr_set_entry(const int pmti, const pmr_entry_t *pmre) +{ + pciesvc_reg_wr32w(pmr_addr(pmti), pmre->w, PMR_NWORDS); +} + +/* + * Retrieve an entry from hardware. + */ +void +pmt_get(const int pmti, pmt_t *pmt) +{ + pmt_get_entry(pmti, &pmt->pmte); + pmr_get_entry(pmti, &pmt->pmre); +} + +/* + * Install an entry in hardware at the specified index. + */ +void +pmt_set(const int pmti, const pmt_t *pmt) +{ + /* + * Set PMR entry first, then TCAM, so by the time a tcam search + * can hit an entry the corresponding ram entry is valid too. + */ + pmr_set_entry(pmti, &pmt->pmre); + pmt_set_entry(pmti, &pmt->pmte); +} + +static void +pmt_clr_tcam(const int pmti) +{ + pmt_entry_t pmte0; + + pciesvc_memset(&pmte0, 0, sizeof(pmte0)); + pmt_set_entry(pmti, &pmte0); +} + +/* + * Clear a pmt slot. For now we just invalidate the tcam entry + * so searches don't hit, but we don't write anything to PMR. + */ +static void +pmt_clr(const int pmti) +{ + pmt_clr_tcam(pmti); +} + +/* + * dm->data is the entry data values + * dm->mask is the entry mask bits, + * 1's for bits we want to match in "data", + * 0's for bits in "data" we want to ignore. + * + * {X Y} result + * ------- + * {0 0} (always match) + * {0 1} match if 0 + * {1 0} match if 1 + * {1 1} (never match) + */ +void +pmt_entry_enc(pmt_entry_t *pmte, const pmt_datamask_t *dm) +{ + const u_int64_t data = dm->data.all; + const u_int64_t mask = dm->mask.all; + + pmte->tcam.x = data & mask; + pmte->tcam.y = ~data & mask; + pmte->tcam.v = 1; +} + +/* + * Fetch the dm->data/mask values from this pmt_entry. 
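+ *
+ * Worked example with illustrative values: data=0b1010, mask=0b1110
+ * encode to x = data & mask = 0b1010 and y = ~data & mask = 0b0100;
+ * decode recovers data = x and mask = x ^ y, because
+ * (data & mask) ^ (~data & mask) == mask.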
+ */ +void +pmt_entry_dec(const pmt_entry_t *pmte, pmt_datamask_t *dm) +{ + dm->data.all = pmte->tcam.x; + dm->mask.all = pmte->tcam.x ^ pmte->tcam.y; +} + +void +pmt_bar_set_bdf(pmt_t *pmt, const u_int16_t bdf) +{ + pmr_bar_entry_t *pmr = &pmt->pmre.bar; + pmr->bdf = bdf; +} + +void +pmt_bar_setaddr(pmt_t *pmt, const u_int64_t addr) +{ + pmt_datamask_t dm; + + pmt_entry_dec(&pmt->pmte, &dm); + dm.data.bar.addrdw = addr >> 2; + pmt_entry_enc(&pmt->pmte, &dm); +} + +void +pmt_bar_setaddrm(pmt_t *pmt, const u_int64_t addr, const u_int64_t mask) +{ + pmt_datamask_t dm; + + pmt_entry_dec(&pmt->pmte, &dm); + dm.data.bar.addrdw = addr >> 2; + dm.mask.bar.addrdw = mask >> 2; + pmt_entry_enc(&pmt->pmte, &dm); +} + +u_int64_t +pmt_bar_getaddr(const pmt_t *pmt) +{ + pmt_datamask_t dm; + + pmt_entry_dec(&pmt->pmte, &dm); + return dm.data.bar.addrdw << 2; +} + +u_int64_t +pmt_bar_getaddrmask(const pmt_t *pmt) +{ + pmt_datamask_t dm; + + pmt_entry_dec(&pmt->pmte, &dm); + return dm.mask.bar.addrdw << 2; +} + +void +pmt_cfg_set_bus(pmt_t *pmt, const u_int8_t bus) +{ + pmr_cfg_entry_t *pmr = &pmt->pmre.cfg; + pmt_datamask_t dm; + u_int8_t obus, busdelta; + +#define bdf_replace_bus(bdf, bus) \ + bdf = ((bus << 8) | ((bdf) & 0x00ff)) + + pmt_entry_dec(&pmt->pmte, &dm); + /* assume no bus wildcards */ + pciesvc_assert((dm.mask.cfg.bdf & 0xff00) == 0xff00); + obus = bdf_to_bus(dm.data.cfg.bdf); + bdf_replace_bus(dm.data.cfg.bdf, bus); + pmt_entry_enc(&pmt->pmte, &dm); + + /* replicate the bus delta between start/limit with new bus */ + busdelta = pmr->bstart - obus; + pmr->bstart = bus + busdelta; + + busdelta = pmr->blimit - obus; + pmr->blimit = bus + busdelta; +} + +/****************************************************************** + * apis + */ + +void +pciehw_pmt_load_cfg(pciehwdev_t *phwdev) +{ + if (!phwdev->cfgloaded) { + u_int32_t pmti; + + for (pmti = phwdev->pmtb; pmti < phwdev->pmtb + phwdev->pmtc; pmti++) { + pciehw_spmt_t *spmt = pciesvc_spmt_get(pmti); + int state = CLEAN; + + if (!spmt->loaded) { + pmt_set(pmti, &spmt->pmt); + spmt->loaded = 1; + state = DIRTY; /* loaded */ + } + pciesvc_spmt_put(spmt, state); + } + phwdev->cfgloaded = 1; + } +} + +void +pciehw_pmt_unload_cfg(pciehwdev_t *phwdev) +{ + if (phwdev->cfgloaded) { + u_int32_t pmti; + + for (pmti = phwdev->pmtb; pmti < phwdev->pmtb + phwdev->pmtc; pmti++) { + pciehw_spmt_t *spmt = pciesvc_spmt_get(pmti); + int state = CLEAN; + + if (spmt->loaded) { + pmt_clr(pmti); + spmt->loaded = 0; + state = DIRTY; /* loaded */ + } + pciesvc_spmt_put(spmt, state); + } + phwdev->cfgloaded = 0; + } +} + +static void +pciehw_bar_foreach_pmt(pciehwbar_t *phwbar, + void (*cb)(int pmti, pciehw_spmt_t *spmt, void *arg), + void *arg) +{ + pciehw_spmt_t *spmt; + u_int32_t pmti; + int next; + + /* process the base pmts */ + for (pmti = phwbar->pmtb; pmti < phwbar->pmtb + phwbar->pmtc; pmti++) { + int chain; + + spmt = pciesvc_spmt_get(pmti); + chain = spmt->chain; + next = spmt->next; + + cb(pmti, spmt, arg); + + pciesvc_spmt_put(spmt, DIRTY); /* spmt.pmt.pmr.bdf, loaded */ + + /* if chained pmts, process them */ + if (chain) { + while (next != PMT_INVALID) { + spmt = pciesvc_spmt_get(next); + + cb(next, spmt, arg); + + next = spmt->next; + pciesvc_spmt_put(spmt, DIRTY); /* loaded */ + } + } + } + + /* if ovrds, process them */ + if (phwbar->ovrds) { + next = phwbar->ovrd; + while (next != PMT_INVALID) { + spmt = pciesvc_spmt_get(next); + + cb(next, spmt, arg); + + next = spmt->next; + pciesvc_spmt_put(spmt, DIRTY); /* loaded */ + } + } +} + +static void 
+pmt_load(const int pmti, pciehw_spmt_t *spmt, const u_int16_t bdf) +{ + /* + * Load PRT first, then load PMT so PMT tcam search hit + * will find valid PRT entries. + */ + pciehw_prt_load(spmt->pmt.pmre.bar.prtb, spmt->pmt.pmre.bar.prtc); + + /* vf0 bdf was adjusted already in adjust_vf0 */ + if (!spmt->vf0) { + /* place bus-adjusted bdf in pmt, then load in hw */ + pmt_bar_set_bdf(&spmt->pmt, bdf); + } + pmt_set(pmti, &spmt->pmt); + + if (!spmt->loaded) { + spmt->loaded = 1; + } +} + +static void +pmt_load_cb(int pmti, pciehw_spmt_t *spmt, void *arg) +{ + const u_int16_t bdf = *(u_int16_t *)arg; + + pmt_load(pmti, spmt, bdf); +} + +void +pciehw_bar_load_pmts(pciehwbar_t *phwbar) +{ + u_int16_t bdf = phwbar->bdf; + + if (!phwbar->valid) return; + pciehw_bar_foreach_pmt(phwbar, pmt_load_cb, &bdf); +} + +static void +pmt_unload(const int pmti, pciehw_spmt_t *spmt) +{ + /* + * Unload PMT first THEN PRT, so PMT tcam search will not hit + * and PRT is unreferenced. Then safe to unload PRT. + */ + if (spmt->loaded) { + pmt_clr(pmti); + pciehw_prt_unload(spmt->pmt.pmre.bar.prtb, + spmt->pmt.pmre.bar.prtc); + spmt->loaded = 0; + } +} + +static void +pmt_unload_cb(int pmti, pciehw_spmt_t *spmt, void *arg) +{ + pmt_unload(pmti, spmt); +} + +void +pciehw_bar_unload_pmts(pciehwbar_t *phwbar) +{ + pciesvc_assert(phwbar->valid); + pciehw_bar_foreach_pmt(phwbar, pmt_unload_cb, NULL); +} + +void +pciehw_bar_load_ovrds(pciehwbar_t *phwbar) +{ + pciesvc_assert(phwbar->valid); + if (phwbar->ovrds) { + u_int16_t pmti = phwbar->ovrd; + while (pmti != PMT_INVALID) { + pciehw_spmt_t *spmt = pciesvc_spmt_get(pmti); + + pmt_load(pmti, spmt, phwbar->bdf); + + pmti = spmt->next; + pciesvc_spmt_put(spmt, DIRTY); /* loaded */ + } + } +} + +void +pciehw_bar_unload_ovrds(pciehwbar_t *phwbar) +{ + pciesvc_assert(phwbar->valid); + if (phwbar->ovrds) { + u_int16_t pmti = phwbar->ovrd; + while (pmti != PMT_INVALID) { + pciehw_spmt_t *spmt = pciesvc_spmt_get(pmti); + + pmt_unload(pmti, spmt); + + pmti = spmt->next; + pciesvc_spmt_put(spmt, DIRTY); /* loaded */ + } + } +} + +static int +spmt_dup_prts(const pciehw_spmt_t *ospmt, pciehw_spmt_t *nspmt) +{ + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + pciehw_sprt_t *osprt, *nsprt; + pmr_bar_entry_t *pmr; + pmt_t *pmt; + int prti; + + pmt = &nspmt->pmt; + pmr = &pmt->pmre.bar; + prti = prt_alloc(pmr->prtc); + if (prti < 0) { + pciesvc_logerror("spmt_dup: prt_alloc %d failed\n", pmr->prtc); + return -1; + } + osprt = &pshmem->sprt[pmr->prtb]; + nsprt = &pshmem->sprt[prti]; + pmr->prtb = prti; + for (prti = pmr->prtb; prti < pmr->prtb + pmr->prtc; prti++) { + *nsprt++ = *osprt++; + } + return 0; +} + +static pciehw_spmt_t * +spmt_get_dup(const pciehw_spmt_t *ospmt) +{ + pciehw_spmt_t *nspmt; + int pmti; + + pmti = pmt_alloc(1, PMTPRI_VF0ADJ); + if (pmti < 0) { + pciesvc_logerror("spmt_dup: pmt_alloc failed\n"); + return NULL; + } + + nspmt = pciesvc_spmt_get(pmti); + pciesvc_memcpy(nspmt, ospmt, sizeof(*nspmt)); + nspmt->next = PMT_INVALID; + + if (spmt_dup_prts(ospmt, nspmt) < 0) { + pmt_free(pmti, 1); + return NULL; + } + + return nspmt; +} + +static pciehw_spmt_t * +spmt_get_next(pciehw_spmt_t *spmt) +{ + pciehw_spmt_t *nspmt; + int pmti; + + pmti = spmt->next; + if (pmti != PMT_INVALID) { + return pciesvc_spmt_get(pmti); + } + nspmt = spmt_get_dup(spmt); + if (nspmt) { + spmt->next = spmt_to_pmti(nspmt); + spmt->chain = 1; + } + return nspmt; +} + +/* + * Stub out any remaining chain pmt entries + * by assigning 0 address.
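+ * With addr 0 and an all-ones mask the tcam entry requires an exact
+ * match on address 0, which a configured bar should never produce,
+ * so the entry stops hitting but stays chained for possible reuse.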
+ */ +static void +pmt_adjust_nullify_chain(int pmti) +{ + const u_int64_t addr = 0ULL; + const u_int64_t mask = ~0ULL; + + while (pmti != PMT_INVALID) { + pciehw_spmt_t *spmt = pciesvc_spmt_get(pmti); + pmt_t *pmt = &spmt->pmt; + + pmt_bar_setaddrm(pmt, addr, mask); + + pmti = spmt->next; + pciesvc_spmt_put(spmt, DIRTY); /* pmt.addr/mask */ + } +} + +static int +pmt_adjust_prt(pmt_t *pmt, prt_t *prt, const u_int64_t newval) +{ + int r = 0; + + switch (prt_type(prt)) { + case PRT_TYPE_RES: { + prt_res_t *res = &prt->res; + res->addrdw = newval >> 2; + break; + } + case PRT_TYPE_DB16: + case PRT_TYPE_DB32: + case PRT_TYPE_DB64: { + prt_db_t *db = &prt->db; + db->lif = newval; + break; + } + default: + break; + } + return r; +} + +static int +pmt_adjust_prts(pmt_t *pmt, const u_int64_t newval) +{ + pmr_bar_entry_t *pmr = &pmt->pmre.bar; + const int prtend = pmr->prtb + pmr->prtc; + int prti, r; + + r = 0; + for (prti = pmr->prtb; prti < prtend; prti++) { + pciehw_sprt_t *sprt = pciesvc_sprt_get(prti); + r = pmt_adjust_prt(pmt, &sprt->prt, newval); + pciesvc_sprt_put(sprt, DIRTY); /* addrdw/lif */ + if (r < 0) break; + } + return r; +} + +/* + * We want to add the "numvfs" entries in the pmt. + * We want to match the address starting at "addr" and we need to + * be careful about bits already set in "addr". We can put in pmt + * tcam "wildcard" masks to match bits in "addr" for contiguous power-of-2 + * numvfs, but if some bits are set in "addr" we will install a pmt + * entry with exact match on the prefix up to that bit so we don't + * claim additional address space that is not allocated to this sriov + * vf group bar. + */ +static int +pmt_adjust_pmt(const pciehwdev_t *phwdev, + pciehw_spmt_t *spmt, + const u_int64_t addr, + const int vfoffset, + const int numvfs, + const int do_log) +{ + pmt_t *pmt = &spmt->pmt; + pmr_bar_entry_t *pmr = &pmt->pmre.bar; + const u_int32_t vfstart = pmr->vfstart; + u_int32_t numvfs2, numvfs2bitc, numvfs2end; + u_int32_t addrvfs, nvfs, vfbitc, nvfend; + u_int32_t ovfend, ovflimit, nvflimit; + u_int64_t nvfmask, omask, nmask, nprtval; + u_int16_t obdf, nbdf; + int r; + + /* + * Save pmtstart the first time through before any adjustments. + * We'll keep track of pmtstart to create the original addr mask + * for this entry. We could arrange this to be set at init time + * but we do this here at "runtime" when we start to configure sriov + * to handle the case where we upgraded from an older fw that didn't + * set "pmtstart" at init time and *then* the OS enables sriov. + */ + if (spmt->pmtstart == 0) { + const u_int64_t addrmask = pmt_bar_getaddrmask(pmt); + spmt->pmtstart = pciesvc_ffsll(addrmask) - 1; + } + + /* + * Figure out how many bits for vf index to address "numvfs". + * "numvfs2end" contains the upper bit of the vf index mask. + */ + numvfs2 = roundup_power2(numvfs); + numvfs2bitc = pciesvc_ffs(numvfs2) - 1; + numvfs2end = vfstart + numvfs2bitc; + /* + * "addrvfs" contains the bits set in "addr" for the vf range. + * We want to match on the existing bits in the address to + * avoid any wildcard matches claiming other (incorrect) addresses. + */ + addrvfs = (addr & ((1ULL << numvfs2end) - 1)) >> vfstart; + /* + * "nvfs" is the number of vfs covered by this entry. + */ + nvfs = addrvfs ? 1 << (pciesvc_ffs(addrvfs) - 1) : rounddn_power2(numvfs); + /* + * Now that we have the real vf count in "nvfs" calculate the + * new "nvfmask" that will select the vf index part of the "addr" + * for this pmt.
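+ *
+ * Illustrative numbers (hypothetical, assuming vfstart=20): for
+ * numvfs=6 and no vf bits set in "addr", numvfs2=8, addrvfs=0, and
+ * nvfs = rounddn_power2(6) = 4, so nvfmask = 0x3 << 20 and this entry
+ * covers 4 vfs; the caller loops and a second entry covers the rest.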
+ */ + vfbitc = pciesvc_ffs(nvfs) - 1; + nvfend = vfstart + vfbitc; + nvfmask = ((1ULL << vfbitc) - 1) << vfstart; + /* + * Compute the "nmask" new address mask for this pmt. + * Start with the first bit of the old mask to + * match the remaining upper bits of "addr". Then AND off + * the bits in "nvfmask" so the tcam entry will wildcard match + * on the vf index field. "nmask" will become the new pmt + * mask used below in pmt_bar_setaddrm(). + */ + omask = ~((1ULL << spmt->pmtstart) - 1); + nmask = omask & ~nvfmask; + + /* Compute the "nprtval" to be used for this "vfoffset". */ + nprtval = spmt->vf0base + (vfoffset << spmt->vf0stride); + /* "nvflimit" is new vf limit for pmt */ + nvflimit = nvfs - 1; + /* "nbdf" is adjusted completer bdf based on "vfoffset" */ + obdf = pciehwdev_get_hostbdf(phwdev); + nbdf = obdf + vfoffset; + /* save these original values for logging below */ + ovfend = pmr->vfend; + ovflimit = pmr->vflimit; + + /* update pmt with adjusted values based on {addr, nvfs} */ + pmr->vfend = nvfend; + pmr->vflimit = nvflimit; + pmr->bdf = nbdf; + pmt_bar_setaddrm(pmt, addr, nmask); + + r = pmt_adjust_prts(pmt, nprtval); + if (do_log) { + pciesvc_loginfo("%s: adjust bar %u addr 0x%" PRIx64 " numvfs %d/%d\n", + pciehwdev_get_name(phwdev), spmt->cfgidx, addr, + nvfs, numvfs); + pciesvc_loginfo(" addr 0x%016" PRIx64 "\n", addr); + pciesvc_loginfo(" omask 0x%016" PRIx64 "\n", omask); + pciesvc_loginfo(" nmask 0x%016" PRIx64 "\n", nmask); + pciesvc_loginfo(" nvfmask 0x%016" PRIx64 "\n", nvfmask); + pciesvc_loginfo(" addrval 0x%016" PRIx64 " (0x%" PRIx64 ")\n", + addr & nvfmask, + (addr & nvfmask) >> vfstart); + pciesvc_loginfo(" nvfs %u addrvfs %u\n", nvfs, addrvfs); + pciesvc_loginfo(" vfstart %u vfend %u->%u vflimit %u->%u " + "bdf 0x%04x->0x%04x\n", + pmr->vfstart, ovfend, nvfend, ovflimit, nvflimit, + obdf, nbdf); + pciesvc_loginfo(" vf0base 0x%" PRIx64 + " nprtval 0x%" PRIx64 " r %d\n", + (u_int64_t)spmt->vf0base, nprtval, r); + } + return nvfs; +} + +/* + * We've set up this PMT entry to match *all* the VFs that belong + * to this PF, taking advantage of the fact that the VF bars are + * "virtual", guaranteed to be configured with a bar address that + * is at a constant stride based on bar size. + */ +int +pciehw_pmt_adjust_vf0(pciehw_spmt_t *spmt, + u_int64_t addr, + const int numvfs, + const int do_log) +{ + const pciehwdev_t *phwdev = pciehwdev_get(spmt->owner); + pmt_t *pmt = &spmt->pmt; + pmr_bar_entry_t *pmr = &pmt->pmre.bar; + const u_int32_t vfstart = pmr->vfstart; + int vfoffset, nvfs_left, r; + + vfoffset = 0; + nvfs_left = numvfs; + r = numvfs; + while (nvfs_left) { + + const int nvfs = + pmt_adjust_pmt(phwdev, spmt, addr, vfoffset, nvfs_left, do_log); + if (nvfs <= 0) { + r = -1; + break; + } + + nvfs_left -= nvfs; + vfoffset += nvfs; + addr += nvfs << vfstart; + + /* + * We didn't cover all the nvfs with the entry above. + * Go fetch the next spmt on the list (or allocate a new + * one if necessary) to use for the remainder. + */ + if (nvfs_left) { + pciehw_spmt_t *nspmt = spmt_get_next(spmt); + if (nspmt == NULL) { + r = -1; + break; + } + spmt = nspmt; + } + } + + /* + * Deactivate remaining entries in the list, not needed for this config. + * Host might have enabled more/fewer sriov vfs last time so the previous + * config might have required more entries than we need to use right now. + * We keep the entries chained on the spmt list because they might be + * needed again if host enables more sriov vfs again in the future. 
+ * Enabling with more/fewer sriov vfs is possible in theory, but uncommon + * in practice; the normal case is that the max sriov vfs are enabled once + * and that config is kept. + */ + pmt_adjust_nullify_chain(spmt->next); + + pciehwdev_put(phwdev, CLEAN); + return r; +} + +static void +pmt_setaddr_cb(int pmti, pciehw_spmt_t *spmt, void *arg) +{ + const u_int64_t addr = *(u_int64_t *)arg; + + pmt_bar_setaddr(&spmt->pmt, addr + spmt->baroff); + + /* if loaded, update hw too */ + if (spmt->loaded) { + pmt_set(pmti, &spmt->pmt); + } +} + +void +pciehw_pmt_setaddr(pciehwbar_t *phwbar, u_int64_t addr) +{ + pciehw_bar_foreach_pmt(phwbar, pmt_setaddr_cb, &addr); +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pmt.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pmt.h new file mode 100644 index 0000000000..56d13bc38d --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/pmt.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018,2021, Pensando Systems Inc. + */ + +#ifndef __PMT_H__ +#define __PMT_H__ + +struct pmt_s; +typedef struct pmt_s pmt_t; + +void pmt_get(const int pmti, pmt_t *pmt); +void pmt_set(const int pmti, const pmt_t *pmt); + +void pmt_bar_setaddr(pmt_t *pmt, const u_int64_t addr); +void pmt_bar_setaddrm(pmt_t *pmt, const u_int64_t addr, const u_int64_t mask); +u_int64_t pmt_bar_getaddr(const pmt_t *pmt); + +void pmt_bar_set_bdf(pmt_t *pmt, const u_int16_t bdf); +void pmt_cfg_set_bus(pmt_t *pmt, const u_int8_t bus); + +union pciehwbar_u; +typedef union pciehwbar_u pciehwbar_t; + +void pciehw_pmt_setaddr(pciehwbar_t *phwbar, u_int64_t addr); +void pciehw_bar_load_pmts(pciehwbar_t *phwbar); +void pciehw_bar_unload_pmts(pciehwbar_t *phwbar); +void pciehw_bar_load_ovrds(pciehwbar_t *phwbar); +void pciehw_bar_unload_ovrds(pciehwbar_t *phwbar); +void pciehw_pmt_load_cfg(pciehwdev_t *phwdev); +void pciehw_pmt_unload_cfg(pciehwdev_t *phwdev); +int pciehw_pmt_adjust_vf0(pciehw_spmt_t *spmt, + const u_int64_t addr, + const int numvfs, + const int do_log); + +#endif /* __PMT_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/portcfg.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/portcfg.c new file mode 100644 index 0000000000..1f1e2ef20b --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/portcfg.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2021, Pensando Systems Inc.
+ */ + +#include "pciesvc_impl.h" +#include "portcfg.h" + +typedef union { + u_int32_t d; + u_int16_t w[2]; + u_int8_t b[4]; +} cfgdata_t; + +static u_int64_t +portcfg_base(const int port) +{ + return PXC_(DHS_C_MAC_APB_ENTRY, port); +} + +u_int32_t +portcfg_readdw(const int port, const u_int16_t addr) +{ + u_int32_t v; + pciesvc_assert(addr < 4096); + pciesvc_pciepreg_rd32(portcfg_base(port) + addr, &v); + return v; +} + +void +portcfg_writedw(const int port, const u_int16_t addr, u_int32_t val) +{ + pciesvc_assert(addr < 4096); + pciesvc_pciepreg_wr32(portcfg_base(port) + addr, val); +} + +u_int8_t +portcfg_readb(const int port, const u_int16_t addr) +{ + const u_int16_t addrdw = addr & ~0x3; + const u_int8_t byteidx = addr & 0x3; + cfgdata_t v; + + v.d = portcfg_readdw(port, addrdw); + return v.b[byteidx]; +} + +u_int16_t +portcfg_readw(const int port, const u_int16_t addr) +{ + const u_int16_t addrdw = addr & ~0x3; + const u_int8_t wordidx = (addr & 0x3) >> 1; + cfgdata_t v; + + pciesvc_assert((addr & 0x1) == 0); + v.d = portcfg_readdw(port, addrdw); + return v.w[wordidx]; +} + +u_int32_t +portcfg_readd(const int port, const u_int16_t addr) +{ + pciesvc_assert((addr & 0x3) == 0); + return portcfg_readdw(port, addr); +} + +void +portcfg_writeb(const int port, const u_int16_t addr, const u_int8_t val) +{ + const u_int16_t addrdw = addr & ~0x3; + const u_int8_t byteidx = addr & 0x3; + cfgdata_t v; + + v.d = portcfg_readdw(port, addrdw); + v.b[byteidx] = val; + portcfg_writedw(port, addrdw, v.d); +} + +void +portcfg_writew(const int port, const u_int16_t addr, const u_int16_t val) +{ + const u_int16_t addrdw = addr & ~0x3; + const u_int8_t wordidx = (addr & 0x3) >> 1; + cfgdata_t v; + + pciesvc_assert((addr & 0x1) == 0); + v.d = portcfg_readdw(port, addrdw); + v.w[wordidx] = val; + portcfg_writedw(port, addrdw, v.d); +} + +void +portcfg_writed(const int port, const u_int16_t addr, const u_int32_t val) +{ + pciesvc_assert((addr & 0x3) == 0); + portcfg_writedw(port, addr, val); +} + +void +portcfg_read_bus(const int port, + u_int8_t *pribus, u_int8_t *secbus, u_int8_t *subbus) +{ + cfgdata_t v; + + v.d = portcfg_readdw(port, PCI_PRIMARY_BUS); + + if (pribus) *pribus = v.b[0]; + if (secbus) *secbus = v.b[1]; + if (subbus) *subbus = v.b[2]; +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/portcfg.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/portcfg.h new file mode 100644 index 0000000000..f2242f31dc --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/portcfg.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018,2021, Pensando Systems Inc. 
+ */ + +#ifndef __PORTCFG_H__ +#define __PORTCFG_H__ + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* close to calm emacs autoindent */ +#endif +#endif + +#define PORTCFG_CAP_PCIE 0x80 +#define PORTCFG_CAP_AER 0x200 +#define PORTCFG_CAP_PHYSLAYER 0x340 /* Gen4 Physical Layer */ + +/* rename these to avoid static link dups */ +#define portcfg_readb _pciesvc_portcfg_readb +#define portcfg_readw _pciesvc_portcfg_readw +#define portcfg_readd _pciesvc_portcfg_readd +#define portcfg_writeb _pciesvc_portcfg_writeb +#define portcfg_writew _pciesvc_portcfg_writew +#define portcfg_writed _pciesvc_portcfg_writed +#define portcfg_read_bus _pciesvc_portcfg_read_bus + +void portcfg_read_bus(const int port, + u_int8_t *pribus, u_int8_t *secbus, u_int8_t *subbus); + +u_int8_t portcfg_readb(const int port, const u_int16_t addr); +u_int16_t portcfg_readw(const int port, const u_int16_t addr); +u_int32_t portcfg_readd(const int port, const u_int16_t addr); + +void portcfg_writeb(const int port, const u_int16_t addr, const u_int8_t val); +void portcfg_writew(const int port, const u_int16_t addr, const u_int16_t val); +void portcfg_writed(const int port, const u_int16_t addr, const u_int32_t val); + +#ifdef __cplusplus +} +#endif + +#endif /* __PORTCFG_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/printf.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/printf.c new file mode 100644 index 0000000000..c25d04b95c --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/printf.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, Pensando Systems Inc. + */ + +#include "pciesvc_system.h" + +#ifdef PCIESVC_SYSTEM_EXTERN +struct ostr_s { + void (*cb)(int c, void *arg); + void *arg; +}; + +static inline void +prf_putc(const struct ostr_s *o, int c) +{ + o->cb(c, o->arg); +} + +static void +prf_emit_u64(const struct ostr_s *o, uint64_t n, + int base, int zeroes, int ptr, int neg, int width, int ljust) +{ + char buf[32]; + int i = 0; + if (n == 0) { + buf[i++] = '0'; + } else { + while (n) { + buf[i++] = "0123456789abcdef"[n % base]; + n /= base; + } + } + if (ljust) { + int rpad; + if (neg) { + prf_putc(o, '-'); + --width; + } else if (ptr) { + prf_putc(o, '0'); + prf_putc(o, 'x'); + width -= 2; + } + rpad = width - i; + while (i > 0) { + prf_putc(o, buf[--i]); + } + while (rpad-- > 0) { + prf_putc(o, ' '); + } + } else { + if (ptr) { + width -= 2; + } else if (neg) { + --width; + if (zeroes) { + prf_putc(o, '-'); + } + } + while (i < width) { + prf_putc(o, zeroes ? 
'0' : ' '); + --width; + } + if (ptr) { + prf_putc(o, '0'); + prf_putc(o, 'x'); + } else if (neg && !zeroes) { + prf_putc(o, '-'); + } + while (i > 0) { + prf_putc(o, buf[--i]); + } + } +} + +#define strlen _strlen +static size_t +strlen(const char *s) +{ + const char *e; + for (e = s; *e; e++) { + ; + } + return (size_t)(e - s); +} + +static void +prf_emit_str(const struct ostr_s *o, const char *s, int width, int ljust) +{ + int c, nspc; + + if (s == NULL) { + s = ""; + } + nspc = width - strlen(s); + if (width > 0 && !ljust) { + while (nspc-- > 0) { + prf_putc(o, ' '); + } + } + while ((c = *s++)) { + prf_putc(o, c); + } + if (width > 0 && ljust) { + while (nspc-- > 0) { + prf_putc(o, ' '); + } + } +} + +static void +subr_prf(const struct ostr_s *o, const char *s, va_list ap) +{ + int base, done, pop, is_long, zeroes, ptr, neg, sign, width, str, ljust; + uint64_t n; + char c; + + while ((c = *s++)) { + switch (c) { + case '%': + done = pop = is_long = ptr = neg = sign = zeroes = str = ljust = 0; + width = -1; + base = 10; + do { + c = *s++; + switch (c) { + case '\0': + return; + case '-': + ljust = 1; + break; + case '0': + if (width < 0) { + zeroes = 1; + break; + } + width = (((width < 0) ? 0 : width) * 10) + c - '0'; + break; + case '1' ... '9': + width = (((width < 0) ? 0 : width) * 10) + c - '0'; + break; + case '%': + prf_putc(o, '%'); + done = 1; + break; + case 'l': + is_long = 1; + break; + case 'p': + ptr = 1; + is_long = 1; + base = 16; + pop = 1; + done = 1; + break; + case 'x': + base = 16; + pop = 1; + done = 1; + break; + case 'd': + sign = 1; + pop = 1; + done = 1; + break; + case 'u': + pop = 1; + done = 1; + break; + case 's': + pop = 1; + done = 1; + str = 1; + break; + case 'c': + prf_putc(o, va_arg(ap, int)); + done = 1; + break; + default: + prf_putc(o, c); + done = 1; + } + } while (!done); + if (pop) { + if (str) { + prf_emit_str(o, va_arg(ap, char *), width, ljust); + } else { + n = is_long ? va_arg(ap, uint64_t) : va_arg(ap, uint32_t); + if (sign) { + if (is_long) { + neg = ((int64_t)n < 0); + if (neg) { + n = -n; + } + } else { + neg = ((int32_t)n < 0); + if (neg) { + n = -(int64_t)(int32_t)n; + } + } + } + prf_emit_u64(o, n, base, zeroes, ptr, neg, width, ljust); + } + } + break; + default: + prf_putc(o, c); + break; + } + } +} + +struct snprintf_ctx { + char *pos; + size_t remain; +}; + +static void +snprintf_outchar(int c, void *arg) +{ + struct snprintf_ctx *ctx = arg; + if (ctx->remain) { + *ctx->pos++ = c; + --ctx->remain; + } +} + +int +pciesvc_vsnprintf(char *buf, size_t len, const char *fmt, va_list ap) +{ +#ifdef PCIESVC_SYSTEM_EXTERN +/* + * Oracle environment wants runtime init of these structs to + * use pc-relative offsets that are within the module and need + * no relocation required when running in different environments. + */ +#define RUNTIME_INIT +#endif +#ifdef RUNTIME_INIT + struct snprintf_ctx ctx; + struct ostr_s o; + + ctx.pos = buf; + ctx.remain = len; + o.cb = snprintf_outchar; + o.arg = &ctx; +#else + struct snprintf_ctx ctx = { + .pos = buf, + .remain = len, + }; + struct ostr_s o = { + .cb = snprintf_outchar, + .arg = &ctx + }; +#endif + + subr_prf(&o, fmt, ap); + if (ctx.remain) { + *ctx.pos = '\0'; + } else { + *(ctx.pos - 1) = '\0'; + } + return len - ctx.remain; +} + +int +pciesvc_snprintf(char *buf, size_t len, const char *fmt, ...) 
+{
+    int r;
+    va_list ap;
+    va_start(ap, fmt);
+    r = pciesvc_vsnprintf(buf, len, fmt, ap);
+    va_end(ap);
+    return r;
+}
+#endif
+
+#ifdef CONFIG_PRINTF
+static void
+printf_outchar(int c, void *arg)
+{
+    putchar(c);
+}
+
+void
+pciesvc_vprintf(const char *fmt, va_list ap)
+{
+#ifdef RUNTIME_INIT
+    struct ostr_s o;
+
+    o.cb = printf_outchar;
+    o.arg = NULL;
+#else
+    const struct ostr_s o = {
+        .cb = printf_outchar,
+        .arg = NULL,
+    };
+#endif
+    subr_prf(&o, fmt, ap);
+}
+
+void
+printf(const char *fmt, ...)
+{
+    va_list ap;
+    va_start(ap, fmt);
+    pciesvc_vprintf(fmt, ap);
+    va_end(ap);
+}
+#endif

diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/prt.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/prt.c
new file mode 100644
index 0000000000..811a3e30b9
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/prt.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018,2021-2022, Pensando Systems Inc.
+ */
+
+#include "pciesvc_impl.h"
+#include "prt.h"
+
+#define PRT_BASE        PXB_(DHS_TGT_PRT)
+#define PRT_STRIDE      ASIC_(PXB_CSR_DHS_TGT_PRT_ENTRY_BYTE_SIZE)
+
+/* the only client freeing slabs is overrides */
+#define PRT_SLAB_SIZE   PCIEHDEVICE_OVERRIDE_INTRGROUPS
+
+static int
+prt_count(void)
+{
+    return PRT_COUNT;
+}
+
+static void
+assert_prti_in_range(const int prti)
+{
+    pciesvc_assert(prti >= 0 && prti < prt_count());
+}
+
+static void
+assert_prts_in_range(const int prtb, const int prtc)
+{
+    if (prtc > 0) {
+        assert_prti_in_range(prtb);
+        assert_prti_in_range(prtb + prtc - 1);
+    }
+}
+
+int
+prt_alloc(const int n)
+{
+    pciehw_shmem_t *pshmem = pciesvc_shmem_get();
+    pciehw_sprt_t *sprt;
+    int prti = -1;
+
+    if (n == PRT_SLAB_SIZE && pshmem->freeprt_slab != PRT_INVALID) {
+        /* alloc slab entry from slab list */
+        prti = pshmem->freeprt_slab;
+        sprt = pciesvc_sprt_get(prti);
+        pshmem->freeprt_slab = sprt->next;
+        sprt->next = PRT_INVALID;
+        pciesvc_sprt_put(sprt, DIRTY);
+    } else if (pshmem->allocprt + n < prt_count()) {
+        prti = pshmem->allocprt;
+        pshmem->allocprt += n;
+    }
+    return prti;
+}
+
+void
+prt_free(const int prtb, const int prtc)
+{
+    assert_prts_in_range(prtb, prtc);
+
+    if (prtc == PRT_SLAB_SIZE) {
+        pciehw_shmem_t *pshmem = pciesvc_shmem_get();
+        pciehw_sprt_t *sprt;
+
+        sprt = pciesvc_sprt_get(prtb);
+        sprt->next = pshmem->freeprt_slab;
+        pciesvc_sprt_put(sprt, DIRTY);
+        pshmem->freeprt_slab = prtb;
+    } else {
+        /* XXX */
+    }
+}
+
+static u_int64_t
+prt_addr(const int prti)
+{
+    assert_prti_in_range(prti);
+    return PRT_BASE + (prti * PRT_STRIDE);
+}
+
+void
+prt_get(const int prti, prt_t *prt)
+{
+    pciesvc_reg_rd32w(prt_addr(prti), prt->w, PRT_NWORDS);
+}
+
+void
+prt_set(const int prti, const prt_t *prt)
+{
+    pciesvc_reg_wr32w(prt_addr(prti), prt->w, PRT_NWORDS);
+}
+
+/******************************************************************
+ * apis
+ */
+
+int
+pciehw_prt_load(const int prtbase, const int prtcount)
+{
+    const int prtend = prtbase + prtcount;
+    pciehw_sprt_t *sprt;
+    int prti;
+
+    assert_prts_in_range(prtbase, prtcount);
+
+    for (prti = prtbase; prti < prtend; prti++) {
+        sprt = pciesvc_sprt_get(prti);
+        prt_set(prti, &sprt->prt);
+        pciesvc_sprt_put(sprt, CLEAN);
+    }
+    return 0;
+}
+
+void
+pciehw_prt_unload(const int prtbase, const int prtcount)
+{
+    const int prtend = prtbase + prtcount;
+    const prt_t prt0 = {{ 0 }};
+    int prti;
+
+    assert_prts_in_range(prtbase, prtcount);
+
+    for (prti = prtbase; prti < prtend; prti++) {
+        prt_set(prti,
&prt0); + } +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/prt.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/prt.h new file mode 100644 index 0000000000..62dc1d9214 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/prt.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018,2021-2022, Pensando Systems Inc. + */ + +#ifndef __PRT_H__ +#define __PRT_H__ + +union prt_u; typedef union prt_u prt_t; +int prt_alloc(const int n); +void prt_free(const int prtbase, const int prtcount); +void prt_get(const int prti, prt_t *prt); +void prt_set(const int prti, const prt_t *prt); + +int pciehw_prt_load(const int prtbase, const int prtcount); +void pciehw_prt_unload(const int prtbase, const int prtcount); + +#endif /* __PRT_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/req_int.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/req_int.c new file mode 100644 index 0000000000..d30913090e --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/req_int.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018,2021-2022, Pensando Systems Inc. + */ + +#include "pciesvc_impl.h" +#include "req_int.h" + +#define REQ_INT_NWORDS 3 + +typedef union req_int_u { + struct { + u_int64_t data:32; + u_int64_t addrdw:34; + } __attribute__((packed)); + u_int32_t w[REQ_INT_NWORDS]; +} req_int_t; + +void +req_int_set(const u_int64_t reg, const u_int64_t addr, const u_int32_t data) +{ + req_int_t in = { .data = data, .addrdw = addr >> 2 }; + + pciesvc_reg_wr32w(reg, in.w, REQ_INT_NWORDS); +} + +void +req_int_get(const u_int64_t reg, u_int64_t *addrp, u_int32_t *datap) +{ + req_int_t in; + + pciesvc_reg_rd32w(reg, in.w, REQ_INT_NWORDS); + *addrp = in.addrdw << 2; + *datap = in.data; +} + +/* + * The pcie request hardware provides a single base register + * CFG_TGT_REQ_*_INT.addrdw to configure indirect/notify interrupts. + * Each per-port interrupt is sent to the address + * (CFG_TGT_REQ_*_INT.addrdw << 2) + (port * 4). + * + * If CFG_TGT_REQ_*_INT.data[31] == 0 then a "1" is written to + * the destination address. This is used to trigger an interrupt + * through a write to INTR_ASSERT register. + * If CFG_TGT_REQ_*_INT.data[31] == 1 then data written is + * data = (data & 0x7fffffff) + port. + * + * This routine provides the abstraction that we can configure each + * port independently. When the first port is configured we set the + * base port0 values for msgaddr0/msgdata0 and configure the hw to match. + * Subsequent ports msgaddr/data are validated to be sure they match + * what the hw will do. + */ +int +req_int_init(const u_int64_t reg, + const int port, u_int64_t msgaddr, u_int32_t msgdata) +{ + u_int64_t msgaddr0; + u_int32_t msgdata0; + int r = 0; + + /* + * First time through set msgaddr0/data0 and hw to match. 
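+ * (A worked example of the scheme implemented below: if port 2 is the
+ * first port configured, with msgaddr 0x1008, the shared base is stored
+ * as msgaddr0 = 0x1008 - 2*4 = 0x1000 and the hw is programmed from it.)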
+ * Doesn't matter which port we configure first, + * but subsequent ports must follow the pattern + * msgaddr = msgaddr0 + (port * 4) + * msgdata = msgdata0 + port + */ + req_int_get(reg, &msgaddr0, &msgdata0); + if (port == 0 || msgaddr0 == 0) { + msgaddr0 = msgaddr - (port * 4); + if (MSGDATA_HAS_ADD_PORT(msgdata)) { + msgdata0 = (MSGDATA_DATA(msgdata) - port) | MSGDATA_ADD_PORT; + } else { + msgdata0 = msgdata; + } + req_int_set(reg, msgaddr0, msgdata0); + } + + if (msgaddr != msgaddr0 + (port * 4)) { + r = -1; + } else if (MSGDATA_HAS_ADD_PORT(msgdata) != + MSGDATA_HAS_ADD_PORT(msgdata0)) { + r = -2; + } else if (MSGDATA_HAS_ADD_PORT(msgdata) && + MSGDATA_DATA(msgdata) != + MSGDATA_DATA(MSGDATA_DATA(msgdata0) + port)) { + r = -3; + } else if (!MSGDATA_HAS_ADD_PORT(msgdata) && + MSGDATA_DATA(msgdata) != MSGDATA_DATA(msgdata0)) { + r = -4; + } + return r; +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/req_int.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/req_int.h new file mode 100644 index 0000000000..1900ac492d --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/req_int.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018,2021, Pensando Systems Inc. + */ + +#ifndef __REQ_INT_H__ +#define __REQ_INT_H__ + +/* + * Common interface for tgt_req_notify_int and tgt_req_indirect_int. + */ + +#define MSGDATA_ADD_PORT 0x80000000 /* intr: msgdata += port */ +#define MSGDATA_HAS_ADD_PORT(m) (((m) & MSGDATA_ADD_PORT) != 0) +#define MSGDATA_DATA(m) ((m) & ~MSGDATA_ADD_PORT) + +void +req_int_set(const u_int64_t reg, const u_int64_t addr, const u_int32_t data); + +void +req_int_get(const u_int64_t reg, u_int64_t *addrp, u_int32_t *datap); + +int +req_int_init(const u_int64_t reg, + const int port, u_int64_t msgaddr, u_int32_t msgdata); + +#endif /* __REQ_INT_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/reset.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/reset.c new file mode 100644 index 0000000000..04fc3e936b --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/reset.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019,2021-2022, Pensando Systems Inc. 
+ */ + +#include "pciesvc_impl.h" +#include "intrutils.h" +#include "serial.h" +#include "reset.h" + +static void +pciehw_reset_lifs_event(pciehwdev_t *phwdev, + const int lifb, const int lifc, + const pciesvc_rsttype_t rsttype) +{ + pciesvc_eventdata_t evd; + pciesvc_reset_t *reset; + + pciesvc_memset(&evd, 0, sizeof(evd)); + evd.evtype = PCIESVC_EV_RESET; + evd.port = phwdev->port; + evd.lif = phwdev->lifb; + reset = &evd.reset; + reset->rsttype = rsttype; + reset->lifb = lifb; + reset->lifc = lifc; + pciesvc_event_handler(&evd, sizeof(evd)); +} + +static void +pciehw_reset_event(pciehwdev_t *phwdev, const pciesvc_rsttype_t rsttype) +{ + /* skip bridges, no lif so no reset event */ + if (phwdev->lifc) { + pciehw_reset_lifs_event(phwdev, phwdev->lifb, phwdev->lifc, rsttype); + } +} + +static void +pciehw_reset_device_intrs(pciehwdev_t *phwdev, const pciesvc_rsttype_t rsttype) +{ + const int dmask = phwdev->intrdmask; + int i; + + if (phwdev->novrdintr) { + for (i = 0; i < phwdev->novrdintr; i++) { + const u_int32_t intrb = phwdev->ovrdintr[i].intrb; + const u_int32_t intrc = phwdev->ovrdintr[i].intrc; + + intr_reset_pci(intrb, intrc, dmask); + } + } else { + intr_reset_pci(phwdev->intrb, phwdev->intrc, dmask); + } +} + +static void +pciehw_reset_device(pciehwdev_t *phwdev, const pciesvc_rsttype_t rsttype) +{ +#ifdef PCIEMGR_DEBUG + pciesvc_logdebug("%s: dev reset\n", pciehwdev_get_name(phwdev)); +#endif + + if (rsttype != PCIESVC_RSTTYPE_NONE) { + pciehw_reset_event(phwdev, rsttype); + } + pciehw_reset_device_intrs(phwdev, rsttype); + pciehw_cfg_reset(phwdev, rsttype); + + switch (phwdev->type) { + case PCIEHDEVICE_SERIAL: + serial_reset(phwdev, rsttype); + break; + default: + break; + } +} + +static void +pciehw_reset_descendents(pciehwdevh_t hwdevh, const pciesvc_rsttype_t rsttype) +{ + while (hwdevh) { + pciehwdev_t *phwdev = pciehwdev_get(hwdevh); + const int is_pf = phwdev->pf; + const pciehwdevh_t childh = phwdev->childh; + const pciehwdevh_t peerh = phwdev->peerh; + + pciehw_reset_device(phwdev, rsttype); + pciehwdev_put(phwdev, DIRTY); + + /* + * If we are a PF then resetting our cfg space will disable and + * reset all active VFs so no need to reset them again. If this + * is a bridge with child devices, go reset those children here. + */ + if (!is_pf) { + pciehw_reset_descendents(childh, rsttype); + } + + hwdevh = peerh; + } +} + +/* + * A "bus" reset originates on a bridge device with a request + * for a secondary bus reset. We're called with the phwdev of + * the bridge, but the bridge doesn't get reset. We reset all + * the descendents of the bridge device. + */ +void +pciehw_reset_bus(pciehwdev_t *phwdev, const u_int8_t bus) +{ + pciesvc_loginfo("%s: bus reset 0x%02x\n", pciehwdev_get_name(phwdev), bus); + pciehw_reset_descendents(phwdev->childh, PCIESVC_RSTTYPE_BUS); +} + +/* + * Function Level Reset (FLR) is issued on a device endpoint to reset + * the device. If issued on a PF then all the VFs get reset too. + */ +void +pciehw_reset_flr(pciehwdev_t *phwdev) +{ + pciesvc_loginfo("%s: flr reset\n", pciehwdev_get_name(phwdev)); + pciehw_reset_device(phwdev, PCIESVC_RSTTYPE_FLR); +} + +/* + * A PF controls enabling of VFs. If some enabled VFs get disabled + * by the PF then we want to reset the VFs. + * + * In order to reduce the number of msgs generated for this reset event + * we compress all the VF reset msgs into a single reset msg spanning + * all the lifs that were affected. 
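+ * (E.g. disabling 4 VFs that own 2 lifs each yields one
+ * PCIESVC_RSTTYPE_VF event covering 2 * 4 = 8 lifs starting at the
+ * first VF's lifb, rather than four separate per-VF events.)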
+ */ +void +pciehw_reset_vfs(pciehwdev_t *phwdev, const int vfb, const int vfc) +{ + pciehwdev_t *vfhwdev; + int vfidx, vflifb, vflifc; + + pciesvc_loginfo("%s: vfs reset %d-%d\n", + pciehwdev_get_name(phwdev), vfb, vfb + vfc - 1); + vflifb = 0; + vflifc = 0; + for (vfidx = vfb; vfidx < vfb + vfc; vfidx++) { + vfhwdev = pciehwdev_vfdev_get(phwdev, vfidx); + if (vfidx == vfb) { + /* save these from first reset vf for event */ + vflifb = vfhwdev->lifb; + vflifc = vfhwdev->lifc; + } + pciehw_reset_device(vfhwdev, PCIESVC_RSTTYPE_NONE); + pciehwdev_vfdev_put(vfhwdev, DIRTY); + } + pciehw_reset_lifs_event(phwdev, vflifb, vflifc * vfc, PCIESVC_RSTTYPE_VF); +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/reset.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/reset.h new file mode 100644 index 0000000000..505571ed3f --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/reset.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019, Pensando Systems Inc. + */ + +#ifndef __RESET_H__ +#define __RESET_H__ + +union pciehwdev_u; +typedef union pciehwdev_u pciehwdev_t; + +void pciehw_reset_bus(pciehwdev_t *phwdev, const u_int8_t bus); +void pciehw_reset_flr(pciehwdev_t *phwdev); +void pciehw_reset_vfs(pciehwdev_t *phwdev, const int vfb, const int vfc); + +#endif /* __RESET_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/serial.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/serial.c new file mode 100644 index 0000000000..981aca8135 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/serial.c @@ -0,0 +1,680 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022, Pensando Systems Inc. 
+ */ + +#include "pciesvc_impl.h" +#include "intrutils.h" +#include "serial_state.h" +#include "serial.h" +#include "uart.h" + +typedef struct serial { + int inited; /* state has been initialized */ + serial_state_t *state; /* serial state */ + uart_state_t *uart; /* uart state */ + memq_t *txq; /* txq transfer from device to memq */ + memq_t *rxq; /* rxq transfer from memq to device */ +} serial_t; + +static void serial_update_msl(serial_t *s); + +static int +memq_putc(volatile memq_t *q, const u_int8_t c) +{ + const unsigned int pidx = q->pidx; + const unsigned int cidx = q->cidx; + const unsigned int newpidx = (pidx + 1) % MEMQ_BUFSZ; + + /* check for full q */ + if (newpidx == cidx) return 0; + + q->buf[pidx] = c; + q->pidx = newpidx; + return 1; +} + +static int +memq_getc(volatile memq_t *q, u_int8_t *cp) +{ + /* check for empty q */ + if (q->cidx == q->pidx) return 0; + + *cp = q->buf[q->cidx]; + q->cidx = (q->cidx + 1) % MEMQ_BUFSZ; + return 1; +} + +static int +memq_full(volatile memq_t *q) +{ + const unsigned int pidx = q->pidx; + const unsigned int cidx = q->cidx; + const unsigned int newpidx = (pidx + 1) % MEMQ_BUFSZ; + + return newpidx == cidx; +} + +static u_int8_t +serial_rd_rbr(serial_t *s) +{ + u_int8_t c; + + if (memq_getc(s->rxq, &c)) { + return c; + } + return 0; +} + +static int +serial_wr_thr(serial_t *s, const u_int8_t c) +{ + if (!memq_putc(s->txq, c)) { + pciesvc_logerror("wr_thr: memq_putc failed\n"); + return 0; + } + return 1; +} + +static void +tx_fifo_reset(serial_t *s) +{ + serial_state_t *st = s->state; + + /* seriald detects generation change and resets cidx */ + st->gen++; +} + +static void +rx_fifo_reset(serial_t *s) +{ + serial_state_t *st = s->state; + + st->rxq.cidx = st->rxq.pidx; +} + +/** + * serial_rxq_empty: + * @s: serial struct + * + * Returns: true if the receive queue is empty otherwise false. + */ +static int +serial_rxq_empty(serial_t *s) +{ + volatile serial_state_t *st = s->state; + + return st->rxq.cidx == st->rxq.pidx; +} + +/** + * uart_reset: + * @s: serial struct + * + * Set uart state to power on default settings + */ +static void +uart_reset(serial_t *s) +{ + uart_state_t *uart = s->uart; + + uart->rbr = 0; + uart->ier = 0; + uart->iir = UART_IIR_NO_INT; + uart->lcr = 0; + uart->lsr = UART_LSR_TEMT | UART_LSR_THRE; + uart->msr = UART_MSR_DCD | UART_MSR_DSR | UART_MSR_CTS; + uart->scr = 0; + uart->divider = 0x0c; /* default 9600 baud 8-N-1 */ + uart->mcr = UART_MCR_RTS | UART_MCR_DTR; + uart->thr_ipending = ((uart->iir & UART_IIR_ID) == UART_IIR_THRI); + uart->flags = CHR_TIOCM_CAR; + + serial_update_msl(s); + uart->msr &= ~UART_MSR_ANY_DELTA; +} + +/** + * uart_write_fcr: + * @s: serial struct + * @val: register value + * + * Write fifo control register and interrupt identification + * register receive byte interrupt threshold. + */ +static void +uart_write_fcr(serial_t *s, uint8_t val) +{ + uart_state_t *uart = s->uart; + + uart->fcr = val & 0xc9; + + if (uart->fcr & UART_FCR_FE) { + uart->iir |= UART_IIR_FE; + /* Set recv_fifo trigger Level */ + switch (val & 0xc0) { + case UART_FCR_ITL_1: + uart->recv_fifo_itl = 1; + break; + case UART_FCR_ITL_2: + uart->recv_fifo_itl = 4; + break; + case UART_FCR_ITL_3: + uart->recv_fifo_itl = 8; + break; + case UART_FCR_ITL_4: + uart->recv_fifo_itl = 14; + break; + } + } else { + uart->iir &= ~UART_IIR_FE; + } +} + +/** + * uart_update_parameters: + * @s: serial struct + * + * Set uart settings based on line control register. 
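+ *
+ * Standard 16550 LCR encoding, as decoded below: bits [1:0] select
+ * 5-8 data bits, bit 2 selects 1 or 2 stop bits, bit 3 enables parity,
+ * and bit 4 chooses even ('E') vs odd ('O') parity; with parity
+ * disabled the setting reads 'N'.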
+ */
+static void
+uart_update_parameters(serial_t *s)
+{
+    uart_state_t *uart = s->uart;
+    int parity, data_bits, stop_bits;
+
+    /* Parity enable bit */
+    if (uart->lcr & 0x08) {
+        /* Even parity select bit. */
+        if (uart->lcr & 0x10)
+            parity = 'E';
+        else
+            parity = 'O';
+    } else {
+        parity = 'N';
+    }
+    if (uart->lcr & 0x04) {
+        stop_bits = 2;
+    } else {
+        stop_bits = 1;
+    }
+
+    data_bits = (uart->lcr & 0x03) + 5;
+    uart->parity = parity;
+    uart->data_bits = data_bits;
+    uart->stop_bits = stop_bits;
+}
+
+/**
+ * uart_update_irq:
+ * @s: serial struct
+ *
+ * Emulate interrupt identification register.
+ */
+static void
+uart_update_irq(serial_t *s)
+{
+    uart_state_t *uart = s->uart;
+    volatile serial_state_t *st = s->state;
+    uint8_t tmp_iir = UART_IIR_NO_INT;
+
+    if ((uart->ier & UART_IER_RLSI) && (uart->lsr & UART_LSR_INT_ANY)) {
+        tmp_iir = UART_IIR_RLSI;
+    } else if ((uart->ier & UART_IER_RDI) && (uart->lsr & UART_LSR_DR) &&
+               (!(uart->fcr & UART_FCR_FE) || !serial_rxq_empty(s))) {
+        tmp_iir = UART_IIR_RDI;
+    } else if ((uart->ier & UART_IER_THRI) && uart->thr_ipending) {
+        tmp_iir = UART_IIR_THRI;
+    } else if ((uart->ier & UART_IER_MSI) && (uart->msr & UART_MSR_ANY_DELTA)) {
+        tmp_iir = UART_IIR_MSI;
+    }
+
+    uart->iir = tmp_iir | (uart->iir & 0xf0);
+
+    if (tmp_iir != UART_IIR_NO_INT) {
+        intr_assert(st->intrb);     /* raise interrupt */
+    } else {
+        intr_deassert(st->intrb);   /* lower interrupt */
+    }
+}
+
+static void
+serial_update_tiocm(serial_t *s)
+{
+    uart_state_t *uart = s->uart;
+
+    /* Clear flags and set to match modem control */
+    uart->flags &= ~(CHR_TIOCM_RTS | CHR_TIOCM_DTR);
+
+    if (uart->mcr & UART_MCR_RTS) {
+        uart->flags |= CHR_TIOCM_RTS;
+    }
+    if (uart->mcr & UART_MCR_DTR) {
+        uart->flags |= CHR_TIOCM_DTR;
+    }
+}
+
+static void
+serial_update_msl(serial_t *s)
+{
+    uart_state_t *uart = s->uart;
+    uint8_t omsr = uart->msr;
+    int flags = uart->flags;
+
+    uart->msr = (flags & CHR_TIOCM_CTS) ?
+        uart->msr | UART_MSR_CTS : uart->msr & ~UART_MSR_CTS;
+    uart->msr = (flags & CHR_TIOCM_DSR) ?
+        uart->msr | UART_MSR_DSR : uart->msr & ~UART_MSR_DSR;
+    uart->msr = (flags & CHR_TIOCM_CAR) ?
+        uart->msr | UART_MSR_DCD : uart->msr & ~UART_MSR_DCD;
+    uart->msr = (flags & CHR_TIOCM_RI) ?
+ uart->msr | UART_MSR_RI : uart->msr & ~UART_MSR_RI; + + if (uart->msr != omsr) { + /* Set delta bits */ + uart->msr = uart->msr | ((uart->msr >> 4) ^ (omsr >> 4)); + /* UART_MSR_TERI only if change was from 1 -> 0 */ + if ((uart->msr & UART_MSR_TERI) && !(omsr & UART_MSR_RI)) + uart->msr &= ~UART_MSR_TERI; + uart_update_irq(s); + } +} + +/** + * uart_xmit: + * @st: serial struct + * + * Transmit bytes to memq + */ +static void +uart_xmit(serial_t *s) +{ + uart_state_t *uart = s->uart; + + if (uart->mcr & UART_MCR_LOOP) { + /* Loopback mode, copy holding reg thr to receive reg rbr */ + uart->rbr = uart->thr; + uart->lsr |= UART_LSR_THRE; /* tx holding empty */ + uart->lsr |= UART_LSR_DR; /* rx data ready */ + + /* Add to rx queue in loopback */ + memq_putc(s->rxq, uart->thr); + uart_update_irq(s); + } else { + if (!memq_full(s->txq)) { + serial_wr_thr(s, uart->thr); + } + } + + if ((uart->lsr & UART_LSR_THRE) && !uart->thr_ipending) { + uart->thr_ipending = 1; + uart_update_irq(s); + } + + uart->lsr |= UART_LSR_TEMT; + uart->thr_ipending = 0; +} + +/** + * extract32: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 32 bit input @value the bit field specified by the + * @start and @length parameters, and return it. The bit field must + * lie entirely within the 32 bit word. It is valid to request that + * all 32 bits are returned (ie @length 32 and @start 0). + * + * Returns: the value of the bit field extracted from the input value. + */ +static inline uint32_t extract32(uint32_t value, int start, int length) +{ + pciesvc_assert(start >= 0 && length > 0 && length <= 32 - start); + return (value >> start) & (~0U >> (32 - length)); +} + +/** + * extract16: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 16 bit input @value the bit field specified by the + * @start and @length parameters, and return it. The bit field must + * lie entirely within the 16 bit word. It is valid to request that + * all 16 bits are returned (ie @length 16 and @start 0). + * + * Returns: the value of the bit field extracted from the input value. + */ +static inline uint16_t extract16(uint16_t value, int start, int length) +{ + pciesvc_assert(start >= 0 && length > 0 && length <= 16 - start); + return extract32(value, start, length); +} + +/** + * deposit32: + * @value: initial value to insert bit field into + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * @fieldval: the value to insert into the bit field + * + * Deposit @fieldval into the 32 bit @value at the bit field specified + * by the @start and @length parameters, and return the modified + * @value. Bits of @value outside the bit field are not modified. + * Bits of @fieldval above the least significant @length bits are + * ignored. The bit field must lie entirely within the 32 bit word. + * It is valid to request that all 32 bits are modified (ie @length + * 32 and @start 0). + * + * Returns: the modified @value. 
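+ *
+ * Example: deposit32(0x00ff, 8, 4, 0xa) builds mask 0x0f00 and returns
+ * (0x00ff & ~0x0f00) | ((0xa << 8) & 0x0f00) == 0x0aff.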
+ */ +static inline uint32_t deposit32(uint32_t value, int start, int length, + uint32_t fieldval) +{ + uint32_t mask; + pciesvc_assert(start >= 0 && length > 0 && length <= 32 - start); + mask = (~0U >> (32 - length)) << start; + return (value & ~mask) | ((fieldval << start) & mask); +} + +static serial_t * +serial_get(pciehwdev_t *phwdev) +{ + static serial_t serial; + + if (!serial.inited) { + pciehw_shmem_t *pshmem = pciesvc_shmem_get(); + serial_uart_state_t *su = (serial_uart_state_t *)pshmem->serial[0]; + serial_state_t *st = &su->serial_state; + + serial.state = st; + serial.uart = &su->uart_state; + serial.txq = &st->txq; + serial.rxq = &st->rxq; + + if (st->gen == 0) { + st->intrb = phwdev->intrb; + st->intrc = phwdev->intrc; + + uart_reset(&serial); + rx_fifo_reset(&serial); + tx_fifo_reset(&serial); + } + serial.inited = 1; + } + return &serial; +} + +uint64_t +serial_barrd(pciehwdev_t *phwdev, + const u_int64_t baroff, const size_t size) +{ + serial_t *s = serial_get(phwdev); + uart_state_t *uart = s->uart; + uint32_t r; + + /* only byte access */ + if (size != 1) return 0; + if (baroff >= 8) return 0; + + switch (baroff) { + case UART_RX_BUF: + if (uart->lcr & UART_LCR_DLAB) { + r = extract16(uart->divider, 8 * (int)baroff, 8); + } else { + r = 0; + if (uart->mcr & UART_MCR_LOOP) { + if (!serial_rxq_empty(s)) { + r = serial_rd_rbr(s); + uart->lsr |= UART_LSR_DR; + } else { + uart->lsr &= ~(UART_LSR_DR | UART_LSR_BI); + } + } else { + r = serial_rd_rbr(s); + if (uart->fcr & UART_FCR_FE) { + if (serial_rxq_empty(s)) { + uart->lsr &= ~(UART_LSR_DR | UART_LSR_BI); + } + } else { + uart->lsr &= ~(UART_LSR_DR | UART_LSR_BI); + } + uart_update_irq(s); + } + } + break; + case UART_INTERRUPT_ENABLE: + if (uart->lcr & UART_LCR_DLAB) { + r = extract16(uart->divider, 8 * (int)baroff, 8); + } else { + r = uart->ier; + } + break; + case UART_INTERRUPT_ID: + if (!serial_rxq_empty(s)) { + uart->lsr |= UART_LSR_DR; + } + uart_update_irq(s); + + if ((uart->iir & UART_IIR_ID) == UART_IIR_THRI) { + /* transmit hold register is empty */ + uart->thr_ipending = 0; + uart_update_irq(s); + } + r = uart->iir; + break; + case UART_LINE_CONTROL: + r = uart->lcr; + break; + case UART_MODEM_CONTROL: + uart->mcr_read = 1; + if (uart->mcr_write == 0) { + /* linux */ + uart->flags = CHR_TIOCM_CTS | CHR_TIOCM_DSR | CHR_TIOCM_CAR; + serial_update_msl(s); + } + r = uart->mcr; + break; + case UART_LINE_STATUS: + if (!serial_rxq_empty(s)) { + uart->lsr |= UART_LSR_DR; + } + if (memq_full(s->txq)) { + uart->lsr &= ~UART_LSR_THRE; /* clear thr empty */ + uart->lsr &= ~UART_LSR_TEMT; /* clear transmitter empty */ + } else { + uart->lsr |= UART_LSR_THRE; /* thr empty */ + uart->lsr |= UART_LSR_TEMT; /* transmitter empty */ + } + /* Clear break and overrun interrupts */ + if (uart->lsr & (UART_LSR_BI | UART_LSR_OE)) { + uart->lsr &= ~(UART_LSR_BI | UART_LSR_OE); + uart_update_irq(s); + } + r = uart->lsr; + break; + case UART_MODEM_STATUS: + if (uart->mcr & UART_MCR_LOOP) { + /* In loopback modem output pins are connected to the inputs */ + r = (uart->mcr & 0x0c) << 4; + r |= (uart->mcr & 0x02) << 3; + r |= (uart->mcr & 0x01) << 5; + } else { + serial_update_msl(s); + r = uart->msr; + /* Clear delta bits & msr int after read, if they were set */ + if (uart->msr & UART_MSR_ANY_DELTA) { + uart->msr &= 0xf0; + uart_update_irq(s); + } + } + break; + case UART_SCRATCH: + r = uart->scr; + break; + default: + break; + } + + return r; +} + +void +serial_barwr(pciehwdev_t *phwdev, + const u_int64_t baroff, const size_t size, 
const u_int64_t val) +{ + serial_t *s = serial_get(phwdev); + volatile serial_state_t *st = s->state; + uart_state_t *uart = s->uart; + uint8_t changed; + uint8_t temp; + + /* only byte access */ + if (size != 1) return; + if (baroff >= 8) return; + + switch (baroff) { + case UART_TX_BUF: + if (uart->lcr & UART_LCR_DLAB) { + uart->divider = deposit32(uart->divider, 8 * (int)baroff, 8, + (int)val); + uart_update_parameters(s); + } else { + uart->thr = (uint8_t)val; + uart->thr_ipending = 0; + uart->lsr &= ~UART_LSR_THRE; /* clear thr empty */ + uart->lsr &= ~UART_LSR_TEMT; /* clear transmitter empty */ + uart_update_irq(s); + uart_xmit(s); + } + break; + case UART_INTERRUPT_ENABLE: + if (uart->lcr & UART_LCR_DLAB) { + uart->divider = deposit32(uart->divider, 8 * (int)baroff, 8, (int)val); + uart_update_parameters(s); + } else { + changed = (uart->ier ^ val) & 0x0f; + uart->ier = val & 0x0f; + + if (changed & UART_IER_MSI) { + if (uart->ier & UART_IER_MSI) { + /* + * Carry over mcr RTS/DTR to msr and let + * serial_update_msl set the delta bits. + */ + if (uart->mcr == 0xb) { + uart->msr = 0xb0; + } else if (uart->mcr == 0x3) { + uart->msr = 0xb0; + } else if (uart->mcr == 0x8) { + uart->msr = 0x80; + } else if (uart->mcr == 0x0) { + uart->msr = 0x80; + } + serial_update_msl(s); + } + } + + /* Turning on the THRE interrupt on IER can trigger the interrupt + * if LSR.THRE=1, even if it had been masked before by reading IIR. + * This is not in the datasheet, but Windows relies on it. It is + * unclear if THRE has to be resampled every time THRI becomes + * 1, or only on the rising edge. Bochs does the latter, and + * Windows always toggles IER to all zeroes and back to all ones, + * so do the same. + * + * If IER.THRI is zero, thr_ipending is not used. Set it to zero + * so that the thr_ipending subsection is not migrated. 
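+ *
+ * Concretely, in the code below: on an IER.THRI toggle, thr_ipending
+ * is raised only when THRI is now enabled while LSR.THRE is already
+ * set, and cleared otherwise.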
+ */ + if (changed & UART_IER_THRI) { + if ((uart->ier & UART_IER_THRI) && + (uart->lsr & UART_LSR_THRE)) { + uart->thr_ipending = 1; + } else { + uart->thr_ipending = 0; + } + } + + if (changed) { + uart_update_irq(s); + } + + if ((uart->ier & 0xf) == 0) { + intr_drvcfg_mask(st->intrb, 1); /* mask */ + } else { + intr_drvcfg_mask(st->intrb, 0); /* unmask */ + } + } + break; + case UART_FIFO_CONTROL: + /* Flush FIFOs if enable/disable flag changed */ + temp = (uint8_t)val; + if ((temp ^ uart->fcr) & UART_FCR_FE) { + temp |= UART_FCR_XFR | UART_FCR_RFR; + } + + if (temp & UART_FCR_RFR) { + /* Reset the receive fifo */ + uart->lsr &= ~(UART_LSR_DR | UART_LSR_BI); + rx_fifo_reset(s); + } + + if (temp & UART_FCR_XFR) { + /* Reset the transmit fifo */ + uart->lsr |= UART_LSR_THRE; + uart->thr_ipending = 1; + tx_fifo_reset(s); + } + uart_write_fcr(s, val); + uart_update_irq(s); + break; + case UART_LINE_CONTROL: + uart->lcr = (uint8_t)val; + if (uart->lcr & UART_LCR_BRK) st->breakreq++; + uart_update_parameters(s); + break; + case UART_MODEM_CONTROL: + { + int old_mcr = uart->mcr; + + uart->mcr_write = 1; + uart->mcr = val & 0x1f; + if (uart->mcr & UART_MCR_LOOP) { + break; + } + + if (old_mcr != uart->mcr) { + serial_update_tiocm(s); + } + } + break; + case UART_LINE_STATUS: + break; + case UART_MODEM_STATUS: + break; + case UART_SCRATCH: + uart->scr = (uint8_t)val; + break; + default: + break; + } +} + +void +serial_reset(pciehwdev_t *phwdev, const pciesvc_rsttype_t rsttype) +{ + serial_t *s = serial_get(phwdev); + + /* + * It makes some sense to do this here: + * uart_reset(s); + * but it looks like the linux serial driver doesn't + * expect the uart registers to change during FLR so doing + * the uart_reset() will cause the serial driver not to recover. + */ + rx_fifo_reset(s); + tx_fifo_reset(s); + s->state->breakreq++; +} diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/serial.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/serial.h new file mode 100644 index 0000000000..9e28bf128d --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/serial.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Pensando Systems Inc. + */ + +#ifndef __SERIAL_H__ +#define __SERIAL_H__ + +u_int64_t +serial_barrd(pciehwdev_t *phwdev, + const u_int64_t baroff, const size_t size); + +void +serial_barwr(pciehwdev_t *phwdev, + const u_int64_t baroff, const size_t size, const u_int64_t val); + +void +serial_reset(pciehwdev_t *phwdev, const pciesvc_rsttype_t rsttype); + +#endif /* __SERIAL_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/virtio.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/virtio.c new file mode 100644 index 0000000000..00930126c9 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/virtio.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022, Pensando Systems Inc. 
+ */ + +#include "pciesvc_impl.h" +#include "virtio.h" + +#include "virtio_spec.h" + +#define FMT64X "0x%" PRIx64 +#define FMT64U "%" PRIu64 +#define FMT64S "%lu" + +#define VIRTIO_DEV_REG_NOTIFY(fld) \ + case VIRTIO_DEV_REG_OFF(fld): \ + *do_notify = 1; \ + break; + +#define VIRTIO_DEV_REG_RD(fld) \ + case VIRTIO_DEV_REG_OFF(fld): \ + pciesvc_mem_rd(addr, &val, VIRTIO_DEV_REG_SZ(fld)); \ + pciesvc_logdebug("%s: read %s addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X"", \ + pciehwdev_get_name(phwdev), #fld, addr, baroff, size, val); \ + break; + +#define VIRTIO_DEV_REG_RD_ARR(fld, arr_fld, idx_fld, idx_count) \ + case VIRTIO_DEV_REG_OFF(fld): \ + pciesvc_mem_rd(VIRTIO_DEV_REG_ADDR(base, idx_fld), \ + &idx, VIRTIO_DEV_REG_SZ(idx_fld)); \ + if (idx < idx_count) { \ + pciesvc_mem_rd(VIRTIO_DEV_REG_ADDR(base, arr_fld), \ + &val, VIRTIO_DEV_REG_SZ(arr_fld)); \ + pciesvc_logdebug("%s: read %s["FMT64U"] addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X"",\ + pciehwdev_get_name(phwdev), #fld, idx, \ + VIRTIO_DEV_REG_ADDR(base, arr_fld), \ + baroff, size, val); \ + } else { \ + pciesvc_logerror("%s: read %s["FMT64U"] addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X" (out of bounds)",\ + pciehwdev_get_name(phwdev), #fld, idx, \ + VIRTIO_DEV_REG_ADDR(base, arr_fld), \ + baroff, size, val); \ + } \ + break; + +#define VIRTIO_DEV_REG_WR(fld) \ + case VIRTIO_DEV_REG_OFF(fld): \ + pciesvc_logdebug("%s: write %s addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X"",\ + pciehwdev_get_name(phwdev), #fld, addr, baroff, size, val); \ + pciesvc_mem_wr(addr, &val, VIRTIO_DEV_REG_SZ(fld)); \ + break; + +#define VIRTIO_DEV_REG_WR_COND(fld, cond) \ + case VIRTIO_DEV_REG_OFF(fld): \ + pciesvc_logdebug("%s: write %s addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X" cond %u", \ + pciehwdev_get_name(phwdev), #fld, addr, baroff, size, val, cond); \ + if (cond) { \ + pciesvc_mem_wr(addr, &val, VIRTIO_DEV_REG_SZ(fld)); \ + } \ + break; + +#define VIRTIO_DEV_REG_WR_PROC(fld, proc) \ + case VIRTIO_DEV_REG_OFF(fld): \ + pciesvc_logdebug("%s: write %s addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X" proc %s", \ + pciehwdev_get_name(phwdev), #fld, addr, baroff, size, val, #proc); \ + proc(phwdev, addr, baroff, size, val); \ + break; + +#define VIRTIO_DEV_REG_WR_IGN(fld) \ + case VIRTIO_DEV_REG_OFF(fld): \ + pciesvc_logdebug("%s: write %s addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X" ignore",\ + pciehwdev_get_name(phwdev), #fld, addr, baroff, size, val); \ + break; + +#define VIRTIO_DEV_REG_WR_ARR(fld, arr_fld, idx_fld, idx_count) \ + case VIRTIO_DEV_REG_OFF(fld): \ + pciesvc_mem_rd(VIRTIO_DEV_REG_ADDR(base, idx_fld), \ + &idx, VIRTIO_DEV_REG_SZ(idx_fld)); \ + if (idx < idx_count) { \ + pciesvc_logdebug("%s: write %s["FMT64U"] addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X"",\ + pciehwdev_get_name(phwdev), #fld, idx, \ + VIRTIO_DEV_REG_ADDR(base, arr_fld), \ + baroff, size, val); \ + pciesvc_mem_wr(VIRTIO_DEV_REG_ADDR(base, arr_fld), \ + &val, VIRTIO_DEV_REG_SZ(arr_fld)); \ + } else { \ + pciesvc_logerror("%s: write %s["FMT64U"] addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X" (out of bounds)",\ + pciehwdev_get_name(phwdev), #fld, idx, \ + VIRTIO_DEV_REG_ADDR(base, arr_fld), \ + baroff, size, val); \ + } \ + break; + +#define VIRTIO_DEV_REG_WR_ARR_IGN(fld, arr_fld, idx_fld, idx_count) \ + case VIRTIO_DEV_REG_OFF(fld): \ + pciesvc_mem_rd(VIRTIO_DEV_REG_ADDR(base, idx_fld), \ + &idx, VIRTIO_DEV_REG_SZ(idx_fld)); \ + if (idx < idx_count) { \ + pciesvc_logdebug("%s: write %s["FMT64U"] addr 
"FMT64X" off "FMT64U" size "FMT64S" val "FMT64X" ignore",\ + pciehwdev_get_name(phwdev), #fld, idx, \ + VIRTIO_DEV_REG_ADDR(base, arr_fld), \ + baroff, size, val); \ + pciesvc_mem_wr(VIRTIO_DEV_REG_ADDR(base, arr_fld), \ + &val, VIRTIO_DEV_REG_SZ(arr_fld)); \ + } else { \ + pciesvc_logerror("%s: write %s["FMT64U"] addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X" ignore (out of bounds)",\ + pciehwdev_get_name(phwdev), #fld, idx, \ + VIRTIO_DEV_REG_ADDR(base, arr_fld), \ + baroff, size, val); \ + } \ + break; + +#define VIRTIO_DEV_REG_INSIDE(_fld, _offs, _sz) \ + (_offs >= VIRTIO_DEV_REG_OFF(_fld) && \ + (_offs + _sz) <= VIRTIO_DEV_REG_OFF(_fld) + VIRTIO_DEV_REG_SZ(_fld)) + +u_int64_t +virtio_barrd(pciehwdev_t *phwdev, u_int64_t addr, + const u_int64_t baroff, const size_t size, + u_int8_t *do_notify) +{ + u_int64_t base = addr - baroff; + u_int64_t val = 0; + u_int64_t idx = 0; + + /* net_cfg */ + if (VIRTIO_DEV_REG_INSIDE(part1, baroff, size)) { + pciesvc_mem_rd(addr, &val, size); + pciesvc_logdebug("%s: read part1 addr "FMT64X" " + "off "FMT64U" size "FMT64S" val "FMT64X"", + pciehwdev_get_name(phwdev), addr, baroff, size, val); + return val; + } + + switch (baroff) { + VIRTIO_DEV_REG_RD(cmn_cfg.device_feature_select); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.device_feature, + cmn_cfg.device_feature_cfg[idx], + cmn_cfg.device_feature_select, + VIRTIO_PCI_FEATURE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD(cmn_cfg.driver_feature_select); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.driver_feature, + cmn_cfg.driver_feature_cfg[idx], + cmn_cfg.driver_feature_select, + VIRTIO_PCI_FEATURE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD(cmn_cfg.config_msix_vector); + VIRTIO_DEV_REG_RD(cmn_cfg.num_queues); + VIRTIO_DEV_REG_RD(cmn_cfg.device_status); + VIRTIO_DEV_REG_RD(cmn_cfg.config_generation); + VIRTIO_DEV_REG_RD(cmn_cfg.queue_select); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.queue_cfg.queue_size, + queue_cfg[idx].queue_size, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.queue_cfg.queue_msix_vector, + queue_cfg[idx].queue_msix_vector, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.queue_cfg.queue_enable, + queue_cfg[idx].queue_enable, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.queue_cfg.queue_notify_off, + queue_cfg[idx].queue_notify_off, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.queue_cfg.queue_desc_lo, + queue_cfg[idx].queue_desc_lo, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.queue_cfg.queue_desc_hi, + queue_cfg[idx].queue_desc_hi, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.queue_cfg.queue_avail_lo, + queue_cfg[idx].queue_avail_lo, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.queue_cfg.queue_avail_hi, + queue_cfg[idx].queue_avail_hi, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.queue_cfg.queue_used_lo, + queue_cfg[idx].queue_used_lo, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_RD_ARR(cmn_cfg.queue_cfg.queue_used_hi, + queue_cfg[idx].queue_used_hi, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + default: + val = 0; + pciesvc_logerror("%s: read addr "FMT64X" off "FMT64U" size "FMT64S" default ignore", + pciehwdev_get_name(phwdev), addr, baroff, size); + break; + } + + return val; +} + +static void 
+virtio_barwr_device_status(pciehwdev_t *phwdev, u_int64_t addr, + const u_int64_t baroff, const size_t size, + const u_int64_t val) +{ + u_int64_t base = addr - baroff; + u_int8_t old = 0; + + pciesvc_mem_rd(addr, &old, VIRTIO_DEV_REG_SZ(cmn_cfg.device_status)); + + if (!val) { + // If pciemgr sees the transition nonzero -> zero, then nicmgr needs to + // reset the device before device_status actually changes to zero. + // + // If the device status was already zero, and is written zero, there is + // a race! After writing zero, the driver would read zero and proceed. + // + // Nicmgr, when it handles the event, cannot depend on the current + // value being the old value. The driver may have written a new + // nonzero value after proceeding. If nicmgr assumes that the now + // current nonzero value is the old value, it will falsely observe a + // transition from nonzero to zero, which did not actually occur, and + // reset the device at the same time as the driver is initializing. + // + // To avoid this, when pciemgr sees the transition from nonzero -> + // zero, then pciemgr indicates so in need_reset. + // + // If nicmgr receives an event for the device_status register, it + // should check need_reset. If reset is needed, then clear need_reset, + // reset the device, and then finally clear device_status. If reset is + // not needed, nicmgr should not reset the device, to avoid the race. + // + if (old) { + old = 1; + pciesvc_mem_wr(VIRTIO_DEV_REG_ADDR(base, cmn_cfg.need_reset), + &old, sizeof(old)); + } + + // Eventually, nicmgr will update device_status. Not here. + return; + } + + if ((val & VIRTIO_S_FEATURES_OK) && !(old & VIRTIO_S_FEATURES_OK)) { + u_int32_t feature_lo = 0; + u_int32_t feature_hi = 0; + u_int64_t feature = 0; + + pciesvc_mem_rd(VIRTIO_DEV_REG_ADDR(base, cmn_cfg.driver_feature_cfg[0]), + &feature_lo, sizeof(feature_lo)); + + pciesvc_mem_rd(VIRTIO_DEV_REG_ADDR(base, cmn_cfg.driver_feature_cfg[1]), + &feature_hi, sizeof(feature_hi)); + + feature = (u_int64_t)feature_lo | ((u_int64_t)feature_hi << 32); + + pciesvc_loginfo("proc: features_ok "FMT64X"", feature); + + if (feature & VIRTIO_F_NOTIFICATION_DATA) { + // Nicmgr initialized the queue configs with notify offsets in + // the incr_pi_dbell range. If this feature is selected, + // modify the queue configs to ring the same doorbell via the + // set_pi_dbell range. + // + // This is done here in pciesvc, so that the driver can read + // the notify offset of queues _immediately_ after setting + // features ok. 
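+ //
+ // Each queue_notify_off below is advanced by
+ // offsetof(struct virtio_pci_notify_reg, set_pi_dbell) /
+ // VIRTIO_NOTIFY_MULTIPLIER, so an unchanged notify index now
+ // resolves into the set_pi_dbell doorbell window.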
+ + const uint16_t notify_offset = + offsetof(struct virtio_pci_notify_reg, set_pi_dbell) + / VIRTIO_NOTIFY_MULTIPLIER; + + u_int16_t vq_i = 0, vq_count = 0; + + pciesvc_mem_rd(VIRTIO_DEV_REG_ADDR(base, cmn_cfg.num_queues), + &vq_count, sizeof(vq_count)); + + pciesvc_logdebug("proc: vq_count %u notify_offset %u", + vq_count, notify_offset); + + for (; vq_i < vq_count; ++vq_i) { + u_int64_t off_addr = + VIRTIO_DEV_REG_ADDR(base, queue_cfg[vq_i].queue_notify_off); + + u_int16_t off = 0; + + pciesvc_mem_rd(off_addr, &off, sizeof(off)); + off += notify_offset; + pciesvc_mem_wr(off_addr, &off, sizeof(off)); + } + } + } + + pciesvc_mem_wr(addr, &val, VIRTIO_DEV_REG_SZ(cmn_cfg.device_status)); +} + +void +virtio_barwr(pciehwdev_t *phwdev, u_int64_t addr, + const u_int64_t baroff, const size_t size, const u_int64_t val, + u_int8_t *do_notify) +{ + u_int64_t base = addr - baroff; + u_int64_t idx = 0; + + switch (baroff) { + VIRTIO_DEV_REG_WR(cmn_cfg.device_feature_select); + + VIRTIO_DEV_REG_WR_ARR_IGN(cmn_cfg.device_feature, + cmn_cfg.device_feature_cfg[idx], + cmn_cfg.device_feature_select, + VIRTIO_PCI_FEATURE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR(cmn_cfg.driver_feature_select); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.driver_feature, + cmn_cfg.driver_feature_cfg[idx], + cmn_cfg.driver_feature_select, + VIRTIO_PCI_FEATURE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR(cmn_cfg.config_msix_vector); + VIRTIO_DEV_REG_WR_IGN(cmn_cfg.num_queues); + VIRTIO_DEV_REG_WR_PROC(cmn_cfg.device_status, virtio_barwr_device_status); + VIRTIO_DEV_REG_WR_IGN(cmn_cfg.config_generation); + VIRTIO_DEV_REG_WR(cmn_cfg.queue_select); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.queue_cfg.queue_size, + queue_cfg[idx].queue_size, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.queue_cfg.queue_msix_vector, + queue_cfg[idx].queue_msix_vector, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.queue_cfg.queue_enable, + queue_cfg[idx].queue_enable, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.queue_cfg.queue_notify_off, + queue_cfg[idx].queue_notify_off, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.queue_cfg.queue_desc_lo, + queue_cfg[idx].queue_desc_lo, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.queue_cfg.queue_desc_hi, + queue_cfg[idx].queue_desc_hi, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.queue_cfg.queue_avail_lo, + queue_cfg[idx].queue_avail_lo, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.queue_cfg.queue_avail_hi, + queue_cfg[idx].queue_avail_hi, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.queue_cfg.queue_used_lo, + queue_cfg[idx].queue_used_lo, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + VIRTIO_DEV_REG_WR_ARR(cmn_cfg.queue_cfg.queue_used_hi, + queue_cfg[idx].queue_used_hi, + cmn_cfg.queue_select, + VIRTIO_PCI_QUEUE_SELECT_COUNT); + + default: + pciesvc_logerror("%s: write addr "FMT64X" off "FMT64U" size "FMT64S" val "FMT64X" default ignore", + pciehwdev_get_name(phwdev), addr, baroff, size, val); + break; + } + + switch (baroff) { + VIRTIO_DEV_REG_NOTIFY(cmn_cfg.device_status); + VIRTIO_DEV_REG_NOTIFY(cmn_cfg.queue_select); + VIRTIO_DEV_REG_NOTIFY(cmn_cfg.queue_cfg.queue_enable); + } +} diff --git 
a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/virtio.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/virtio.h
new file mode 100644
index 0000000000..9e41903c69
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/virtio.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Pensando Systems Inc.
+ */
+
+#ifndef __VIRTIO_H__
+#define __VIRTIO_H__
+
+u_int64_t
+virtio_barrd(pciehwdev_t *phwdev, u_int64_t addr,
+             const u_int64_t baroff, const size_t size,
+             u_int8_t *do_notify);
+
+void
+virtio_barwr(pciehwdev_t *phwdev, u_int64_t addr,
+             const u_int64_t baroff, const size_t size, const u_int64_t val,
+             u_int8_t *do_notify);
+
+#endif /* __VIRTIO_H__ */

diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/vpd.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/vpd.c
new file mode 100644
index 0000000000..4ad3ecbc2c
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/vpd.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019,2021-2022, Pensando Systems Inc.
+ */
+
+#include "pciesvc_impl.h"
+
+uint32_t
+pciehw_vpd_read(const pciehwdevh_t hwdevh, const uint16_t addr)
+{
+    if (addr < PCIEHW_VPDSZ) {
+        const uint16_t aligned_addr = addr & ~0x3;
+        const uint8_t *vpddata = pciesvc_vpd_get(hwdevh);
+        const uint32_t data = (((uint32_t)vpddata[aligned_addr + 3] << 24) |
+                               ((uint32_t)vpddata[aligned_addr + 2] << 16) |
+                               ((uint32_t)vpddata[aligned_addr + 1] << 8) |
+                               ((uint32_t)vpddata[aligned_addr + 0] << 0));
+        pciesvc_vpd_put(vpddata, CLEAN);
+        return data;
+    }
+    return 0;
+}
+
+void
+pciehw_vpd_write(const pciehwdevh_t hwdevh,
+                 const uint16_t addr, const uint32_t data)
+{
+    /* No writeable vpd data (yet). */
+}

diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/vpd.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/vpd.h
new file mode 100644
index 0000000000..fd17b40afa
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc/src/vpd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019,2021, Pensando Systems Inc.
+ */
+
+#ifndef __VPD_H__
+#define __VPD_H__
+
+typedef u_int32_t pciehwdevh_t;
+
+uint32_t pciehw_vpd_read(const pciehwdevh_t hwdevh, const uint16_t addr);
+void pciehw_vpd_write(const pciehwdevh_t hwdevh,
+                      const uint16_t addr, const uint32_t data);
+
+#endif /* __VPD_H__ */

diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc_end.c b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc_end.c
new file mode 100644
index 0000000000..53fee743d9
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc_end.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021, 2022, Oracle and/or its affiliates.
+ */
+
+/*
+ * PCIESVC Library Loader - End of Code Marker
+ *
+ * This object file is last in the module link order and so the symbol
+ * pciesvc_end gives us the address of the end of the code section in
+ * the module. What follows are the various data sections.
+ *
+ * The reason this is needed is to be able to examine the code in
+ * kpcimgr_module_register() without accidentally looking at data.
+ * At the time kpcimgr_module_register() is called, the kernel has
+ * completely finished loading the module and all the metadata (i.e.,
+ * section headers, etc.) has been discarded, and so there is nothing
+ * to tell us where the code ends.
+ *
+ * Author: rob.gardner@oracle.com
+ */
+noinline void pciesvc_end(void)
+{
+}

diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc_system_extern.h b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc_system_extern.h
new file mode 100644
index 0000000000..6d73f80a19
--- /dev/null
+++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/pciesvc_system_extern.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2021, Pensando Systems Inc.
+ */
+
+#ifndef __PCIESVC_SYSTEM_EXTERN_H__
+#define __PCIESVC_SYSTEM_EXTERN_H__
+
+#include "kpcimgr_api.h"
+#include "pciesvc.h"
+#include "portcfg.h"
+
+#include <stdarg.h>
+#include "notify_entry.h"
+#include "cfgspace.h"
+
+#define KPR_LINESZ 512
+#define kpr_err(fmt, ...) \
+    do { \
+        char buf[KPR_LINESZ]; \
+        if (virtual()) \
+            pciesvc_snprintf(buf, KPR_LINESZ, KERN_ERR fmt, ##__VA_ARGS__); \
+        else \
+            pciesvc_snprintf(buf, KPR_LINESZ, fmt, ##__VA_ARGS__); \
+        pciesvc_log(buf); \
+    } while (0)
+
+#define kdbg_puts_caller() \
+    do { \
+        kstate_t *kstate = get_kstate(); \
+        kpr_err("%s called from offset %lx\n", __func__, \
+                ((unsigned long)__builtin_return_address(0) - (unsigned long)kstate->code_base)); \
+    } while (0)
+
+#define pciesvc_assert(expr) \
+    if (unlikely(!(expr))) { \
+        kpr_err("Assertion failed! %s,%s,%s,line=%d\n", \
+                #expr, __FILE__, __func__, __LINE__); \
+    }
+
+#define pciesvc_usleep kp_udelay
+#define pciesvc_ffs ffs
+#define pciesvc_ffsll __builtin_ffsl
+
+#define MIN(x,y) ((x) < (y) ? x : y)
+#define MAX(x,y) ((x) > (y) ?
x : y) + +#define PRIi64 "lld" + +#define PRIx8 "x" +#define PRIx16 "x" +#define PRIx32 "x" +#define PRIx64 "llx" +#define PRIu64 "llu" + +#define pciesvc_htobe32(x) __cpu_to_be32(x) +#define pciesvc_be32toh(x) __be32_to_cpu(x) + +#define pciesvc_htole32(x) __cpu_to_le32(x) +#define pciesvc_le32toh(x) __le32_to_cpu(x) + +#define pciesvc_htobe16(x) __cpu_to_be16(x) +#define pciesvc_be16toh(x) __be16_to_cpu(x) + +#define CLEAN 0 +#define DIRTY 1 + +int +pciesvc_snprintf(char *buf, size_t len, const char *fmt, ...); + +int +pciesvc_vsnprintf(char *buf, size_t len, const char *fmt, va_list ap) + __attribute__((weak)); + + +u64 +pciesvc_vtop(const void *hwmemva); + +void +*pciesvc_hwmem_get(void); +uint32_t +pciesvc_reg_rd32(const uint64_t pa); +void +pciesvc_pciepreg_rd32(const uint64_t pa, uint32_t *dest); +void +pciesvc_reg_wr32(const uint64_t pa, const uint32_t val); +#define pciesvc_pciepreg_wr32 pciesvc_reg_wr32 + +int +pciesvc_mem_rd(const uint64_t pa, void *buf, const size_t sz); +void +pciesvc_mem_wr(const uint64_t pa, const void *buf, const size_t sz); +void +pciesvc_mem_barrier(void); + +void * +pciesvc_memset(void *s, int c, size_t n); +void * +pciesvc_memcpy(void *dst, const void *src, size_t n); +void * +pciesvc_memcpy_toio(void *dsthw, const void *src, size_t n); + +void +pciesvc_log(const char *msg); + +int +pciesvc_event_handler(pciesvc_eventdata_t *evdata, const size_t evsize); + +void * +pciesvc_shmem_get(void); + +int virtual(void); +int cpuid(void); +unsigned long release(void); +long read_el(void); +void kpcimgr_init_poll(kstate_t *ks); +void pciesvc_debug_cmd(uint32_t *val); +void kpcimgr_poll(kstate_t *ks, int index, int phase); + +/* functions in kpci_test.c */ +void kp_udelay(unsigned long us); +int time_elapsed(unsigned long start, unsigned long elapsed); +void _uart_write(unsigned char *reg, char c); +void uart_write(kstate_t *ks, char c); +int uart_read(kstate_t *ks, char *c); +void uart_write_debug(kstate_t *ks, char c); +void kdbg_puts(const char *s); +void trigger_serr(int val); +void kpcimgr_report_stats(kstate_t *ks, int phase, int always, int rightnow); + +/* functions in kpci_kexec.c */ +void set_kstate(kstate_t *ks); + +#endif /* __PCIESVC_SYSTEM_EXTERN_H__ */ diff --git a/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/tools/reloc_check b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/tools/reloc_check new file mode 100755 index 0000000000..9c74736bb5 --- /dev/null +++ b/platform/pensando/dsc-drivers/src/drivers/linux/pciesvc/tools/reloc_check @@ -0,0 +1,78 @@ +#!/bin/bash + +relocs='R_AARCH64_ADR_PREL_PG_HI21|R_AARCH64_ADR_PREL_LO21|R_AARCH64_CALL26|R_AARCH64_JUMP26|LO12' + +OBJDUMP=${CROSS_COMPILE}objdump +tmp=/tmp/reloc_check$$ +trap "rm -f $tmp" EXIT + +$OBJDUMP --section=.text --section=.data --reloc $1 | grep '^000' >$tmp + +safe=yes +num_relocs=$(wc -l $tmp | awk '{ print $1 }') +echo Examining $num_relocs relocations +cat $tmp | grep -vE "$relocs" |\ + { + illegals=0 + while read line + do + echo Illegal relocation: $line + ((illegals++)) + done + + if ((illegals>0)) + then + echo DO NOT PROCEED! + exit 1 + else + echo No illegal relocations found + exit 0 + fi + } +if [ $? 
!= 0 ]
+then
+    safe=no
+fi
+
+# look for external symbol references
+
+nm -u $1 | grep -v mcount | awk '{ print $2 }' |
+    {
+    read pattern
+    if [ "$pattern" != kpcimgr_module_register ]
+    then
+        echo Caution: found undesirable symbol $pattern
+    fi
+
+    while read symbol
+    do
+        pattern="$pattern|$symbol"
+        if [ "$symbol" != kpcimgr_module_register ]
+        then
+            echo Caution: found undesirable symbol $symbol
+        fi
+    done
+    echo checking objects for any of these symbols: $pattern
+
+    nm kpci_entry.o kpcinterface.o pciesvc/src/*.o | grep -E $pattern >$tmp
+    }
+
+if [ -s $tmp ]
+then
+    echo Found external references:
+    cat $tmp
+    echo
+    echo DO NOT PROCEED!
+    safe=no
+else
+    echo No external references found
+fi
+
+
+if [ $safe = yes ]
+then
+    echo Safe to proceed.
+else
+    echo NOT safe to proceed.
+    rm -f $1
+fi

diff --git a/platform/pensando/dsc-drivers/systemd/ionic-modules.service b/platform/pensando/dsc-drivers/systemd/ionic-modules.service
new file mode 100644
index 0000000000..892e6b926a
--- /dev/null
+++ b/platform/pensando/dsc-drivers/systemd/ionic-modules.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Ionic kernel modules init
+After=local-fs.target
+
+[Service]
+Type=oneshot
+ExecStart=-/etc/init.d/ionic-modules start
+ExecStop=-/etc/init.d/ionic-modules stop
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target

diff --git a/platform/pensando/elba-asic-psci.dtb b/platform/pensando/elba-asic-psci.dtb
new file mode 100755
index 0000000000..c2400062d4
Binary files /dev/null and b/platform/pensando/elba-asic-psci.dtb differ

diff --git a/platform/pensando/one-image.mk b/platform/pensando/one-image.mk
new file mode 100644
index 0000000000..88558cc8d3
--- /dev/null
+++ b/platform/pensando/one-image.mk
@@ -0,0 +1,17 @@
+# sonic pensando one image installer
+
+SONIC_ONE_IMAGE = sonic-pensando.tar
+$(SONIC_ONE_IMAGE)_MACHINE = pensando
+$(SONIC_ONE_IMAGE)_IMAGE_TYPE = dsc
+
+$(SONIC_ONE_IMAGE)_INSTALLS += $(SYSTEMD_SONIC_GENERATOR)
+$(SONIC_ONE_IMAGE)_INSTALLS += $(DPU_MODULE)
+$(SONIC_ONE_IMAGE)_INSTALLS += $(IONIC_MODULE)
+
+ifeq ($(INSTALL_DEBUG_TOOLS),y)
+$(SONIC_ONE_IMAGE)_DOCKERS += $(SONIC_INSTALL_DOCKER_DBG_IMAGES)
+$(SONIC_ONE_IMAGE)_DOCKERS += $(filter-out $(patsubst %-$(DBG_IMAGE_MARK).gz,%.gz, $(SONIC_INSTALL_DOCKER_DBG_IMAGES)), $(SONIC_INSTALL_DOCKER_IMAGES))
+else
+$(SONIC_ONE_IMAGE)_DOCKERS = $(SONIC_INSTALL_DOCKER_IMAGES)
+endif
+SONIC_INSTALLERS += $(SONIC_ONE_IMAGE)

diff --git a/platform/pensando/platform.conf b/platform/pensando/platform.conf
new file mode 100644
index 0000000000..b10f20e1ca
--- /dev/null
+++ b/platform/pensando/platform.conf
@@ -0,0 +1,283 @@
+####### Pensando ########
+
+#!/bin/sh
+
+R=""
+export LD_LIBRARY_PATH=/platform/lib:/nic/lib:$LD_LIBRARY_PATH
+export PATH=/platform/bin:$PATH
+
+root_mnt=$R/mnt
+bl_conf_path=$root_mnt
+HOST=/host
+
+image_dir=image-$image_version
+
+INSTALLER_PAYLOAD=fs.zip
+DOCKERFS_DIR=docker
+FILESYSTEM_DOCKERFS=dockerfs.tar.gz
+BL_CONF=boot-$image_dir.conf
+
+DATA_PARTUUID=6ED62003-DD8D-44B8-9538-0A2B7C7E628F
+ROOT_PARTUUID=C7F48DD2-C265-404B-959D-C64D21D49168
+
+ROOT_PARTSIZE=24G
+
+exec 0< /dev/tty 1> /dev/tty 2> /dev/tty
+
+PKG=""
+ACTION=""
+
+root_pn=0
+data_pn=0
+
+REPART_NEEDED=0
+
+set -e
+
+fatal()
+{
+    echo "FATAL: $1" >&2
+    exit 1
+}
+
+check_existing_parts()
+{
+    local nparts i partuuid boot_partsize boot_lastsec data_firstsec
+
+    nparts=$(sgdisk -p /dev/mmcblk0 | grep '^[ ]*[1-9]' | wc -l)
+    for i in $(seq $nparts); do
+        partuuid=$(sgdisk -i $i /dev/mmcblk0 | awk '/Partition unique GUID/ { print $NF }')
+        case
"$partuuid" in + $DATA_PARTUUID) data_pn=$i; ;; + $ROOT_PARTUUID) root_pn=$i; ;; + esac + done + + if [ $root_pn -ne 0 ]; then + boot_partsize=$(sgdisk -i $root_pn /dev/mmcblk0 | awk -F '[( ]' '/Partition size/ {print int($6)}') + boot_lastsec=$(sgdisk -i $root_pn /dev/mmcblk0 | awk '/Last sector/ {print $3}') + if [ ${boot_partsize}G = $ROOT_PARTSIZE ]; then + echo "SONiC root partitions already present with requested size. No repartition, only formatting" + else + echo "SONiC root partitions already present with mismatch size ${partsize}G. Repartition needed" + REPART_NEEDED=1 + fi + fi + + if [ $data_pn -eq 0 ]; then + echo "Data partition not found; Repartition needed" + REPART_NEEDED=1 + elif [ $data_pn -ne $nparts ]; then + fatal "Data partition is not the last partition; exiting." >&2 + else + data_firstsec=$(sgdisk -i $data_pn /dev/mmcblk0 | awk '/First sector/ {print $3}') + if [ $data_firstsec -ne $((boot_lastsec+1)) ]; then + echo "Data partition not contigent with boot partition. Repartition needed" + REPART_NEEDED=1 + fi + fi +} + +setup_partitions_multi() +{ + echo "==> Setting up partitions..." + + set +e + if [ $REPART_NEEDED -eq 0 ]; then + mkfs.ext4 -F -q /dev/mmcblk0p$root_pn >/dev/null + else + + if [ $root_pn -ne 0 ]; then + sgdisk -d $root_pn /dev/mmcblk0 >/dev/null + fi + [ $data_pn -ne 0 ] && sgdisk -d $data_pn /dev/mmcblk0 >/dev/null + + if [ $root_pn -eq 0 ]; then + root_pn=10 + data_pn=$(($root_pn + 1)) + fi + + if [ $data_pn -eq 0 ]; then + data_pn=$(($root_pn + 1)) + fi + + sgdisk \ + -n $root_pn:+0:+$ROOT_PARTSIZE -t $root_pn:8300 \ + -u $root_pn:$ROOT_PARTUUID -c $root_pn:"SONiC Root Filesystem" \ + -n $data_pn:+0:0 -t $data_pn:8300 -u $data_pn:$DATA_PARTUUID \ + -c $data_pn:"Data Filesystem" \ + /dev/mmcblk0 >/dev/null + sgdisk -U R /dev/mmcblk0 >/dev/null + + while true; do + partprobe + if [ -e $R/dev/mmcblk0p$data_pn ]; then + break + fi + sleep 1 + done + + echo "==> Creating filesystems" + for i in $root_pn $data_pn; do + mkfs.ext4 -F -q /dev/mmcblk0p$i >/dev/null + done + fi + set -e +} + +setup_partitions() +{ + setup_partitions_multi +} + +cleanup() +{ + echo "==> Cleaning up residual files" + running_sonic_revision=`cat /etc/sonic/sonic_version.yml | grep build_version | awk -F \' '{print $2}'` + config_files=$(find /host -type f -name "boot*.conf" | grep -iv "$running_sonic_revision\|$image_dir" ) + if [ -z $config_files ]; then + echo "No config files to remove" + else + echo "config files to remove are: $config_files" + fi + + for file in $config_files; do + if [ -f "$file" ]; then + echo "Removing file: $file" + rm "$file" + fi + done + + faulty_image_dir=$(find /host -type d -name "image-*" | grep -iv "$running_sonic_revision\|$image_dir") + if [ -z $faulty_image_dir ]; then + echo "No faulty image directories to remove" + else + echo "Faulty image directories to remove are: $faulty_image_dir" + fi + + for d in $faulty_image_dir; do + if [ -d "$d" ]; then + echo "Removing directory: $d" + rm -rfd "$d" + fi + done + +} + +create_bootloader_conf() +{ + echo "==> Create bootloader config" + +cat <> $bl_conf_path/$BL_CONF +default main + +label main + kernel /$image_dir/boot/vmlinuz-6.1.0-11-2-arm64 + initrd /$image_dir/boot/initrd.img-6.1.0-11-2-arm64 + devicetree /$image_dir/boot/elba-asic-psci.dtb + append softdog.soft_panic=1 FW_NAME=mainfwa root=/dev/mmcblk0p10 rw rootwait rootfstype=ext4 loopfstype=squashfs loop=/$image_dir/fs.squashfs +} +EOF +} + +set_boot_command() +{ + local pn + + echo "==> Setting u-boot environment for Debian Boot" + 
+
+set_boot_command()
+{
+    local pn
+
+    echo "==> Setting u-boot environment for Debian Boot"
+    pn=$(printf "%x" $root_pn)
+
+    fw_setenv -f baudrate 115200
+    fw_setenv -f bootcmd 'test -n "$boot_once" && setenv do_boot_once "$boot_once" && setenv boot_once && saveenv && run do_boot_once; run boot_next'
+    fw_setenv -f bootdelay 0
+    fw_setenv -f fdt_addr_r bb100000
+    fw_setenv -f kernel_addr_r a0000000
+    fw_setenv -f kernel_comp_addr_r 88000000
+    fw_setenv -f kernel_comp_size 8000000
+    fw_setenv -f ramdisk_addr_r a4000000
+}
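The bootcmd installed above implements a one-shot trial boot: when boot_once is set, it is copied aside, cleared, and saved before being run, so the command executes exactly once. A hypothetical upgrade flow would arm it like this:

    # stage a single trial boot of image slot 2 (slot number illustrative)
    fw_setenv boot_once "run sonic_image_2"
    reboot
    # if the new image fails and the board resets, u-boot falls back to
    # plain "run boot_next" because boot_once was already cleared and saved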
+ if [ "$install_env" = "onie" ]; then + bl_conf_path=$root_mnt + else + bl_conf_path=$HOST + fi + file=$bl_conf_path/$BL_CONF + if [ -f "$file" ]; then + rm "$file" + fi + create_bootloader_conf + MTD_UBOOTENV=$(cat /proc/mtd | grep -e 'ubootenv' | awk '{print $1}' | tr -dc '0-9') + FW_ENV_DEFAULT="/dev/mtd$MTD_UBOOTENV 0x0 0x1000 0x10000" + echo $FW_ENV_DEFAULT > /etc/fw_env.config + set_boot_command + set_sonic_env +} + +create_partition() { + check_existing_parts + setup_partitions +} + +mount_partition() { + demo_mnt=$root_mnt + mounted=$(mount | grep "/dev/mmcblk0p$root_pn on $demo_mnt type ext4" | wc -l) + if [ $mounted -eq 0 ]; then + mount /dev/mmcblk0p$root_pn $demo_mnt + fi +} + +bootloader_menu_config() { + # Update uboot Environment + prepare_boot_menu + if [ "$install_env" = "onie" ]; then + chmod -x /bin/onie-nos-mode + fi +} diff --git a/platform/pensando/rules.mk b/platform/pensando/rules.mk new file mode 100644 index 0000000000..4af757c3ae --- /dev/null +++ b/platform/pensando/rules.mk @@ -0,0 +1,16 @@ +include $(PLATFORM_PATH)/docker-dpu-base.mk +include $(PLATFORM_PATH)/docker-dpu.mk +include $(PLATFORM_PATH)/one-image.mk +include $(PLATFORM_PATH)/sdk.mk +include $(PLATFORM_PATH)/docker-syncd-pensando.mk +include $(PLATFORM_PATH)/dsc-drivers.mk + +SONIC_ALL += $(SONIC_ONE_IMAGE) \ + $(DOCKER_FPM) + +# Inject pensando sai into syncd +$(SYNCD)_DEPENDS += $(PENSANDO_SAI) +$(SYNCD)_UNINSTALLS += $(PENSANDO_SAI) + +#Runtime dependency on pensando sai is set only for syncd +$(SYNCD)_RDEPENDS += $(PENSANDO_SAI) diff --git a/platform/pensando/sdk.mk b/platform/pensando/sdk.mk new file mode 100644 index 0000000000..072ac893b7 --- /dev/null +++ b/platform/pensando/sdk.mk @@ -0,0 +1,10 @@ +# Pensando SAI +PENSANDO_SAI = libsai_1.10.1-0_arm64.deb +PENSANDO_SAI_DEV = libsai-dev_1.10.1-0_arm64.deb +$(PENSANDO_SAI)_URL = https://github.com/pensando/dsc-artifacts/blob/main/libsai_1.10.1-0_arm64.deb?raw=true +$(PENSANDO_SAI_DEV)_URL = https://github.com/pensando/dsc-artifacts/blob/main/libsai-dev_1.10.1-0_arm64.deb?raw=true + +$(eval $(call add_conflict_package,$(PENSANDO_SAI_DEV),$(LIBSAIVS_DEV))) + +SONIC_ONLINE_DEBS += $(PENSANDO_SAI) +SONIC_ONLINE_DEBS += $(PENSANDO_SAI_DEV) diff --git a/scripts/prepare_docker_buildinfo.sh b/scripts/prepare_docker_buildinfo.sh index 0ee79c201f..6dfd63bdd5 100755 --- a/scripts/prepare_docker_buildinfo.sh +++ b/scripts/prepare_docker_buildinfo.sh @@ -1,5 +1,7 @@ #!/bin/bash +grep "^# SKIP_HOOK" $2 && exit 0 + [[ ! 
-z "${DBGOPT}" && $0 =~ ${DBGOPT} ]] && set -x BUILDINFO_BASE=/usr/local/share/buildinfo diff --git a/slave.mk b/slave.mk index 92103b5e27..c8dc936b19 100644 --- a/slave.mk +++ b/slave.mk @@ -1086,6 +1086,16 @@ $(foreach IMAGE,$(DOCKER_IMAGES), $(eval $(IMAGE)_FILES_PATH := $(FILES_PATH))) $(foreach IMAGE,$(DOCKER_DBG_IMAGES), $(eval $(IMAGE)_DEBS_PATH := $(DEBS_PATH))) $(foreach IMAGE,$(DOCKER_DBG_IMAGES), $(eval $(IMAGE)_FILES_PATH := $(FILES_PATH))) +# Targets for downloaded docker images +$(addprefix $(TARGET_PATH)/,$(DOWNLOADED_DOCKER_IMAGES)) : $(TARGET_PATH)/%.gz : .platform \ + $$(%.gz_DEP_FILES) + $(HEADER) + + rm -rf $@ $@.log + wget "$($*.gz_URL)" -O target/$(DOWNLOADED_DOCKER_IMAGES) $(LOG) + + $(FOOTER) + # Targets for building docker images $(addprefix $(TARGET_PATH)/, $(DOCKER_IMAGES)) : $(TARGET_PATH)/%.gz : .platform docker-start \ $$(addprefix $$($$*.gz_DEBS_PATH)/,$$($$*.gz_DEPENDS)) \ @@ -1141,7 +1151,7 @@ $(addprefix $(TARGET_PATH)/, $(DOCKER_IMAGES)) : $(TARGET_PATH)/%.gz : .platform DBGOPT='$(DBGOPT)' \ scripts/prepare_docker_buildinfo.sh $* $($*.gz_PATH)/Dockerfile $(CONFIGURED_ARCH) $(LOG) docker info $(LOG) - docker build --squash --no-cache \ + docker build --no-cache $$( [[ "$($*.gz_SQUASH)" != n ]] && echo --squash)\ --build-arg http_proxy=$(HTTP_PROXY) \ --build-arg https_proxy=$(HTTPS_PROXY) \ --build-arg no_proxy=$(NO_PROXY) \ @@ -1245,6 +1255,7 @@ SONIC_TARGET_LIST += $(addprefix $(TARGET_PATH)/, $(DOCKER_DBG_IMAGES)) DOCKER_LOAD_TARGETS = $(addsuffix -load,$(addprefix $(TARGET_PATH)/, \ $(SONIC_SIMPLE_DOCKER_IMAGES) \ + $(DOWNLOADED_DOCKER_IMAGES) \ $(DOCKER_IMAGES) \ $(DOCKER_DBG_IMAGES))) diff --git a/sonic-slave-buster/Dockerfile.j2 b/sonic-slave-buster/Dockerfile.j2 index bb1ec2c970..2d582f659f 100644 --- a/sonic-slave-buster/Dockerfile.j2 +++ b/sonic-slave-buster/Dockerfile.j2 @@ -347,6 +347,7 @@ RUN apt-get update && apt-get install -y eatmydata && eatmydata apt-get install # For SAI3.7 protobuf-compiler \ libprotobuf-dev \ + libgrpc++-dev \ xxd \ # For DHCP Monitor tool libexplain-dev \