Compare commits: master...xumia/test (3 commits)

Commits: 3d12c7c7c7, 6ef964eaf8, 123993cc03
@@ -33,42 +33,21 @@ parameters:
type: object
default:
- vs
- barefoot
- broadcom
- centec
- centec-arm64
- generic
- marvell-armhf
- marvell-arm64
- mellanox
- pensando

stages:
- stage: Prepare
jobs:
- job: Prepare
steps:
- script: |
DEFAULT_MIRROR_URL_PREFIX=http://packages.trafficmanager.net
DEBIAN_TIMESTAMP=$(curl $DEFAULT_MIRROR_URL_PREFIX/snapshot/debian/latest/timestamp)
DEBIAN_SECURITY_TIMESTAMP=$(curl $DEFAULT_MIRROR_URL_PREFIX/snapshot/debian-security/latest/timestamp)
echo "DEBIAN_TIMESTAMP=$DEBIAN_TIMESTAMP, DEBIAN_SECURITY_TIMESTAMP=$DEBIAN_SECURITY_TIMESTAMP"
echo "##vso[task.setvariable variable=DEBIAN_TIMESTAMP;isOutput=true]$DEBIAN_TIMESTAMP"
echo "##vso[task.setvariable variable=DEBIAN_SECURITY_TIMESTAMP;isOutput=true]$DEBIAN_SECURITY_TIMESTAMP"
name: SetVersions
displayName: 'Set snapshot versions'
- stage: Build
dependsOn: Prepare
variables:
- name: CACHE_MODE
value: none
- name: VERSION_CONTROL_OPTIONS
value: 'SONIC_VERSION_CONTROL_COMPONENTS='
- name: SKIP_CHECKOUT
value: true
- name: DEBIAN_TIMESTAMP
value: $[ stageDependencies.Prepare.Prepare.outputs['SetVersions.DEBIAN_TIMESTAMP'] ]
- name: DEBIAN_SECURITY_TIMESTAMP
value: $[ stageDependencies.Prepare.Prepare.outputs['SetVersions.DEBIAN_SECURITY_TIMESTAMP'] ]
- template: .azure-pipelines/template-variables.yml@buildimage
jobs:
- template: azure-pipelines-build.yml
@@ -77,21 +56,6 @@ stages:
buildOptions: '${{ variables.VERSION_CONTROL_OPTIONS }} ENABLE_DOCKER_BASE_PULL=n SONIC_BUILD_JOBS=$(nproc) ENABLE_IMAGE_SIGNATURE=y'
preSteps:
- template: .azure-pipelines/template-clean-sonic-slave.yml@buildimage
- checkout: self
submodules: recursive
fetchDepth: 0
path: s
displayName: 'Checkout code'
- script: |
echo "DEBIAN_TIMESTAMP=$DEBIAN_TIMESTAMP, DEBIAN_SECURITY_TIMESTAMP=$DEBIAN_SECURITY_TIMESTAMP"
if [ "$MIRROR_SNAPSHOT" == y ]; then
mkdir -p target/versions/default/
echo "debian==$DEBIAN_TIMESTAMP" > target/versions/default/versions-mirror
echo "debian-security==$DEBIAN_SECURITY_TIMESTAMP" >> target/versions/default/versions-mirror
cat target/versions/default/versions-mirror
fi
displayName: 'Set snapshot versions'

- stage: UpgradeVersions
jobs:
- job: UpgradeVersions

@@ -162,8 +126,8 @@ stages:
git branch -u remote/$BRANCH_NAME

echo $GIT_PASSWORD | gh auth login --with-token
TITLE="[${SOURCE_BRANCH#refs/heads/}] Upgrade SONiC package Versions"
BODY=$TITLE
TITLE="Upgrade SONiC Versions"
BODY="Upgrade SONiC Versions"
RET=0
if ! gh pr create -t "$TITLE" -b "$BODY" -B $(Build.SourceBranch) -R $(Build.Repository.Name) > pr.log 2>&1; then
if ! grep -q "already exists" pr.log; then
@ -1,67 +0,0 @@
|
||||
# C/C++ with GCC
|
||||
# Build your C/C++ project with GCC using make.
|
||||
# Add steps that publish test results, save build artifacts, deploy, and more:
|
||||
# https://docs.microsoft.com/azure/devops/pipelines/apps/c-cpp/gcc
|
||||
pr: none
|
||||
|
||||
trigger:
|
||||
batch: true
|
||||
branches:
|
||||
include:
|
||||
- master
|
||||
- 202???
|
||||
|
||||
schedules:
|
||||
- cron: "0 0 * * *"
|
||||
displayName: Daily build
|
||||
branches:
|
||||
include:
|
||||
- master
|
||||
- 202???
|
||||
always: true
|
||||
|
||||
stages:
|
||||
- stage: Build
|
||||
|
||||
jobs:
|
||||
- job:
|
||||
displayName: "amd64/ubuntu-20.04"
|
||||
pool:
|
||||
vmImage: 'ubuntu-20.04'
|
||||
|
||||
steps:
|
||||
- checkout: self
|
||||
submodules: true
|
||||
- script: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y make wget libtool m4 autoconf dh-exec libdebhelper-perl=13.6ubuntu1~bpo20.04.1 debhelper=13.6ubuntu1~bpo20.04.1 \
|
||||
cmake pkg-config python3-pip python cmake libgtest-dev libgmock-dev libyang-dev \
|
||||
debhelper-compat dh-elpa dh-sequence-python3 python3-all \
|
||||
libpython3-all-dev python3-six xmlto unzip rake-compiler gem2deb pkg-php-tools \
|
||||
ant default-jdk maven-repo-helper libguava-java \
|
||||
libboost-all-dev libgtest-dev build-essential swig4.0 swig
|
||||
sudo pip3 install pytest click
|
||||
wget http://ftp.us.debian.org/debian/pool/main/libg/libgoogle-gson-java/libgoogle-gson-java_2.8.6-1+deb11u1_all.deb
|
||||
sudo dpkg -i libgoogle-gson-java_2.8.6-1+deb11u1_all.deb
|
||||
mkdir -p /tmp/artifacts
|
||||
displayName: "Install dependencies"
|
||||
- script: |
|
||||
BLDENV=bullseye SONIC_CONFIG_MAKE_JOBS=$(nproc) CONFIGURED_ARCH=amd64 DEST=/tmp/artifacts make -f ../rules/protobuf.mk -f protobuf/Makefile
|
||||
workingDirectory: src
|
||||
displayName: "Build protobuf"
|
||||
- script: |
|
||||
sudo dpkg -i protobuf-compiler_3.21.12-3_amd64.deb libprotoc32_3.21.12-3_amd64.deb \
|
||||
libprotobuf32_3.21.12-3_amd64.deb libprotobuf-dev_3.21.12-3_amd64.deb \
|
||||
libprotobuf-lite32_3.21.12-3_amd64.deb
|
||||
workingDirectory: /tmp/artifacts
|
||||
displayName: "Install protobuf"
|
||||
- script: |
|
||||
dpkg-buildpackage -rfakeroot -b -us -uc
|
||||
workingDirectory: src/sonic-dash-api
|
||||
displayName: "Build sonic-dash-api"
|
||||
- script: |
|
||||
cp *.deb /tmp/artifacts
|
||||
workingDirectory: src
|
||||
- publish: /tmp/artifacts
|
||||
artifact: sonic-buildimage.amd64.ubuntu20_04
|
||||
displayName: "Archive sonic-buildimage debian packages for ubuntu20.04"
|
@ -35,6 +35,7 @@ jobs:
|
||||
dbg_image: no
|
||||
asan_image: no
|
||||
swi_image: no
|
||||
raw_image: no
|
||||
docker_syncd_rpc_image: no
|
||||
syncd_rpc_image: no
|
||||
platform_rpc: no
|
||||
@ -61,6 +62,7 @@ jobs:
|
||||
variables:
|
||||
dbg_image: yes
|
||||
swi_image: yes
|
||||
raw_image: yes
|
||||
docker_syncd_rpc_image: yes
|
||||
platform_rpc: brcm
|
||||
|
||||
@ -92,15 +94,6 @@ jobs:
|
||||
variables:
|
||||
PLATFORM_ARCH: armhf
|
||||
|
||||
- name: marvell-arm64
|
||||
${{ if not(parameters.qemuOrCrossBuild) }}:
|
||||
pool: sonicbld-arm64
|
||||
timeoutInMinutes: 2880
|
||||
variables:
|
||||
PLATFORM_ARCH: arm64
|
||||
|
||||
- name: marvell
|
||||
|
||||
- name: mellanox
|
||||
variables:
|
||||
dbg_image: yes
|
||||
@ -114,19 +107,11 @@ jobs:
|
||||
docker_syncd_rpc_image: yes
|
||||
platform_rpc: nephos
|
||||
|
||||
- name: pensando
|
||||
pool: sonicbld-arm64
|
||||
variables:
|
||||
PLATFORM_ARCH: arm64
|
||||
|
||||
buildSteps:
|
||||
- template: .azure-pipelines/template-skipvstest.yml@buildimage
|
||||
- template: .azure-pipelines/template-daemon.yml@buildimage
|
||||
- template: template-skipvstest.yml
|
||||
- bash: |
|
||||
set -ex
|
||||
if [ $(GROUP_NAME) == pensando ]; then
|
||||
make $BUILD_OPTIONS target/sonic-pensando.tar
|
||||
elif [ $(GROUP_NAME) == vs ]; then
|
||||
if [ $(GROUP_NAME) == vs ]; then
|
||||
if [ $(dbg_image) == yes ]; then
|
||||
make $BUILD_OPTIONS INSTALL_DEBUG_TOOLS=y target/sonic-vs.img.gz
|
||||
mv target/sonic-vs.img.gz target/sonic-vs-dbg.img.gz
|
||||
@ -135,19 +120,11 @@ jobs:
|
||||
make $BUILD_OPTIONS ENABLE_ASAN=y target/docker-sonic-vs.gz
|
||||
mv target/docker-sonic-vs.gz target/docker-sonic-vs-asan.gz
|
||||
fi
|
||||
if [ "$(K8S_MASTER_CHANGED)" == 'YES' ]; then
|
||||
make $BUILD_OPTIONS INCLUDE_KUBERNETES_MASTER=y target/sonic-vs.img.gz
|
||||
gzip -d target/sonic-vs.img.gz
|
||||
SONIC_RUN_CMDS="qemu-img convert target/sonic-vs.img -O vhdx -o subformat=dynamic target/sonic-vs.vhdx" make $BUILD_OPTIONS sonic-slave-run
|
||||
gzip target/sonic-vs.vhdx
|
||||
mv target/sonic-vs.vhdx.gz target/sonic-vs-k8s.vhdx.gz
|
||||
rm target/sonic-vs.img
|
||||
fi
|
||||
make $BUILD_OPTIONS target/docker-sonic-vs.gz target/sonic-vs.img.gz target/docker-ptf.gz
|
||||
make $BUILD_OPTIONS target/docker-ptf-sai.gz
|
||||
if [ $(Build.Reason) != 'PullRequest' ];then
|
||||
gzip -kd target/sonic-vs.img.gz
|
||||
SONIC_RUN_CMDS="qemu-img convert target/sonic-vs.img -O vhdx -o subformat=dynamic target/sonic-vs.vhdx" make $BUILD_OPTIONS sonic-slave-run
|
||||
SONIC_RUN_CMDS="qemu-img convert target/sonic-vs.img -O vhdx -o subformat=dynamic target/sonic-vs.vhdx" make sonic-slave-run
|
||||
rm target/sonic-vs.img
|
||||
fi
|
||||
else
|
||||
@ -158,17 +135,13 @@ jobs:
|
||||
if [ $(swi_image) == yes ]; then
|
||||
make $BUILD_OPTIONS ENABLE_IMAGE_SIGNATURE=y target/sonic-aboot-$(GROUP_NAME).swi
|
||||
fi
|
||||
if [ $(raw_image) == yes ]; then
|
||||
make $BUILD_OPTIONS target/sonic-$(GROUP_NAME).raw
|
||||
fi
|
||||
if [ $(docker_syncd_rpc_image) == yes ]; then
|
||||
# workaround for issue in rules/sairedis.dep, git ls-files will list un-exist files for cache
|
||||
make $BUILD_OPTIONS ENABLE_SYNCD_RPC=y target/docker-syncd-$(platform_rpc)-rpc.gz
|
||||
pushd ./src/sonic-sairedis/SAI
|
||||
git stash
|
||||
popd
|
||||
if [ $(GROUP_NAME) == broadcom ]; then
|
||||
make $BUILD_OPTIONS ENABLE_SYNCD_RPC=y target/docker-syncd-$(platform_rpc)-dnx-rpc.gz
|
||||
pushd ./src/sonic-sairedis/SAI
|
||||
git stash
|
||||
popd
|
||||
fi
|
||||
fi
|
||||
if [ $(syncd_rpc_image) == yes ]; then
|
||||
@ -178,4 +151,4 @@ jobs:
|
||||
make $BUILD_OPTIONS target/sonic-$(GROUP_NAME).bin
|
||||
fi
|
||||
displayName: "Build sonic image"
|
||||
- template: .azure-pipelines/check-dirty-version.yml@buildimage
|
||||
- template: check-dirty-version.yml
|
||||
|
@ -25,21 +25,17 @@ jobs:
|
||||
jobFilters: ${{ parameters.jobFilters }}
|
||||
jobVariables: ${{ parameters.jobVariables }}
|
||||
preSteps:
|
||||
- template: .azure-pipelines/cleanup.yml@buildimage
|
||||
- template: cleanup.yml
|
||||
- ${{ parameters.preSteps }}
|
||||
- script: |
|
||||
[ -n "$OVERRIDE_BUILD_OPTIONS" ] && OVERRIDE_BUILD_OPTIONS=$(OVERRIDE_BUILD_OPTIONS)
|
||||
BUILD_OPTIONS="$(BUILD_OPTIONS) $OVERRIDE_BUILD_OPTIONS"
|
||||
if [ -n "$(CACHE_MODE)" ] && echo $(PLATFORM_AZP) | grep -E -q "^(vs|broadcom|mellanox|marvell-armhf|marvell-arm64)$"; then
|
||||
if [ -n "$(CACHE_MODE)" ] && echo $(PLATFORM_AZP) | grep -E -q "^(vs|broadcom|mellanox|marvell-armhf)$"; then
|
||||
CACHE_OPTIONS="SONIC_DPKG_CACHE_METHOD=$(CACHE_MODE) SONIC_DPKG_CACHE_SOURCE=/nfs/dpkg_cache/$(PLATFORM_AZP)"
|
||||
BUILD_OPTIONS="$BUILD_OPTIONS $CACHE_OPTIONS"
|
||||
BUILD_OPTIONS="$(BUILD_OPTIONS) $CACHE_OPTIONS"
|
||||
echo "##vso[task.setvariable variable=BUILD_OPTIONS]$BUILD_OPTIONS"
|
||||
fi
|
||||
echo $BUILD_OPTIONS
|
||||
echo "##vso[task.setvariable variable=BUILD_OPTIONS]$BUILD_OPTIONS"
|
||||
displayName: "Set cache options"
|
||||
- checkout: self
|
||||
submodules: recursive
|
||||
fetchDepth: 0
|
||||
condition: and(succeeded(), eq(variables.SKIP_CHECKOUT, ''))
|
||||
displayName: 'Checkout code'
|
||||
- script: |
|
||||
@ -70,6 +66,6 @@ jobs:
|
||||
artifactName: 'sonic-buildimage.$(GROUP_NAME)$(GROUP_EXTNAME)'
|
||||
publishPrefix: '$(Build.DefinitionName)/$(Build.SourceBranchName)/$(GROUP_NAME)'
|
||||
- ${{ parameters.postSteps }}
|
||||
- template: .azure-pipelines/cleanup.yml@buildimage
|
||||
- template: cleanup.yml
|
||||
jobGroups: ${{ parameters.jobGroups }}
|
||||
buildSteps: ${{ parameters.buildSteps }}
|
||||
|
@@ -1,2 +1,7 @@
variables:
VERSION_CONTROL_OPTIONS: 'SONIC_VERSION_CONTROL_COMPONENTS=deb,py2,py3,web,git,docker'
${{ if eq(variables['Build.Reason'],'PullRequest') }}:
VERSION_CONTROL_OPTIONS: 'SONIC_VERSION_CONTROL_COMPONENTS=$([[ "$(System.PullRequest.TargetBranch)" =~ ^20[2-9][0-9]{3}$ ]] && echo deb,py2,py3,web,git,docker)'
${{ elseif ne(variables['Build.SourceBranchName'],'master') }}:
VERSION_CONTROL_OPTIONS: 'SONIC_VERSION_CONTROL_COMPONENTS=deb,py2,py3,web,git,docker'
${{ else }}:
VERSION_CONTROL_OPTIONS: ''
@ -101,24 +101,11 @@ jobs:
|
||||
if [ ${{ parameters.swi_image }} == true ]; then
|
||||
make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) ENABLE_IMAGE_SIGNATURE=y target/sonic-aboot-${{ parameters.platform }}.swi
|
||||
fi
|
||||
if [ ${{ parameters.raw_image }} == true ]; then
|
||||
make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) target/sonic-${{ parameters.platform }}.raw
|
||||
fi
|
||||
if [ ${{ parameters.sync_rpc_image }} == true ]; then
|
||||
make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) ENABLE_SYNCD_RPC=y target/docker-syncd-${{ parameters.platform_short }}-rpc.gz
|
||||
# workaround for issue in rules/sairedis.dep, git ls-files will list un-exist files for cache
|
||||
pushd ./src/sonic-sairedis/SAI
|
||||
git stash
|
||||
popd
|
||||
if [ ${{ parameters.platform }} == broadcom ]; then
|
||||
make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) ENABLE_SYNCD_RPC=y SAITHRIFT_V2=y target/docker-saiserverv2-brcm.gz
|
||||
pushd ./src/sonic-sairedis/SAI
|
||||
git stash
|
||||
popd
|
||||
fi
|
||||
if [ ${{ parameters.platform }} == barefoot ]; then
|
||||
make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) SAITHRIFT_V2=y ENABLE_SYNCD_RPC=y target/docker-saiserverv2-bfn.gz
|
||||
pushd ./src/sonic-sairedis/SAI
|
||||
git stash
|
||||
popd
|
||||
fi
|
||||
fi
|
||||
|
||||
make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) target/sonic-${{ parameters.platform }}.bin
|
||||
|
@ -1,11 +1,5 @@
|
||||
steps:
|
||||
- script: |
|
||||
set -x
|
||||
# kill daemon process
|
||||
ps $(cat /tmp/azp_daemon_kill_docker_pid)
|
||||
sudo kill $(cat /tmp/azp_daemon_kill_docker_pid)
|
||||
rm /tmp/azp_daemon_kill_docker_pid
|
||||
|
||||
if sudo [ -f /var/run/march/docker.pid ] ; then
|
||||
pid=`sudo cat /var/run/march/docker.pid` ; sudo kill $pid
|
||||
fi
|
||||
@ -17,5 +11,4 @@ steps:
|
||||
pid=`sudo cat dockerfs/var/run/docker.pid` ; sudo kill $pid
|
||||
fi
|
||||
sudo rm -rf $(ls -A1)
|
||||
condition: always()
|
||||
displayName: "Clean Workspace"
|
||||
|
@ -1,63 +0,0 @@
|
||||
# Starter pipeline
|
||||
# Start with a minimal pipeline that you can customize to build and deploy your code.
|
||||
# Add steps that build, run tests, deploy, and more:
|
||||
# https://aka.ms/yaml
|
||||
# Build and push sonic-mgmt image
|
||||
|
||||
schedules:
|
||||
- cron: "0 8 * * *"
|
||||
branches:
|
||||
include:
|
||||
- master
|
||||
always: true
|
||||
|
||||
trigger: none
|
||||
pr:
|
||||
branches:
|
||||
include:
|
||||
- master
|
||||
paths:
|
||||
include:
|
||||
- dockers/docker-sonic-mgmt
|
||||
|
||||
parameters:
|
||||
- name: registry_url
|
||||
type: string
|
||||
default: sonicdev-microsoft.azurecr.io
|
||||
- name: registry_conn
|
||||
type: string
|
||||
default: sonicdev
|
||||
|
||||
stages:
|
||||
- stage: Build
|
||||
jobs:
|
||||
- job: Build
|
||||
pool: sonicbld
|
||||
timeoutInMinutes: 360
|
||||
steps:
|
||||
- template: cleanup.yml
|
||||
- checkout: self
|
||||
clean: true
|
||||
- bash: |
|
||||
set -xe
|
||||
git submodule update --init --recursive -- src/sonic-platform-daemons src/sonic-genl-packet src/sonic-sairedis src/ptf src/sonic-device-data
|
||||
|
||||
make SONIC_BUILD_JOBS=$(nproc) DEFAULT_CONTAINER_REGISTRY=publicmirror.azurecr.io ENABLE_DOCKER_BASE_PULL=y configure PLATFORM=generic
|
||||
make -f Makefile.work BLDENV=bullseye SONIC_BUILD_JOBS=$(nproc) DEFAULT_CONTAINER_REGISTRY=publicmirror.azurecr.io ENABLE_DOCKER_BASE_PULL=y LEGACY_SONIC_MGMT_DOCKER=n target/docker-sonic-mgmt.gz
|
||||
cp target -r $(Build.ArtifactStagingDirectory)/target
|
||||
docker load -i target/docker-sonic-mgmt.gz
|
||||
docker tag docker-sonic-mgmt $REGISTRY_SERVER/docker-sonic-mgmt:py3only
|
||||
env:
|
||||
REGISTRY_SERVER: ${{ parameters.registry_url }}
|
||||
displayName: Build docker-sonic-mgmt.gz
|
||||
- task: Docker@2
|
||||
displayName: Upload image
|
||||
inputs:
|
||||
containerRegistry: ${{ parameters.registry_conn }}
|
||||
repository: docker-sonic-mgmt
|
||||
command: push
|
||||
tags: py3only
|
||||
- publish: $(Build.ArtifactStagingDirectory)
|
||||
artifact: 'docker-sonic-mgmt'
|
||||
displayName: "Archive docker image sonic-mgmt"
|
||||
|
@ -32,18 +32,17 @@ stages:
|
||||
- stage: Build
|
||||
jobs:
|
||||
- job: Build
|
||||
pool: sonicbld
|
||||
pool: sonictest
|
||||
timeoutInMinutes: 360
|
||||
steps:
|
||||
- template: cleanup.yml
|
||||
- checkout: self
|
||||
clean: true
|
||||
submodules: recursive
|
||||
- bash: |
|
||||
set -xe
|
||||
git submodule update --init --recursive -- src/sonic-platform-daemons src/sonic-genl-packet src/sonic-sairedis src/ptf src/sonic-device-data
|
||||
|
||||
make SONIC_BUILD_JOBS=$(nproc) DEFAULT_CONTAINER_REGISTRY=publicmirror.azurecr.io ENABLE_DOCKER_BASE_PULL=y configure PLATFORM=generic
|
||||
make -f Makefile.work BLDENV=bullseye SONIC_BUILD_JOBS=$(nproc) DEFAULT_CONTAINER_REGISTRY=publicmirror.azurecr.io ENABLE_DOCKER_BASE_PULL=y LEGACY_SONIC_MGMT_DOCKER=y target/docker-sonic-mgmt.gz
|
||||
make configure PLATFORM=generic
|
||||
make target/docker-sonic-mgmt.gz
|
||||
cp target -r $(Build.ArtifactStagingDirectory)/target
|
||||
docker load -i target/docker-sonic-mgmt.gz
|
||||
docker tag docker-sonic-mgmt $REGISTRY_SERVER/docker-sonic-mgmt:latest
|
||||
|
@ -16,7 +16,6 @@ parameters:
|
||||
- name: dist
|
||||
type: string
|
||||
values:
|
||||
- bookworm
|
||||
- bullseye
|
||||
- buster
|
||||
- stretch
|
||||
@ -36,7 +35,7 @@ parameters:
|
||||
- sonicbld-armhf
|
||||
|
||||
jobs:
|
||||
- job: sonic_slave_${{ parameters.dist }}${{ parameters.march }}
|
||||
- job: Build_${{ parameters.dist }}_${{ parameters.march }}${{ parameters.arch }}
|
||||
timeoutInMinutes: 360
|
||||
variables:
|
||||
- template: /.azure-pipelines/template-variables.yml@buildimage
|
||||
@ -47,6 +46,7 @@ jobs:
|
||||
- template: /.azure-pipelines/template-clean-sonic-slave.yml@buildimage
|
||||
- checkout: self
|
||||
clean: true
|
||||
submodules: recursive
|
||||
- task: Docker@2
|
||||
displayName: Login to ACR
|
||||
inputs:
|
||||
@ -54,29 +54,22 @@ jobs:
|
||||
containerRegistry: ${{ parameters.registry_conn }}
|
||||
- bash: |
|
||||
set -ex
|
||||
build_options="$(VERSION_CONTROL_OPTIONS)"
|
||||
image_tag=$(BLDENV=${{ parameters.dist }} make -f Makefile.work showtag $build_options PLATFORM=generic PLATFORM_ARCH=${{ parameters.arch }} | grep sonic-slave | tail -n 1)
|
||||
image_tag=$(BLDENV=${{ parameters.dist }} make -f Makefile.work showtag PLATFORM=generic PLATFORM_ARCH=${{ parameters.arch }} | grep sonic-slave | tail -n 1)
|
||||
image_latest=$(echo $(echo $image_tag | awk -F: '{print$1}'):latest)
|
||||
if echo ${{ parameters.pool }} | grep ${{ parameters.arch }};then
|
||||
image_latest=$(echo ${image_latest} | sed 's/:/-${{ parameters.arch }}:/')
|
||||
fi
|
||||
image_branch=$(echo $(echo $image_latest | awk -F: '{print$1}'):$(Build.SourceBranchName))
|
||||
docker rmi $image_tag || true
|
||||
|
||||
if [[ "$(Build.Reason)" =~ [a-zA-Z]*CI ]] && docker pull ${{ parameters.registry_url }}/${image_tag};then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
DOCKER_DATA_ROOT_FOR_MULTIARCH=/data/march/docker BLDENV=${{ parameters.dist }} make -f Makefile.work configure $build_options PLATFORM=generic PLATFORM_ARCH=${{ parameters.arch }} $args || docker image ls $image_tag
|
||||
DOCKER_DATA_ROOT_FOR_MULTIARCH=/data/march/docker BLDENV=${{ parameters.dist }} make -f Makefile.work configure PLATFORM=generic PLATFORM_ARCH=${{ parameters.arch }} $args || docker image ls $image_tag
|
||||
if [[ "$(Build.Reason)" == "PullRequest" ]];then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
docker tag ${image_tag} ${REGISTRY_SERVER}/${image_tag}
|
||||
docker push ${REGISTRY_SERVER}/${image_tag}
|
||||
docker tag ${image_tag} ${REGISTRY_SERVER}/${image_branch}
|
||||
docker push ${REGISTRY_SERVER}/${image_branch}
|
||||
if [[ "$(Build.SourceBranchName)" == "master" ]];then
|
||||
if [[ "${{ parameters.arch }}" == "amd64" ]];then
|
||||
docker tag ${image_tag} ${REGISTRY_SERVER}/${image_latest}
|
||||
docker push ${REGISTRY_SERVER}/${image_latest}
|
||||
fi
|
||||
|
@ -8,7 +8,7 @@ resources:
|
||||
- repository: buildimage
|
||||
type: github
|
||||
name: sonic-net/sonic-buildimage
|
||||
ref: bookworm
|
||||
ref: master
|
||||
endpoint: sonic-net
|
||||
|
||||
schedules:
|
||||
@ -30,6 +30,7 @@ trigger:
|
||||
paths:
|
||||
include:
|
||||
- sonic-slave-*
|
||||
- src/sonic-build-hooks
|
||||
- files/build/versions
|
||||
- Makefile
|
||||
- Makefile.work
|
||||
@ -44,7 +45,6 @@ parameters:
|
||||
- name: 'dists'
|
||||
type: object
|
||||
default:
|
||||
- bookworm
|
||||
- bullseye
|
||||
- buster
|
||||
- stretch
|
||||
@ -57,7 +57,7 @@ parameters:
|
||||
default: sonicdev
|
||||
|
||||
stages:
|
||||
- stage: Build_in_amd64
|
||||
- stage: Build
|
||||
jobs:
|
||||
- ${{ each dist in parameters.dists }}:
|
||||
- ${{ if endswith(variables['Build.DefinitionName'], dist) }}:
|
||||
@ -67,9 +67,7 @@ stages:
|
||||
pool: sonicbld
|
||||
arch: ${{ arch }}
|
||||
dist: ${{ dist }}
|
||||
${{ if ne(arch, 'amd64') }}:
|
||||
march: _march_${{ arch }}
|
||||
- stage: Build_native_arm
|
||||
- stage: Build_march
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- ${{ each dist in parameters.dists }}:
|
||||
@ -81,4 +79,4 @@ stages:
|
||||
pool: sonicbld-${{ arch }}
|
||||
arch: ${{ arch }}
|
||||
dist: ${{ dist }}
|
||||
march: _${{ arch }}
|
||||
march: march_
|
||||
|
@ -15,34 +15,13 @@ trigger: none
|
||||
pr: none
|
||||
|
||||
jobs:
|
||||
- job: Amd
|
||||
- job: Build
|
||||
pool: sonicbld
|
||||
timeoutInMinutes: 5
|
||||
steps:
|
||||
- checkout: none
|
||||
- script: |
|
||||
df -h
|
||||
set -xe
|
||||
sudo find /nfs/dpkg_cache/ -name *.tgz -mtime +30 -type f -delete
|
||||
df -h
|
||||
displayName: clean dpkg cache
|
||||
- job: Armhf
|
||||
pool: sonicbld-armhf
|
||||
timeoutInMinutes: 5
|
||||
steps:
|
||||
- checkout: none
|
||||
- script: |
|
||||
df -h
|
||||
sudo find /nfs/dpkg_cache/ -name *.tgz -mtime +30 -type f -delete
|
||||
df -h
|
||||
displayName: clean dpkg cache
|
||||
- job: Arm64
|
||||
pool: sonicbld-arm64
|
||||
timeoutInMinutes: 5
|
||||
steps:
|
||||
- checkout: none
|
||||
- script: |
|
||||
df -h
|
||||
sudo find /nfs/dpkg_cache/ -name *.tgz -mtime +30 -type f -delete
|
||||
df -h
|
||||
displayName: clean dpkg cache
|
||||
|
||||
|
@ -11,14 +11,6 @@ schedules:
|
||||
- master
|
||||
- 202012
|
||||
|
||||
resources:
|
||||
repositories:
|
||||
- repository: buildimage
|
||||
type: github
|
||||
name: sonic-net/sonic-buildimage
|
||||
ref: master
|
||||
endpoint: sonic-net
|
||||
|
||||
trigger: none
|
||||
pr: none
|
||||
|
||||
@ -28,8 +20,7 @@ stages:
|
||||
variables:
|
||||
- name: CACHE_MODE
|
||||
value: cache
|
||||
- template: .azure-pipelines/azure-pipelines-repd-build-variables.yml@buildimage
|
||||
- template: .azure-pipelines/template-variables.yml@buildimage
|
||||
- template: azure-pipelines-repd-build-variables.yml
|
||||
jobs:
|
||||
- template: azure-pipelines-build.yml
|
||||
parameters:
|
||||
@ -39,6 +30,7 @@ stages:
|
||||
- name: broadcom
|
||||
variables:
|
||||
swi_image: yes
|
||||
raw_image: yes
|
||||
docker_syncd_rpc_image: yes
|
||||
platform_rpc: brcm
|
||||
- name: mellanox
|
||||
|
.azure-pipelines/official-build-multi-asic.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
# Starter pipeline
# Start with a minimal pipeline that you can customize to build and deploy your code.
# Add steps that build, run tests, deploy, and more:
# https://aka.ms/yaml

schedules:
- cron: "0 18 * * Sun,Wed,Fri"
displayName: "Alternate day build"
branches:
include:
- master
always: true

trigger: none
pr: none

stages:
- stage: Build
pool: sonicbld
variables:
- template: azure-pipelines-repd-build-variables.yml
- name: CACHE_MODE
value: wcache
jobs:
- template: azure-pipelines-build.yml
parameters:
buildOptions: 'USERNAME=admin SONIC_BUILD_JOBS=$(nproc) BUILD_MULTIASIC_KVM=y ${{ variables.VERSION_CONTROL_OPTIONS }}'
jobGroups:
- name: vs

- stage: Test
variables:
- name: inventory
value: veos_vtb
- name: testbed_file
value: vtestbed.csv

jobs:
- job:
pool: sonictest-ma
displayName: "kvmtest-multi-asic-t1-lag"
timeoutInMinutes: 240

steps:
- template: run-test-template.yml
parameters:
dut: vlab-08
tbname: vms-kvm-four-asic-t1-lag
ptf_name: ptf_vms6-4
tbtype: multi-asic-t1-lag
image: sonic-4asic-vs.img.gz
@ -11,16 +11,12 @@ parameters:
|
||||
default: 36000
|
||||
|
||||
- name: MIN_WORKER
|
||||
type: string
|
||||
type: number
|
||||
default: 1
|
||||
|
||||
- name: MAX_WORKER
|
||||
type: string
|
||||
default: 1
|
||||
|
||||
- name: NUM_ASIC
|
||||
type: number
|
||||
default: 1
|
||||
default: 2
|
||||
|
||||
- name: TEST_SET
|
||||
type: string
|
||||
@ -38,103 +34,39 @@ parameters:
|
||||
type: string
|
||||
default: "ceos"
|
||||
|
||||
- name: TESTBED_NAME
|
||||
- name: SPECIFIED_PARAMS
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: IMAGE_URL
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: HWSKU
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: TEST_PLAN_TYPE
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: PLATFORM
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: SCRIPTS
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: FEATURES
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: SCRIPTS_EXCLUDE
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: FEATURES_EXCLUDE
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: REPO_NAME
|
||||
type: string
|
||||
default: ""
|
||||
default: "{}"
|
||||
|
||||
- name: MGMT_BRANCH
|
||||
type: string
|
||||
default: ""
|
||||
default: master
|
||||
|
||||
- name: STOP_ON_FAILURE
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: RETRY_TIMES
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: DUMP_KVM_IF_FAIL
|
||||
type: string
|
||||
default: "True"
|
||||
values:
|
||||
- "True"
|
||||
- "False"
|
||||
|
||||
- name: REQUESTER
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
- name: MAX_RUN_TEST_MINUTES
|
||||
- name: NUM_ASIC
|
||||
type: number
|
||||
default: 480
|
||||
|
||||
default: 1
|
||||
|
||||
steps:
|
||||
- script: |
|
||||
set -e
|
||||
set -ex
|
||||
wget -O ./.azure-pipelines/test_plan.py https://raw.githubusercontent.com/sonic-net/sonic-mgmt/master/.azure-pipelines/test_plan.py
|
||||
wget -O ./.azure-pipelines/pr_test_scripts.yaml https://raw.githubusercontent.com/sonic-net/sonic-mgmt/master/.azure-pipelines/pr_test_scripts.yaml
|
||||
displayName: Download test plan scripts
|
||||
displayName: Download TestbedV2 scripts
|
||||
|
||||
- script: |
|
||||
set -e
|
||||
set -ex
|
||||
pip install PyYAML
|
||||
rm -f new_test_plan_id.txt
|
||||
|
||||
python ./.azure-pipelines/test_plan.py create \
|
||||
-t ${{ parameters.TOPOLOGY }} \
|
||||
-o new_test_plan_id.txt \
|
||||
--min-worker ${{ parameters.MIN_WORKER }} \
|
||||
--max-worker ${{ parameters.MAX_WORKER }} \
|
||||
--test-set ${{ parameters.TEST_SET }} \
|
||||
--kvm-build-id $(KVM_BUILD_ID) \
|
||||
--deploy-mg-extra-params "${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" \
|
||||
--common-extra-params "${{ parameters.COMMON_EXTRA_PARAMS }}" \
|
||||
--mgmt-branch ${{ parameters.MGMT_BRANCH }} \
|
||||
--vm-type ${{ parameters.VM_TYPE }} \
|
||||
python ./.azure-pipelines/test_plan.py create -t ${{ parameters.TOPOLOGY }} -o new_test_plan_id.txt \
|
||||
--min-worker ${{ parameters.MIN_WORKER }} --max-worker ${{ parameters.MAX_WORKER }} \
|
||||
--test-set ${{ parameters.TEST_SET }} --kvm-build-id $(KVM_BUILD_ID) \
|
||||
--deploy-mg-extra-params "${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" --common-extra-params "${{ parameters.COMMON_EXTRA_PARAMS }}" \
|
||||
--mgmt-branch ${{ parameters.MGMT_BRANCH }} --vm-type ${{ parameters.VM_TYPE }} --specified-params "${{ parameters.SPECIFIED_PARAMS }}" \
|
||||
--num-asic ${{ parameters.NUM_ASIC }}
|
||||
|
||||
TEST_PLAN_ID=`cat new_test_plan_id.txt`
|
||||
|
||||
echo "Created test plan $TEST_PLAN_ID"
|
||||
echo "Check $(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID for test plan status"
|
||||
echo "Check https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID for test plan status"
|
||||
echo "##vso[task.setvariable variable=TEST_PLAN_ID]$TEST_PLAN_ID"
|
||||
env:
|
||||
TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL)
|
||||
@ -144,55 +76,58 @@ steps:
|
||||
displayName: Trigger test
|
||||
|
||||
- script: |
|
||||
set -e
|
||||
set -ex
|
||||
echo "Lock testbed"
|
||||
echo "SONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com"
|
||||
echo "Runtime detailed progress at $(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID "
|
||||
echo "TestbedV2 is just online and might not be stable enough, for any issue, please send email to sonictestbedtools@microsoft.com"
|
||||
echo "Runtime detailed progress at https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID"
|
||||
# When "LOCK_TESTBED" finish, it changes into "PREPARE_TESTBED"
|
||||
python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --expected-state LOCK_TESTBED
|
||||
python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --timeout 43200 --expected-states PREPARE_TESTBED EXECUTING KVMDUMP FINISHED CANCELLED FAILED
|
||||
env:
|
||||
TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL)
|
||||
displayName: Lock testbed
|
||||
timeoutInMinutes: 240
|
||||
|
||||
- script: |
|
||||
set -e
|
||||
set -ex
|
||||
echo "Prepare testbed"
|
||||
echo "Preparing the testbed(add-topo, deploy-mg) may take 15-30 minutes. Before the testbed is ready, the progress of the test plan keeps displayed as 0, please be patient"
|
||||
echo "SONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com"
|
||||
echo "Runtime detailed progress at $(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID "
|
||||
echo "Preparing the testbed(add-topo, deploy-mg) may take 15-30 minutes. Before the testbed is ready, the progress of the test plan keeps displayed as 0, please be patient(We will improve the indication in a short time)"
|
||||
echo "If the progress keeps as 0 for more than 1 hour, please cancel and retry this pipeline"
|
||||
echo "TestbedV2 is just online and might not be stable enough, for any issue, please send email to sonictestbedtools@microsoft.com"
|
||||
echo "Runtime detailed progress at https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID"
|
||||
# When "PREPARE_TESTBED" finish, it changes into "EXECUTING"
|
||||
python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --expected-state PREPARE_TESTBED
|
||||
python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --timeout 2400 --expected-states EXECUTING KVMDUMP FINISHED CANCELLED FAILED
|
||||
env:
|
||||
TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL)
|
||||
displayName: Prepare testbed
|
||||
timeoutInMinutes: 40
|
||||
|
||||
- script: |
|
||||
set -e
|
||||
set -ex
|
||||
echo "Run test"
|
||||
echo "SONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com"
|
||||
echo "Runtime detailed progress at $(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID "
|
||||
echo "TestbedV2 is just online and might not be stable enough, for any issue, please send email to sonictestbedtools@microsoft.com"
|
||||
echo "Runtime detailed progress at https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID"
|
||||
# When "EXECUTING" finish, it changes into "KVMDUMP", "FAILED", "CANCELLED" or "FINISHED"
|
||||
python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --expected-state EXECUTING
|
||||
python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --timeout 18000 --expected-states KVMDUMP FINISHED CANCELLED FAILED
|
||||
env:
|
||||
TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL)
|
||||
displayName: Run test
|
||||
timeoutInMinutes: ${{ parameters.MAX_RUN_TEST_MINUTES }}
|
||||
|
||||
- ${{ if eq(parameters.DUMP_KVM_IF_FAIL, 'True') }}:
|
||||
- script: |
|
||||
set -e
|
||||
echo "KVM dump"
|
||||
echo "SONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com"
|
||||
echo "Runtime detailed progress at $(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID "
|
||||
# When "KVMDUMP" finish, it changes into "FAILED", "CANCELLED" or "FINISHED"
|
||||
python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --expected-state KVMDUMP
|
||||
condition: succeededOrFailed()
|
||||
env:
|
||||
TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL)
|
||||
displayName: KVM dump
|
||||
timeoutInMinutes: 300
|
||||
|
||||
- script: |
|
||||
set -e
|
||||
set -ex
|
||||
echo "KVM dump"
|
||||
echo "TestbedV2 is just online and might not be stable enough, for any issue, please send email to sonictestbedtools@microsoft.com"
|
||||
echo "Runtime detailed progress at https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID"
|
||||
# When "KVMDUMP" finish, it changes into "FAILED", "CANCELLED" or "FINISHED"
|
||||
python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --timeout 43200 --expected-states FINISHED CANCELLED FAILED
|
||||
condition: succeededOrFailed()
|
||||
env:
|
||||
TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL)
|
||||
displayName: KVM dump
|
||||
timeoutInMinutes: 20
|
||||
|
||||
- script: |
|
||||
set -ex
|
||||
echo "Try to cancel test plan $TEST_PLAN_ID, cancelling finished test plan has no effect."
|
||||
python ./.azure-pipelines/test_plan.py cancel -i "$(TEST_PLAN_ID)"
|
||||
condition: always()
|
||||
|
@@ -1,24 +0,0 @@
steps:
- bash: |
(
while true
do
sleep 120
now=$(date +%s)
pids=$(ps -C docker -o pid,etime,args | grep "docker build" | cut -d" " -f2)
for pid in $pids
do
start=$(date --date="$(ls -dl /proc/$pid --time-style full-iso | awk '{print$6,$7}')" +%s)
time_s=$(($now-$start))
if [[ $time_s -gt $(DOCKER_BUILD_TIMEOUT) ]]; then
echo =========== $(date +%F%T) $time_s &>> target/daemon.log
ps $pid &>> target/daemon.log
sudo kill $pid
fi
done
done
) &
daemon_pid=$!
ps $daemon_pid
echo $daemon_pid >> /tmp/azp_daemon_kill_docker_pid
displayName: start daemon to kill hang docker
@@ -2,18 +2,7 @@ steps:
- ${{ if eq(variables['Build.Reason'], 'PullRequest') }}:
- script: |
set -ex
tar_branch=origin/$(System.PullRequest.TargetBranchName)
# Check if k8s master entrance script is changed
K8S_MASTER_CHANGED=NO
if git diff $tar_branch..HEAD --name-only | grep -F files/image_config/kubernetes/kubernetes_master_entrance.sh; then
K8S_MASTER_CHANGED=YES
fi
set +x
echo "##vso[task.setvariable variable=K8S_MASTER_CHANGED;]$K8S_MASTER_CHANGED"
displayName: "Check if k8s master image build is needed."
- script: |
set -ex
tar_branch=origin/$(System.PullRequest.TargetBranchName)
tar_branch=origin/$(System.PullRequest.TargetBranch)
git diff $tar_branch..HEAD --name-only | grep -v -f .azure-pipelines/vstest-exclude && exit 0
git diff $tar_branch..HEAD --name-only | grep -f .azure-pipelines/vstest-include && exit 0
set +x
@@ -2,8 +2,3 @@ variables:
DEFAULT_CONTAINER_REGISTRY: 'publicmirror.azurecr.io'
COMMON_LIB_BUILD_ENVS: 'bullseye'
SONIC_SLAVE_DOCKER_DRIVER: 'overlay2'
SONIC_BUILD_RETRY_COUNT: 3
SONIC_BUILD_RETRY_INTERVAL: 600
DOCKER_BUILDKIT: 0
DOCKER_BUILD_TIMEOUT: 3600
MIRROR_SNAPSHOT: y
@@ -1,3 +1,4 @@
^platform
^.azure-pipelines
^files/build/versions
^sonic-slave-
.github/CODEOWNERS (vendored, 87 changes)
@ -19,9 +19,9 @@
|
||||
/Makefile.work @qiluo-msft @xumia @lguohan
|
||||
/slave.mk @qiluo-msft @xumia @lguohan
|
||||
/scripts @qiluo-msft @xumia @lguohan
|
||||
/src/sonic-build-hooks/ @sonic-net/sonic-build
|
||||
/src/debootstrap/ @sonic-net/sonic-build
|
||||
/src/sonic-fips/ @sonic-net/sonic-build
|
||||
/src/sonic-build-hooks/ @Azure/sonic-build
|
||||
/src/debootstrap/ @Azure/sonic-build
|
||||
/src/sonic-fips/ @Azure/sonic-build
|
||||
|
||||
# installer
|
||||
/installer/ @qiluo-msft
|
||||
@ -35,77 +35,76 @@
|
||||
/dockers/docker-snmp/ @qiluo-msft
|
||||
|
||||
# kernel
|
||||
/src/sonic-linux-kernel/ @sonic-net/sonic-kernel
|
||||
/src/sonic-linux-kernel/ @Azure/sonic-kernel
|
||||
|
||||
# devices
|
||||
/device/ @sonic-net/sonic-platform
|
||||
/src/sonic-platform-common/ @sonic-net/sonic-platform
|
||||
/src/sonic-platform-daemons/ @sonic-net/sonic-platform
|
||||
/src/sonic-platform-pde/ @sonic-net/sonic-platform
|
||||
/src/lm-sensors/ @sonic-net/sonic-platform
|
||||
/src/flashrom/ @sonic-net/sonic-platform
|
||||
/device/ @Azure/sonic-platform
|
||||
/src/sonic-platform-common/ @Azure/sonic-platform
|
||||
/src/sonic-platform-daemons/ @Azure/sonic-platform
|
||||
/src/sonic-platform-pde/ @Azure/sonic-platform
|
||||
/src/lm-sensors/ @Azure/sonic-platform
|
||||
/src/flashrom/ @Azure/sonic-platform
|
||||
|
||||
# common library
|
||||
/src/initramfs-tools/ @qiluo-msft
|
||||
/src/redis-dump-load/ @sonic-net/sonic-management
|
||||
/src/sonic-py-common/ @sonic-net/sonic-management
|
||||
/src/sonic-py-swsssdk/ @sonic-net/sonic-management
|
||||
/src/sonic-swss-common/ @sonic-net/sonic-management
|
||||
/src/bash/ @sonic-net/sonic-management
|
||||
/src/tacacs/ @sonic-net/sonic-management
|
||||
/src/radius/ @sonic-net/sonic-management
|
||||
/src/swig/ @sonic-net/sonic-management
|
||||
/src/socat/ @sonic-net/sonic-management
|
||||
/src/redis-dump-load/ @Azure/sonic-management
|
||||
/src/sonic-py-common/ @Azure/sonic-management
|
||||
/src/sonic-py-swsssdk/ @Azure/sonic-management
|
||||
/src/sonic-swss-common/ @Azure/sonic-management
|
||||
/src/bash/ @Azure/sonic-management
|
||||
/src/tacacs/ @Azure/sonic-management
|
||||
/src/radius/ @Azure/sonic-management
|
||||
/src/swig/ @Azure/sonic-management
|
||||
/src/socat/ @Azure/sonic-management
|
||||
|
||||
# redis
|
||||
/src/redis/ @sonic-net/sonic-management
|
||||
/src/hiredis/ @sonic-net/sonic-management
|
||||
/src/redis/ @Azure/sonic-management
|
||||
/src/hiredis/ @Azure/sonic-management
|
||||
|
||||
# yang
|
||||
/src/sonic-yang-models/ @praveen-li @dgsudharsan @rathnasabapathyv @venkatmahalingam @qiluo-msft
|
||||
/src/sonic-yang-mgmt/ @sonet-net/sonic-management
|
||||
/src/libyang/ @sonic-net/sonic-management
|
||||
/src/libyang1/ @sonic-net/sonic-management
|
||||
/src/libyang2/ @sonic-net/sonic-management
|
||||
/src/sonic-yang-mgmt/ @Azure/sonic-management
|
||||
/src/libyang/ @Azure/sonic-management
|
||||
/src/libyang1/ @Azure/sonic-management
|
||||
/src/libyang2/ @Azure/sonic-management
|
||||
|
||||
# bgpcfgd
|
||||
/src/sonic-bgpcfgd/ @StormLiangMS
|
||||
|
||||
# sonic-config-engine
|
||||
/src/sonic-config-engine/ @sonic-net/sonic-management
|
||||
/src/sonic-config-engine/ @Azure/sonic-management
|
||||
|
||||
# sonic-utilities
|
||||
/src/sonic-utilities/ @sonic-net/sonic-management
|
||||
/src/sonic-utilities/ @Azure/sonic-management
|
||||
|
||||
# sonic-telemetry
|
||||
/dockers/docker-sonic-telemetry @sonic-net/sonic-management
|
||||
/src/sonic-telemetry/ @sonic-net/sonic-management
|
||||
/src/sonic-telemetry/ @Azure/sonic-management
|
||||
|
||||
# snmp
|
||||
/src/sonic-snmpagent/ @sonic-net/sonic-management
|
||||
/src/snmpd/ @sonic-net/sonic-management
|
||||
/src/sonic-snmpagent/ @Azure/sonic-management
|
||||
/src/snmpd/ @Azure/sonic-management
|
||||
|
||||
# dhcp relay
|
||||
/src/dhcp6relay/ @sonic-net/sonic-fundamentals
|
||||
/src/dhcpmon/ @sonic-net/sonic-fundamentals
|
||||
/src/isc-dhcp/ @sonic-net/sonic-fundamentals
|
||||
/src/dhcp6relay/ @Azure/sonic-fundamentals
|
||||
/src/dhcpmon/ @Azure/sonic-fundamentals
|
||||
/src/isc-dhcp/ @Azure/sonic-fundamentals
|
||||
|
||||
# sflow
|
||||
/src/sflow/ @sonic-net/sonic-dataplane
|
||||
/src/sflow/ @Azure/sonic-dataplane
|
||||
|
||||
# sonic restapi
|
||||
/src/sonic-restapi/ @sonic-net/sonic-dataplane
|
||||
/src/sonic-restapi/ @Azure/sonic-dataplane
|
||||
|
||||
# sonic swss
|
||||
/src/sonic-swss/ @sonic-net/sonic-dataplane
|
||||
/src/sonic-swss/ @Azure/sonic-dataplane
|
||||
|
||||
# linux networking, e.g., libnl3, iproute2, ifupdown2, ethtool
|
||||
/src/libnl3/ @sonic-net/sonic-dataplane
|
||||
/src/iproute2/ @sonic-net/sonic-dataplane
|
||||
/src/ifupdown2/ @sonic-net/sonic-dataplane
|
||||
/src/ethtool/ @sonic-net/sonic-dataplane
|
||||
/src/libnl3/ @Azure/sonic-dataplane
|
||||
/src/iproute2/ @Azure/sonic-dataplane
|
||||
/src/ifupdown2/ @Azure/sonic-dataplane
|
||||
/src/ethtool/ @Azure/sonic-dataplane
|
||||
|
||||
# ptf
|
||||
/src/ptf/ @sonic-net/sonic-fundamentals
|
||||
/src/ptf-py3/ @sonic-net/sonic-fundamentals
|
||||
/src/scapy/ @sonic-net/sonic-fundamentals
|
||||
/src/ptf/ @Azure/sonic-fundamentals
|
||||
/src/ptf-py3/ @Azure/sonic-fundamentals
|
||||
/src/scapy/ @Azure/sonic-fundamentals
|
||||
|
.github/pull_request_template.md (vendored, 24 changes)
@ -12,17 +12,10 @@
|
||||
|
||||
#### Why I did it
|
||||
|
||||
##### Work item tracking
|
||||
- Microsoft ADO **(number only)**:
|
||||
|
||||
#### How I did it
|
||||
|
||||
#### How to verify it
|
||||
|
||||
<!--
|
||||
If PR needs to be backported, then the PR must be tested against the base branch and the earliest backport release branch and provide tested image version on these two branches. For example, if the PR is requested for master, 202211 and 202012, then the requester needs to provide test results on master and 202012.
|
||||
-->
|
||||
|
||||
#### Which release branch to backport (provide reason below if selected)
|
||||
|
||||
<!--
|
||||
@ -39,19 +32,6 @@ If PR needs to be backported, then the PR must be tested against the base branch
|
||||
- [ ] 202106
|
||||
- [ ] 202111
|
||||
- [ ] 202205
|
||||
- [ ] 202211
|
||||
- [ ] 202305
|
||||
|
||||
#### Tested branch (Please provide the tested image version)
|
||||
|
||||
<!--
|
||||
- Please provide tested image version
|
||||
- e.g.
|
||||
- [x] 20201231.100
|
||||
-->
|
||||
|
||||
- [ ] <!-- image version 1 -->
|
||||
- [ ] <!-- image version 2 -->
|
||||
|
||||
#### Description for the changelog
|
||||
<!--
|
||||
@ -59,9 +39,7 @@ Write a short (one line) summary that describes the changes in this
|
||||
pull request for inclusion in the changelog:
|
||||
-->
|
||||
|
||||
<!--
|
||||
Ensure to add label/tag for the feature raised. example - PR#2174 under sonic-utilities repo. where, Generic Config and Update feature has been labelled as GCU.
|
||||
-->
|
||||
#### Ensure to add label/tag for the feature raised. example - PR#2174 under sonic-utilities repo. where, Generic Config and Update feature has been labelled as GCU.
|
||||
|
||||
#### Link to config_db schema for YANG module changes
|
||||
<!--
|
||||
|
.github/workflows/automerge.yml (vendored, 3 changes)
@@ -9,8 +9,7 @@ on:

jobs:
automerge:
# Disable workflow. Use automerge_scan instead to have the same behaviour.
if: github.event.check_suite.app.name == 'Azure Pipelines' && github.event.check_suite.conclusion == 'success' && false
if: github.event.check_suite.app.name == 'Azure Pipelines' && github.event.check_suite.conclusion == 'success'
runs-on: ubuntu-latest
steps:
- name: automerge
.github/workflows/automerge_scan.yml (vendored, 97 changes)
@ -1,97 +0,0 @@
|
||||
name: AutoMergeScan
|
||||
on:
|
||||
schedule:
|
||||
- cron: '31 */2 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
automerge_scan:
|
||||
# Disable workflow, and use pipeline instead to have same logic with other automation
|
||||
if: github.repository_owner == 'sonic-net' && false
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Debug
|
||||
env:
|
||||
TOKEN: ${{ secrets.TOKEN }}
|
||||
run: |
|
||||
set -e
|
||||
|
||||
echo ${TOKEN} | gh auth login --with-token
|
||||
gh pr list -R sonic-net/sonic-buildimage -A mssonicbld -L 100 -S "label:automerge" --json url,body,title,createdAt,labels,statusCheckRollup > prs.log
|
||||
cat prs.log | jq
|
||||
- name: Main
|
||||
run: |
|
||||
set -e
|
||||
|
||||
# PR merge run per 2 hours
|
||||
# Other operation run per day.
|
||||
# Cherry pick PR:
|
||||
# more than 3 days, comment @author to check
|
||||
# more than 10 days, stop comment.
|
||||
# more than 28 days, comment @author PR will be closed
|
||||
# more than 30 days, close PR
|
||||
date_3d_ago=$(date --date "3 day ago" -u +"%FT%TZ")
|
||||
date_10d_ago=$(date --date "10 day ago" -u +"%FT%TZ")
|
||||
date_28d_ago=$(date --date "28 day ago" -u +"%FT%TZ")
|
||||
date_30d_ago=$(date --date "30 day ago" -u +"%FT%TZ")
|
||||
date_now=$(date -u +"%T")
|
||||
operate=false
|
||||
[[ "$date_now" < "02:00:00" ]] && operate=true
|
||||
|
||||
count=$(cat prs.log | jq 'length')
|
||||
for ((i=0;i<$count;i++))
|
||||
do
|
||||
url=$(cat prs.log | jq -r ".[$i].url")
|
||||
body=$(cat prs.log | jq -r ".[$i].body")
|
||||
title=$(cat prs.log | jq -r ".[$i].title")
|
||||
origin_pr_id=$(echo $title | grep -Eo "\[action\] \[PR:[0-9]*\]" | grep -Eo [0-9]* || true)
|
||||
created_at=$(cat prs.log | jq -r ".[$i].createdAt")
|
||||
echo PR: $(($i+1))/$count, URL: $url, origin PR: $origin_pr_id, createdAt: $created_at, operate: $operate
|
||||
[[ "$url" == "" ]] && continue
|
||||
[[ $created_at > $(date --date "1 hour ago" -u +"%FT%TZ") ]] && continue
|
||||
|
||||
checks=$(cat prs.log | jq ".[$i].statusCheckRollup")
|
||||
checks_count=$(echo $checks | jq 'length')
|
||||
pr_success=true
|
||||
for ((j=0;j<$checks_count;j++))
|
||||
do
|
||||
check=$(echo $checks | jq ".[$j]")
|
||||
status=$(echo $check | jq -r '.status')
|
||||
conclusion=$(echo $check | jq -r '.conclusion')
|
||||
name=$(echo $check | jq -r '.name')
|
||||
|
||||
# EasyCLA success flag: state=SUCCESS
|
||||
# Others success flag: conclusion in SUCCESS,NEUTRAL
|
||||
# only check Azure.sonic-buildimage currently
|
||||
echo "$name" | grep -v "Azure.sonic-buildimage" > /dev/null && continue
|
||||
[[ "$status" != "COMPLETED" ]] && echo "$name: $status" && continue 2
|
||||
|
||||
success=true
|
||||
( [[ "$conclusion" == "FAILURE" ]] || [[ "$conclusion" == "CANCELLED" ]] ) && success=false && pr_success=false
|
||||
! $success && echo "FAIL: $name"
|
||||
done
|
||||
|
||||
# rerun Azure.sonic-buildimage per day
|
||||
! $pr_success && $operate && gh pr comment $url --body "/azp run Azure.sonic-buildimage"
|
||||
|
||||
# If auto cherry pick PRs failed, comment in original PR and close cherry pick PR
|
||||
if [ -n "$origin_pr_id" ] && [[ $created_at < $date_3d_ago ]] && ! $pr_success;then
|
||||
origin_pr_url=https://github.com/sonic-net/sonic-buildimage/pull/$origin_pr_id
|
||||
author=$(gh pr view $origin_pr_url --json author | jq .author.login -r)
|
||||
echo "Original author will check."
|
||||
$operate && [[ $created_at > $date_10d_ago ]] && gh pr comment $origin_pr_url --body "@$author cherry pick PR didn't pass PR checker. Please check!!!<br>$url"
|
||||
$operate && [[ $created_at < $date_28d_ago ]] && gh pr comment $origin_pr_url --body "@$author cherry pick PR didn't pass PR checker. Please check!!! Auto cherry pick PR will be closed in 2 days.<br>$url"
|
||||
$operate && [[ $created_at < $date_30d_ago ]] && echo "$url Closed" && gh pr close $url
|
||||
fi
|
||||
|
||||
! $pr_success && continue
|
||||
# merge the PR
|
||||
echo ========Merging PR========
|
||||
if echo $title | grep "^\[submodule\]";then
|
||||
gh pr merge --squash --admin -R sonic-net/sonic-buildimage $url -b "$body" || true
|
||||
else
|
||||
gh pr merge --rebase --admin -R sonic-net/sonic-buildimage $url || true
|
||||
fi
|
||||
echo ========Finished PR========
|
||||
done
|
||||
|
.github/workflows/build-broadcom.yml (vendored, 26 changes)
@ -1,26 +0,0 @@
|
||||
name: Build broadcom ONEIMAGE
|
||||
run-name: ${{ gitea.actor }} is building SONiC for broadcom
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
|
||||
|
||||
jobs:
|
||||
Configure-Build-Package-Action:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- run: echo "Triggered ${{ gitea.event_name }}"
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- run: pip install j2cli
|
||||
- name: Init Package
|
||||
run: make init
|
||||
- name: Configure Package
|
||||
run: make configure PLATFORM=broadcom
|
||||
- name: Compile Image
|
||||
run: make target/sonic-broadcom.bin
|
||||
- name: Release
|
||||
uses: softprops/action-gh-release@v1
|
||||
with:
|
||||
files: |
|
||||
*
|
.github/workflows/codeql-analysis.yml (vendored, 1 change)
@@ -14,7 +14,6 @@ on:

jobs:
analyze:
if: github.repository_owner == 'sonic-net'
name: Analyze
runs-on: ubuntu-latest
permissions:
.github/workflows/label.yml (vendored, 5 changes)
@ -12,13 +12,14 @@ on:
|
||||
- opened
|
||||
- reopened
|
||||
branches:
|
||||
- master
|
||||
- '202012'
|
||||
- '202[1-9][0-9][0-9]'
|
||||
- '20[3-9][0-9][0-9][0-9]'
|
||||
paths:
|
||||
- 'files/build/versions/**'
|
||||
|
||||
jobs:
|
||||
label:
|
||||
if: github.repository_owner == 'sonic-net'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/labeler@main
|
||||
|
.github/workflows/pr_cherrypick_poststep.yml (vendored, 5 changes)
@ -8,8 +8,7 @@ on:
|
||||
|
||||
jobs:
|
||||
post_cherry_pick:
|
||||
# Use azpw instead to have stable performance
|
||||
if: false && github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'automerge') && github.event.pull_request.head.user.login == 'mssonicbld' && startsWith(github.event.pull_request.title, '[action]')
|
||||
if: github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'automerge') && github.event.pull_request.head.user.login == 'mssonicbld' && startsWith(github.event.pull_request.title, '[action]')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Debug
|
||||
@ -47,4 +46,4 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
gh pr edit $origin_pr_url --add-label "Included in ${base_ref} Branch"
|
||||
gh pr edit $origin_pr_url --remove-label "Created PR to ${base_ref} Branch"
|
||||
gh pr edit $origin_pr_url --remove-label "Created PR to ${base_ref} Branch,Request for ${base_ref} Branch,Approved for ${base_ref} Branch"
|
||||
|
.github/workflows/pr_cherrypick_prestep.yml (vendored, 7 changes)
@ -9,8 +9,7 @@ on:
|
||||
|
||||
jobs:
|
||||
pre_cherry_pick:
|
||||
# Use azpw instead of workflow to have stable performance.
|
||||
if: false && github.repository_owner == 'sonic-net' && github.event.pull_request.merged == true && ( (github.event.action == 'closed' && contains(join(github.event.pull_request.labels.*.name, ','), 'Approved for 20')) || (github.event.action == 'labeled' && startsWith(github.event.label.name, 'Approved for 20')) )
|
||||
if: github.event.pull_request.merged == true && ( (github.event.action == 'closed' && contains(join(github.event.pull_request.labels.*.name, ','), 'Approved for 20')) || (github.event.action == 'labeled' && startsWith(github.event.label.name, 'Approved for 20')) )
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
@ -40,7 +39,6 @@ jobs:
|
||||
labels=$(echo $GITHUB_CONTEXT | jq -r ".event.label.name")
|
||||
fi
|
||||
title=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request.title")
|
||||
body=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request.body")
|
||||
echo =============================
|
||||
echo SHA: $sha
|
||||
echo PRID: $pr_id
|
||||
@ -52,7 +50,6 @@ jobs:
|
||||
echo ${TOKEN} | gh auth login --with-token
|
||||
echo author: $author
|
||||
echo title: $title
|
||||
echo body: "$body"
|
||||
echo =============================
|
||||
|
||||
git config user.name mssonicbld
|
||||
@ -108,7 +105,7 @@ jobs:
|
||||
else
|
||||
# Create PR to release branch
|
||||
git push mssonicbld HEAD:cherry/$branch/${pr_id} -f
|
||||
result=$(gh pr create -R ${repository} -H mssonicbld:cherry/$branch/${pr_id} -B $branch -t "[action] [PR:$pr_id] $title" -b "$body" 2>&1)
|
||||
result=$(gh pr create -R ${repository} -H mssonicbld:cherry/$branch/${pr_id} -B $branch -t "[action] [PR:$pr_id] $title" -b '' 2>&1)
|
||||
echo $result | grep "already exists" && { echo $result; return 0; }
|
||||
echo $result | grep github.com || { echo $result; return 1; }
|
||||
new_pr_rul=$(echo $result | grep github.com)
|
||||
|
.github/workflows/semgrep.yml (vendored, 1 change)
@@ -10,7 +10,6 @@ on:

jobs:
semgrep:
if: github.repository_owner == 'sonic-net'
name: Semgrep
runs-on: ubuntu-latest
container:
.gitignore (vendored, 12 changes)
@ -41,9 +41,6 @@ installer/x86_64/platforms/
|
||||
# Misc. files
|
||||
asic_config_checksum
|
||||
files/Aboot/boot0
|
||||
files/dsc/MANIFEST
|
||||
files/dsc/install_debian
|
||||
files/dsc/fs.zip
|
||||
files/initramfs-tools/arista-convertfs
|
||||
files/initramfs-tools/union-mount
|
||||
|
||||
@ -66,12 +63,6 @@ platform/**/*.egg-info
|
||||
platform/**/*-none-any.whl
|
||||
platform/**/.pybuild
|
||||
platform/**/debian/*
|
||||
!platform/**/debian/control
|
||||
!platform/**/debian/rules
|
||||
!platform/**/debian/changelog
|
||||
!platform/**/debian/compat
|
||||
!platform/**/debian/*.postinst
|
||||
!platform/**/debian/*.install
|
||||
platform/**/build
|
||||
platform/**/*.ko
|
||||
platform/**/*.mod.c
|
||||
@ -108,6 +99,3 @@ htmlcov/
|
||||
# Debian mirror Sources
|
||||
sources.list.*
|
||||
!sources.list*.j2
|
||||
|
||||
# Generated mirror configs
|
||||
apt-retries-count
|
||||
|
.gitmodules (vendored, 31 changes)
@ -1,15 +1,19 @@
|
||||
[submodule "sonic-swss-common"]
|
||||
path = src/sonic-swss-common
|
||||
url = https://github.com/sonic-net/sonic-swss-common
|
||||
branch = 202211
|
||||
[submodule "sonic-linux-kernel"]
|
||||
path = src/sonic-linux-kernel
|
||||
url = https://github.com/sonic-net/sonic-linux-kernel
|
||||
branch = 202211
|
||||
[submodule "sonic-sairedis"]
|
||||
path = src/sonic-sairedis
|
||||
url = https://github.com/sonic-net/sonic-sairedis
|
||||
branch = 202211
|
||||
[submodule "sonic-swss"]
|
||||
path = src/sonic-swss
|
||||
url = https://github.com/sonic-net/sonic-swss
|
||||
branch = 202211
|
||||
[submodule "src/p4c-bm/p4c-bm"]
|
||||
path = platform/p4/p4c-bm/p4c-bm
|
||||
url = https://github.com/krambn/p4c-bm
|
||||
@ -31,22 +35,25 @@
|
||||
[submodule "src/sonic-utilities"]
|
||||
path = src/sonic-utilities
|
||||
url = https://github.com/sonic-net/sonic-utilities
|
||||
branch = 202211
|
||||
[submodule "platform/broadcom/sonic-platform-modules-arista"]
|
||||
path = platform/broadcom/sonic-platform-modules-arista
|
||||
url = https://github.com/aristanetworks/sonic
|
||||
[submodule "src/sonic-platform-common"]
|
||||
path = src/sonic-platform-common
|
||||
url = https://github.com/sonic-net/sonic-platform-common
|
||||
branch = 202211
|
||||
[submodule "src/sonic-platform-daemons"]
|
||||
path = src/sonic-platform-daemons
|
||||
url = https://github.com/sonic-net/sonic-platform-daemons
|
||||
branch = 202211
|
||||
[submodule "src/sonic-platform-pde"]
|
||||
path = src/sonic-platform-pde
|
||||
url = https://github.com/sonic-net/sonic-platform-pdk-pde
|
||||
[submodule "src/sonic-frr/frr"]
|
||||
path = src/sonic-frr/frr
|
||||
url = https://github.com/sonic-net/sonic-frr.git
|
||||
branch = frr/8.5
|
||||
branch = frr/8.2
|
||||
[submodule "platform/p4/p4-hlir/p4-hlir-v1.1"]
|
||||
path = platform/p4/p4-hlir/p4-hlir-v1.1
|
||||
url = https://github.com/p4lang/p4-hlir.git
|
||||
@ -65,9 +72,15 @@
|
||||
[submodule "src/scapy"]
|
||||
path = src/scapy
|
||||
url = https://github.com/secdev/scapy.git
|
||||
[submodule "platform/mellanox/mlnx-sai/SAI-Implementation"]
|
||||
path = platform/mellanox/mlnx-sai/SAI-Implementation
|
||||
url = https://github.com/Mellanox/SAI-Implementation
|
||||
[submodule "src/sonic-mgmt-framework"]
|
||||
path = src/sonic-mgmt-framework
|
||||
url = https://github.com/sonic-net/sonic-mgmt-framework
|
||||
[submodule "Switch-SDK-drivers"]
|
||||
path = platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers
|
||||
url = https://github.com/Mellanox/Switch-SDK-drivers
|
||||
[submodule "src/sonic-ztp"]
|
||||
path = src/sonic-ztp
|
||||
url = https://github.com/sonic-net/sonic-ztp
|
||||
@ -91,6 +104,7 @@
|
||||
[submodule "src/linkmgrd"]
|
||||
path = src/linkmgrd
|
||||
url = https://github.com/sonic-net/sonic-linkmgrd.git
|
||||
branch = 202211
|
||||
[submodule "src/sonic-p4rt/sonic-pins"]
|
||||
path = src/sonic-p4rt/sonic-pins
|
||||
url = https://github.com/sonic-net/sonic-pins.git
|
||||
@ -106,18 +120,3 @@
|
||||
[submodule "src/sonic-gnmi"]
|
||||
path = src/sonic-gnmi
|
||||
url = https://github.com/sonic-net/sonic-gnmi.git
|
||||
[submodule "src/sonic-genl-packet"]
|
||||
path = src/sonic-genl-packet
|
||||
url = https://github.com/sonic-net/sonic-genl-packet
|
||||
[submodule "src/dhcpmon"]
|
||||
path = src/dhcpmon
|
||||
url = https://github.com/sonic-net/sonic-dhcpmon.git
|
||||
[submodule "src/sonic-dash-api"]
|
||||
path = src/sonic-dash-api
|
||||
url = https://github.com/sonic-net/sonic-dash-api.git
|
||||
[submodule "platform/marvell-arm64/mrvl-prestera"]
|
||||
path = platform/marvell-arm64/mrvl-prestera
|
||||
url = https://github.com/Marvell-switching/mrvl-prestera.git
|
||||
[submodule "platform/marvell-arm64/sonic-platform-marvell"]
|
||||
path = platform/marvell-arm64/sonic-platform-marvell
|
||||
url = https://github.com/Marvell-switching/sonic-platform-arm64.git
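After URL or branch pins in .gitmodules change, as in the hunks above, existing checkouts have to be re-synced before they pick up the new settings. A minimal sketch, assuming the repository was already cloned recursively:

```shell
# Propagate .gitmodules changes to .git/config, then update every submodule checkout
git submodule sync --recursive
git submodule update --init --recursive
```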
22 LICENSE

@ -1,13 +1,15 @@
Copyright (C) 2023 Microsoft
Copyright (C) 2016 Microsoft

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

http://www.apache.org/licenses/LICENSE-2.0
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
28 Makefile

@ -1,10 +1,9 @@
# SONiC make file

NOJESSIE ?= 1
NOSTRETCH ?= 1
NOSTRETCH ?= 0
NOBUSTER ?= 0
NOBULLSEYE ?= 0
NOBOOKWORM ?= 0

override Q := @
ifeq ($(QUIET),n)

@ -30,34 +29,26 @@ ifeq ($(NOBULLSEYE),0)
BUILD_BULLSEYE=1
endif

ifeq ($(NOBOOKWORM),0)
BUILD_BOOKWORM=1
endif

PLATFORM_PATH := platform/$(if $(PLATFORM),$(PLATFORM),$(CONFIGURED_PLATFORM))
PLATFORM_CHECKOUT := platform/checkout
PLATFORM_CHECKOUT_FILE := $(PLATFORM_CHECKOUT)/$(PLATFORM).ini
PLATFORM_CHECKOUT_CMD := $(shell if [ -f $(PLATFORM_CHECKOUT_FILE) ]; then PLATFORM_PATH=$(PLATFORM_PATH) j2 $(PLATFORM_CHECKOUT)/template.j2 $(PLATFORM_CHECKOUT_FILE); fi)
MAKE_WITH_RETRY := ./scripts/run_with_retry $(MAKE)

%::
@echo "+++ --- Making $@ --- +++"
ifeq ($(NOJESSIE), 0)
$(MAKE_WITH_RETRY) EXTRA_DOCKER_TARGETS=$(notdir $@) -f Makefile.work jessie
EXTRA_DOCKER_TARGETS=$(notdir $@) $(MAKE) -f Makefile.work jessie
endif
ifeq ($(NOSTRETCH), 0)
$(MAKE_WITH_RETRY) EXTRA_DOCKER_TARGETS=$(notdir $@) BLDENV=stretch -f Makefile.work stretch
EXTRA_DOCKER_TARGETS=$(notdir $@) BLDENV=stretch $(MAKE) -f Makefile.work stretch
endif
ifeq ($(NOBUSTER), 0)
$(MAKE_WITH_RETRY) EXTRA_DOCKER_TARGETS=$(notdir $@) BLDENV=buster -f Makefile.work buster
EXTRA_DOCKER_TARGETS=$(notdir $@) BLDENV=buster $(MAKE) -f Makefile.work buster
endif
ifeq ($(NOBULLSEYE), 0)
$(MAKE_WITH_RETRY) EXTRA_DOCKER_TARGETS=$(notdir $@) BLDENV=bullseye -f Makefile.work bullseye
BLDENV=bullseye $(MAKE) -f Makefile.work $@
endif
ifeq ($(NOBOOKWORM), 0)
$(MAKE_WITH_RETRY) BLDENV=bookworm -f Makefile.work $@
endif
BLDENV=bookworm $(MAKE) -f Makefile.work docker-cleanup
BLDENV=bullseye $(MAKE) -f Makefile.work docker-cleanup

jessie:
@echo "+++ Making $@ +++"

@ -77,12 +68,6 @@ ifeq ($(NOBUSTER), 0)
$(MAKE) -f Makefile.work buster
endif

bullseye:
@echo "+++ Making $@ +++"
ifeq ($(NOBUSTER), 0)
$(MAKE) -f Makefile.work bullseye
endif

init:
@echo "+++ Making $@ +++"
$(MAKE) -f Makefile.work $@

@ -96,7 +81,6 @@ define make_work
$(if $(BUILD_STRETCH),BLDENV=stretch $(MAKE) -f Makefile.work $@,)
$(if $(BUILD_BUSTER),BLDENV=buster $(MAKE) -f Makefile.work $@,)
$(if $(BUILD_BULLSEYE),BLDENV=bullseye $(MAKE) -f Makefile.work $@,)
$(if $(BUILD_BOOKWORM),BLDENV=bookworm $(MAKE) -f Makefile.work $@,)
endef

.PHONY: $(PLATFORM_PATH)
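The NOJESSIE/NOSTRETCH/NOBUSTER/NOBULLSEYE/NOBOOKWORM switches above decide which slave environments the pass-through `%::` rule walks through. A hedged sketch of a typical invocation; the target name is only illustrative:

```shell
# Skip the older distro passes and build a single target through the newer build environments only
NOJESSIE=1 NOSTRETCH=1 NOBUSTER=1 make target/sonic-vs.img.gz
```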
@ -78,8 +78,7 @@ SONIC_COMMON_DPKG_LIST := debian/control debian/changelog debian/rules \
SONIC_COMMON_BASE_FILES_LIST := sonic-slave-jessie/Dockerfile.j2 sonic-slave-jessie/Dockerfile.user.j2 \
sonic-slave-stretch/Dockerfile.j2 sonic-slave-stretch/Dockerfile.user.j2 \
sonic-slave-buster/Dockerfile.j2 sonic-slave-buster/Dockerfile.user.j2 \
sonic-slave-bullseye/Dockerfile.j2 sonic-slave-bullseye/Dockerfile.user.j2 \
sonic-slave-bookworm/Dockerfile.j2 sonic-slave-bookworm/Dockerfile.user.j2
sonic-slave-bullseye/Dockerfile.j2 sonic-slave-bullseye/Dockerfile.user.j2

@ -295,11 +294,9 @@ define SAVE_INTO_CACHE
echo "Target $(1) dependencies are modifed - global save cache skipped" >> $($(1)_DST_PATH)/$(1).log
$(eval $(1)_CACHE_DIR := $(SONIC_DPKG_LOCAL_CACHE_DIR))
)
cp $($(1)_DST_PATH)/$(1).log $($(1)_DST_PATH)/$(1).cached.log
$($(1)_CACHE_USER) tar -C $($(1)_BASE_PATH) -mczvf $($(1)_CACHE_DIR)/$(MOD_CACHE_FILE) $(2) $(addprefix $($(1)_DST_PATH)/,$($(1)_DERIVED_DEBS) $($(1)_EXTRA_DEBS) $(1).cached.log) \
$($(1)_CACHE_USER) tar -C $($(1)_BASE_PATH) -mczvf $($(1)_CACHE_DIR)/$(MOD_CACHE_FILE) $(2) $(addprefix $($(1)_DST_PATH)/,$($(1)_DERIVED_DEBS) $($(1)_EXTRA_DEBS) ) \
1>>$($(1)_DST_PATH)/$(1).log
sudo chmod 777 $($(1)_CACHE_DIR)/$(MOD_CACHE_FILE)
rm -f $($(1)_DST_PATH)/$(1).cached.log

echo "File $($(1)_CACHE_DIR)/$(MOD_CACHE_FILE) saved in cache " >> $($(1)_DST_PATH)/$(1).log
echo "[ CACHE::SAVED ] $($(1)_CACHE_DIR)/$(MOD_CACHE_FILE)" >> $($(1)_DST_PATH)/$(1).log

@ -360,22 +357,6 @@ define SAVE_CACHE
$(if $(call CHECK_WCACHE_ENABLED,$(1)), $(call SAVE_INTO_CACHE,$(1),$(2)))
endef

RFS_DEP_FILES := $(wildcard \
$(addprefix scripts/, build_debian_base_system.sh prepare_debian_image_buildinfo.sh build_mirror_config.sh) \
$(addprefix $(IMAGE_DISTRO_DEBS_PATH)/,$(INITRAMFS_TOOLS) $(LINUX_KERNEL)) \
$(shell git ls-files files/initramfs-tools) \
$(shell git ls-files files/image_config) \
$(shell git ls-files files/apparmor) \
$(shell git ls-files files/apt) \
$(shell git ls-files files/sshd) \
$(shell git ls-files files/dhcp) \
src/sonic-build-hooks/buildinfo/trusted.gpg.d \
platform/$(CONFIGURED_PLATFORM)/modules \
files/docker/docker.service.conf \
files/build_templates/default_users.json.j2 \
files/build_scripts/generate_asic_config_checksum.py \
files/scripts/core_cleanup.py \
build_debian.sh onie-image.conf)

# Set the target path for each target.

@ -403,17 +384,11 @@ $(foreach pkg, $(SONIC_INSTALL_PKGS), \
$(eval $(pkg)_DST_PATH := $(if $($(pkg)_DST_PATH), $($(pkg)_DST_PATH), $(FSROOT_PATH))) \
$(eval $(FSROOT_PATH)/$(pkg)_TARGET := $(pkg)) )

$(foreach pkg, $(SONIC_RFS_TARGETS), \
$(eval $(pkg)_DST_PATH := $(if $($(pkg)_DST_PATH), $($(pkg)_DST_PATH), $(TARGET_PATH))) \
$(eval $(pkg)_CACHE_MODE := GIT_CONTENT_SHA) \
$(eval $(pkg)_DEP_FLAGS := $(SONIC_COMMON_FLAGS_LIST)) \
$(eval $(pkg)_DEP_FILES := $(SONIC_COMMON_BASE_FILES_LIST) $(RFS_DEP_FILES)) \
$(eval $(TARGET_PATH)/$(pkg)_TARGET := $(pkg)) )

# define the DEP files(.dep and .smdep) and SHA files (.sha and smsha) for each target
$(foreach pkg, $(SONIC_MAKE_DEBS) $(SONIC_DPKG_DEBS) $(SONIC_ONLINE_DEBS) $(SONIC_COPY_DEBS) \
$(SONIC_MAKE_FILES) $(SONIC_PYTHON_STDEB_DEBS) $(SONIC_PYTHON_WHEELS) \
$(SONIC_DOCKER_IMAGES) $(SONIC_DOCKER_DBG_IMAGES) $(SONIC_INSTALL_PKGS) $(SONIC_RFS_TARGETS), \
$(SONIC_DOCKER_IMAGES) $(SONIC_DOCKER_DBG_IMAGES) $(SONIC_INSTALL_PKGS), \
$(eval $(pkg)_MOD_SRC_PATH:=$(if $($(pkg)_SRC_PATH),$($(pkg)_SRC_PATH),$($(pkg)_PATH))) \
$(eval $(pkg)_BASE_PATH:=$(if $($(pkg)_BASE_PATH),$($(pkg)_BASE_PATH),$(CURDIR))) \
$(eval $(pkg)_DEP_FLAGS_FILE:=$($(pkg)_DST_PATH)/$(pkg).flags) \

@ -514,7 +489,6 @@ $(eval $(call FLAGS_DEP_RULES, $(SONIC_PYTHON_STDEB_DEBS), $(PYTHON_DEBS_PATH),f
$(eval $(call FLAGS_DEP_RULES, $(SONIC_PYTHON_WHEELS), $(PYTHON_WHEELS_PATH),flags))
$(eval $(call FLAGS_DEP_RULES, $(SONIC_DOCKER_IMAGES) $(SONIC_DOCKER_DBG_IMAGES), $(TARGET_PATH),flags))
$(eval $(call FLAGS_DEP_RULES, $(SONIC_INSTALL_PKGS), $(FSROOT_PATH),flags))
$(eval $(call FLAGS_DEP_RULES, $(SONIC_RFS_TARGETS), $(TARGET_PATH),flags))

@ -590,12 +564,10 @@ ALL_DEP_FILES_LIST += $(foreach pkg,$(2), $($(filter none,$($(1)_CACHE_MODE)), \
$(addsuffix .$(3).sha,$(addprefix $(pkg)/, $(1)))))
$(foreach docker, $(filter $(SONIC_DOCKER_IMAGES), $(1)), \
$(eval $(docker)_DEP_FILES+=$(wildcard files/build/versions/default/*) \
$(wildcard files/build/versions/dockers/$(basename $(docker))/*) \
$(foreach docker_file, $($(docker)_FILES), $(addprefix $(if $($(docker_file)_PATH), $($(docker_file)_PATH), $(FILES_PATH))/, $(docker_file))) ))
$(wildcard files/build/versions/dockers/$(basename $(docker))/*)))
$(foreach docker, $(filter $(SONIC_DOCKER_DBG_IMAGES), $(1)), \
$(eval $(docker)_DEP_FILES+=$(wildcard files/build/versions/default/*) \
$(wildcard files/build/versions/dockers/$(patsubst %-$(DBG_IMAGE_MARK).gz,%,$(docker))/*) \
$(foreach docker_file, $($(docker)_FILES), $(addprefix $(if $($(docker_file)_PATH), $($(docker_file)_PATH), $(FILES_PATH))/, $(docker_file))) ))
$(wildcard files/build/versions/dockers/$(patsubst %-$(DBG_IMAGE_MARK).gz,%,$(docker))/*)))
$(addsuffix .$(3),$(addprefix $(2)/, $(1))) : $(2)/%.$(3) : \
$(2)/%.flags $$$$($$$$*_DEP_FILES) $$$$(if $$$$($$$$*_SMDEP_FILES), $(2)/%.smdep)
@$$(eval $$*_DEP_FILES_MODIFIED := $$? )

@ -611,7 +583,6 @@ $(eval $(call SHA_DEP_RULES, $(SONIC_PYTHON_STDEB_DEBS), $(PYTHON_DEBS_PATH),dep
$(eval $(call SHA_DEP_RULES, $(SONIC_PYTHON_WHEELS), $(PYTHON_WHEELS_PATH),dep))
$(eval $(call SHA_DEP_RULES, $(SONIC_DOCKER_IMAGES) $(SONIC_DOCKER_DBG_IMAGES), $(TARGET_PATH),dep))
$(eval $(call SHA_DEP_RULES, $(SONIC_INSTALL_PKGS), $(FSROOT_PATH),dep))
$(eval $(call SHA_DEP_RULES, $(SONIC_RFS_TARGETS), $(TARGET_PATH),dep))

@ -645,7 +616,6 @@ SONIC_CACHE_CLEAN_TARGETS = $(addsuffix -clean,$(addprefix $(TARGET_PATH)/, \
$(SONIC_DOCKER_IMAGES) \
$(SONIC_DOCKER_DBG_IMAGES) \
$(SONIC_SIMPLE_DOCKER_IMAGES) \
$(SONIC_RFS_TARGETS) \
$(SONIC_INSTALLERS)))
$(SONIC_CACHE_CLEAN_TARGETS) :: $(TARGET_PATH)/%-clean : .platform
@rm -f $($*_DEP_FLAGS_FILE) $($*_MOD_HASH_FILE) $($*_SMOD_HASH_FILE) \
197 Makefile.work

@ -5,6 +5,8 @@
#
# * PLATFORM: Specific platform we wish to build images for.
# * BUILD_NUMBER: Desired version-number to pass to the building-system.
# * ENABLE_DHCP_GRAPH_SERVICE: Enables get-graph service to fetch minigraph files
# through http.
# * ENABLE_ZTP: Enables zero touch provisioning.
# * SHUTDOWN_BGP_ON_START: Sets admin-down state for all bgp peerings after restart.
# * INCLUDE_KUBERNETES: Allows including Kubernetes

@ -35,12 +37,6 @@
# * ENABLE_TRANSLIB_WRITE: Enable translib write/config operations via the gNMI interface.
# * Default: unset
# * Values: y
# * ENABLE_NATIVE_WRITE: Enable native write/config operations via the gNMI interface.
# * Default: unset
# * Values: y
# * ENABLE_DIALOUT: Enable dialout client in telemetry.
# * Default: unset
# * Values: y
# * SONIC_DPKG_CACHE_METHOD: Specifying method of obtaining the Debian packages from cache: none or cache
# * SONIC_DPKG_CACHE_SOURCE: Debian package cache location when cache enabled for debian packages
# * BUILD_LOG_TIMESTAMP: Set timestamp in the build log (simple/none)

@ -54,13 +50,6 @@
# * ENABLE_BOOTCHART: Enable SONiC bootchart
# * Default: n
# * Values: y,n
# * GZ_COMPRESS_PROGRAM: Select pigz to reduce build time
# * Default: gzip
# * Values: pigz,gzip
# * UNATTENDED: Don't wait for interactive input from terminal, setting this
# * value to anything will enable it
# * Default: unset
# * Value: y
#
###############################################################################
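The knobs documented in this header are plain environment variables or make arguments. A minimal sketch of an invocation that exercises a few of them; the cache directory is an assumption, not a required path:

```shell
# Reuse cached Debian packages, compress with pigz, and never prompt for input
SONIC_DPKG_CACHE_METHOD=cache \
SONIC_DPKG_CACHE_SOURCE=/var/cache/sonic-dpkg \
GZ_COMPRESS_PROGRAM=pigz \
UNATTENDED=y \
make SONIC_BUILD_JOBS=4 all
```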
@ -70,7 +59,6 @@ USER := $(shell id -un)
PWD := $(shell pwd)
USER_LC := $(shell echo $(USER) | tr A-Z a-z)
DOCKER_MACHINE := $(shell docker run --rm debian:buster uname -m)
HOST_DOCKERD_GID := $(shell getent group docker | cut -d : -f3)

comma := ,

@ -123,9 +111,7 @@ else
TARGET_BOOTLOADER = uboot
endif

ifeq ($(BLDENV), bookworm)
SLAVE_DIR = sonic-slave-bookworm
else ifeq ($(BLDENV), bullseye)
ifeq ($(BLDENV), bullseye)
SLAVE_DIR = sonic-slave-bullseye
else ifeq ($(BLDENV), buster)
SLAVE_DIR = sonic-slave-buster

@ -151,14 +137,6 @@ ifeq ($(ENABLE_DOCKER_BASE_PULL),)
override ENABLE_DOCKER_BASE_PULL = n
endif

ifeq ($(LEGACY_SONIC_MGMT_DOCKER),)
override LEGACY_SONIC_MGMT_DOCKER = y
endif

ifneq ($(GZ_COMPRESS_PROGRAM), pigz)
override GZ_COMPRESS_PROGRAM = gzip
endif

ifeq ($(CONFIGURED_ARCH),amd64)
SLAVE_BASE_IMAGE = $(SLAVE_DIR)
MULTIARCH_QEMU_ENVIRON = n

@ -184,50 +162,30 @@ DOCKER_ROOT = $(PWD)/fsroot.docker.$(BLDENV)

# Support FIPS feature, armhf not supported yet
ifeq ($(PLATFORM_ARCH),armhf)
INCLUDE_FIPS := n
ENABLE_FIPS_FEATURE := n
ENABLE_FIPS := n
endif

# FIPS not yet available on Bookworm
ifeq ($(BLDENV),bookworm)
$(warning FIPS support not yet available on Bookworm)
INCLUDE_FIPS := n
endif

ifeq ($(INCLUDE_FIPS), n)
ifeq ($(ENABLE_FIPS_FEATURE), n)
ifeq ($(ENABLE_FIPS), y)
$(error Cannot set fips config ENABLE_FIPS=y when INCLUDE_FIPS=n)
$(error Cannot set fips config ENABLE_FIPS=y when ENABLE_FIPS_FEATURE=n)
endif
endif

SONIC_VERSION_CACHE := $(filter-out none,$(SONIC_VERSION_CACHE_METHOD))
SONIC_OVERRIDE_BUILD_VARS += SONIC_VERSION_CACHE=$(SONIC_VERSION_CACHE)
SONIC_OVERRIDE_BUILD_VARS += SONIC_VERSION_CACHE_SOURCE=$(SONIC_VERSION_CACHE_SOURCE)
export SONIC_VERSION_CACHE SONIC_VERSION_CACHE_SOURCE
$(shell test -d $(SONIC_VERSION_CACHE_SOURCE) || \
mkdir -p $(SONIC_VERSION_CACHE_SOURCE) && chmod -f 777 $(SONIC_VERSION_CACHE_SOURCE) 2>/dev/null )

# Generate the version control build info
$(shell \
SONIC_VERSION_CONTROL_COMPONENTS=$(SONIC_VERSION_CONTROL_COMPONENTS) \
TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) \
PACKAGE_URL_PREFIX=$(PACKAGE_URL_PREFIX) \
DISTRO=$(BLDENV) \
SONIC_VERSION_CACHE=$(SONIC_VERSION_CACHE) \
SONIC_VERSION_CACHE_SOURCE=$(SONIC_VERSION_CACHE_SOURCE) \
DBGOPT='$(DBGOPT)' \
MIRROR_SNAPSHOT=$(MIRROR_SNAPSHOT) \
PIP_HTTP_TIMEOUT=$(PIP_HTTP_TIMEOUT) \
scripts/generate_buildinfo_config.sh)

# Generate the slave Dockerfile, and prepare build info for it
$(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) \
MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) \
CROSS_BUILD_ENVIRON=$(CROSS_BUILD_ENVIRON) \
INCLUDE_FIPS=$(INCLUDE_FIPS) \
ENABLE_FIPS_FEATURE=$(ENABLE_FIPS_FEATURE) \
DOCKER_EXTRA_OPTS=$(DOCKER_EXTRA_OPTS) \
DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) \
GZ_COMPRESS_PROGRAM=$(GZ_COMPRESS_PROGRAM) \
j2 $(SLAVE_DIR)/Dockerfile.j2 > $(SLAVE_DIR)/Dockerfile)

$(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) \

@ -235,18 +193,8 @@ $(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) \
CROSS_BUILD_ENVIRON=$(CROSS_BUILD_ENVIRON) \
j2 $(SLAVE_DIR)/Dockerfile.user.j2 > $(SLAVE_DIR)/Dockerfile.user)

ifeq ($(CROSS_BUILD_ENVIRON), y)
$(shell SONIC_VERSION_CONTROL_COMPONENTS=$(SONIC_VERSION_CONTROL_COMPONENTS) \
MIRROR_SNAPSHOT=$(MIRROR_SNAPSHOT) scripts/build_mirror_config.sh $(SLAVE_DIR) amd64 $(BLDENV))
endif
$(shell SONIC_VERSION_CONTROL_COMPONENTS=$(SONIC_VERSION_CONTROL_COMPONENTS) \
MIRROR_SNAPSHOT=$(MIRROR_SNAPSHOT) scripts/build_mirror_config.sh $(SLAVE_DIR) $(CONFIGURED_ARCH) $(BLDENV))

PREPARE_DOCKER=BUILD_SLAVE=y \
DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) \
SONIC_VERSION_CACHE=$(SONIC_VERSION_CACHE) \
DBGOPT='$(DBGOPT)' \
MIRROR_SNAPSHOT=$(MIRROR_SNAPSHOT) \
scripts/prepare_docker_buildinfo.sh \
$(SLAVE_BASE_IMAGE) \
$(SLAVE_DIR)/Dockerfile \

@ -260,7 +208,8 @@ $(shell $(PREPARE_DOCKER) )
SLAVE_BASE_TAG = $(shell \
cat $(SLAVE_DIR)/Dockerfile \
$(SLAVE_DIR)/sources.list.* \
$(SLAVE_DIR)/buildinfo/versions/versions-* 2>/dev/null \
$(SLAVE_DIR)/buildinfo/versions/versions-* \
src/sonic-build-hooks/hooks/* 2>/dev/null \
| sha1sum \
| awk '{print substr($$1,0,11);}')

@ -268,22 +217,16 @@ SLAVE_BASE_TAG = $(shell \
SLAVE_TAG = $(shell \
(cat $(SLAVE_DIR)/Dockerfile.user \
$(SLAVE_DIR)/Dockerfile \
$(SLAVE_DIR)/sources.list.* \
$(SLAVE_DIR)/buildinfo/versions/versions-* \
.git/HEAD \
&& echo $(USER)/$(PWD)/$(CONFIGURED_PLATFORM)) \
| sha1sum \
| awk '{print substr($$1,0,11);}')

COLLECT_DOCKER=DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) \
SONIC_VERSION_CACHE=$(SONIC_VERSION_CACHE) \
DBGOPT='$(DBGOPT)' \
scripts/collect_docker_version_files.sh \
$(SLAVE_BASE_IMAGE) \
target \
$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) \
$(SLAVE_DIR) \
$(SLAVE_DIR)/Dockerfile

$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) \
target
OVERLAY_MODULE_CHECK := \
lsmod | grep -q "^overlay " &>/dev/null || \
zgrep -q 'CONFIG_OVERLAY_FS=y' /proc/config.gz &>/dev/null || \

@ -299,7 +242,7 @@ endif
DOCKER_LOCKFILE_SAVE := $(DOCKER_LOCKDIR)/docker_save.lock
$(shell mkdir -m 0777 -p $(DOCKER_LOCKDIR))
$(shell [ -f $(DOCKER_LOCKFILE_SAVE) ] || (touch $(DOCKER_LOCKFILE_SAVE) && chmod 0777 $(DOCKER_LOCKFILE_SAVE)))
$(shell [ -d $(DOCKER_ROOT) ] && docker run --rm -v $(DOCKER_ROOT)\:/mount debian sh -c 'rm -rf /mount/*')
$(docker run --rm -v $(DOCKER_ROOT)\:/mount alpine sh -c 'rm -rf /mount/')
$(mkdir -p $(DOCKER_ROOT))

ifeq ($(DOCKER_BUILDER_MOUNT),)

@ -333,27 +276,10 @@ ifdef SONIC_BUILD_QUIETER
DOCKER_RUN += -e "SONIC_BUILD_QUIETER=$(SONIC_BUILD_QUIETER)"
endif

# Mount the Signing key and Certificate in the slave container
ifneq ($(SECURE_UPGRADE_DEV_SIGNING_KEY),)
DOCKER_RUN += -v $(SECURE_UPGRADE_DEV_SIGNING_KEY):$(SECURE_UPGRADE_DEV_SIGNING_KEY):ro
endif
ifneq ($(SECURE_UPGRADE_SIGNING_CERT),)
DOCKER_RUN += -v $(SECURE_UPGRADE_SIGNING_CERT):$(SECURE_UPGRADE_SIGNING_CERT):ro
endif
# Mount the Signing prod tool in the slave container
$(info "SECURE_UPGRADE_PROD_SIGNING_TOOL": "$(SECURE_UPGRADE_PROD_SIGNING_TOOL)")
ifneq ($(SECURE_UPGRADE_PROD_SIGNING_TOOL),)
DOCKER_RUN += -v $(SECURE_UPGRADE_PROD_SIGNING_TOOL):/sonic/scripts/$(shell basename -- $(SECURE_UPGRADE_PROD_SIGNING_TOOL)):ro
endif

ifneq ($(SONIC_DPKG_CACHE_SOURCE),)
DOCKER_RUN += -v "$(SONIC_DPKG_CACHE_SOURCE):/dpkg_cache:rw"
endif

ifneq ($(SONIC_VERSION_CACHE_SOURCE),)
DOCKER_RUN += -v "$(SONIC_VERSION_CACHE_SOURCE):/vcache:rw"
endif

ifeq ($(SONIC_ENABLE_SECUREBOOT_SIGNATURE), y)
ifneq ($(SIGNING_KEY),)
DOCKER_SIGNING_SOURCE := $(shell dirname $(SIGNING_KEY))

@ -391,7 +317,7 @@ endif

ifeq ($(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD), y)
ifneq ($(MULTIARCH_QEMU_ENVIRON), y)
DOCKER_RUN += -v /var/run/docker.sock:/var/run/docker.sock --group-add $(HOST_DOCKERD_GID)
DOCKER_RUN += -v /var/run/docker.sock:/var/run/docker.sock
endif
endif

@ -401,19 +327,19 @@ ifeq ($(DOCKER_DATA_ROOT_FOR_MULTIARCH),)
endif
# Multiarch docker cannot start dockerd service due to iptables cannot run over different arch kernel
SONIC_SERVICE_DOCKERD_FOR_MULTIARCH=y
SONIC_NATIVE_DOCKERD_FOR_MULTIARCH := dockerd --experimental=true --storage-driver=vfs \
SONIC_NATIVE_DOCKERD_FOR_MUTLIARCH := dockerd --experimental=true --storage-driver=vfs \
--data-root=$(DOCKER_DATA_ROOT_FOR_MULTIARCH) --exec-root=/var/run/march/docker/ \
-H unix:///var/run/march/docker.sock -p /var/run/march/docker.pid

ifneq ($(DOCKER_CONFIG_FILE_FOR_MULTIARCH),)
SONIC_NATIVE_DOCKERD_FOR_MULTIARCH += --config-file=$(DOCKER_CONFIG_FILE_FOR_MULTIARCH)
SONIC_NATIVE_DOCKERD_FOR_MUTLIARCH += --config-file=$(DOCKER_CONFIG_FILE_FOR_MULTIARCH)
endif

DOCKER_RUN += -v /var/run/march/docker.sock:/var/run/docker.sock
DOCKER_RUN += -v /var/run/march/docker.pid:/var/run/docker.pid
DOCKER_RUN += -v /var/run/march/docker:/var/run/docker
DOCKER_RUN += -v $(DOCKER_DATA_ROOT_FOR_MULTIARCH):/var/lib/docker
SONIC_USERFACL_DOCKERD_FOR_MULTIARCH := setfacl -m user:$(USER):rw /var/run/march/docker.sock
SONIC_USERFACL_DOCKERD_FOR_MUTLIARCH := setfacl -m user:$(USER):rw /var/run/march/docker.sock

#Override Native config to prevent docker service
SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD=y

@ -421,7 +347,7 @@ endif
DOCKER_MULTIARCH_CHECK := docker run --rm --privileged multiarch/qemu-user-static --reset -p yes --credential yes

DOCKER_SERVICE_SAFE_KILLER := (MARCH_PID=`ps -eo pid,cmd | grep "[0-9] dockerd.*march" | awk '{print $$1}'`; echo "Killing march docker $$MARCH_PID"; [ -z "$$MARCH_PID" ] || sudo kill -9 "$$MARCH_PID";)
DOCKER_SERVICE_MULTIARCH_CHECK := ($(DOCKER_SERVICE_SAFE_KILLER); sudo rm -fr /var/run/march/; (echo "Starting docker march service..."; sudo $(SONIC_NATIVE_DOCKERD_FOR_MULTIARCH) &) &>/dev/null ; sleep 2; sudo $(SONIC_USERFACL_DOCKERD_FOR_MULTIARCH);)
DOCKER_SERVICE_MULTIARCH_CHECK := ($(DOCKER_SERVICE_SAFE_KILLER); sudo rm -fr /var/run/march/; (echo "Starting docker march service..."; sudo $(SONIC_NATIVE_DOCKERD_FOR_MUTLIARCH) &) &>/dev/null ; sleep 2; sudo $(SONIC_USERFACL_DOCKERD_FOR_MUTLIARCH);)

# Docker service to load the compiled dockers-*.gz
# docker 19.0 version above has path/length restriction, so replaced it with soft link in /tmp/

@ -436,7 +362,7 @@ endif

endif

SPLIT_LOG = 2>&1 | tee
SPLIT_LOG = | tee

DOCKER_BASE_LOG = $(SLAVE_DIR)/$(SLAVE_BASE_IMAGE)_$(SLAVE_BASE_TAG).log
DOCKER_LOG = $(SLAVE_DIR)/$(SLAVE_IMAGE)_$(SLAVE_TAG).log

@ -447,13 +373,10 @@ DOCKER_SLAVE_BASE_BUILD = docker build --no-cache \
--build-arg http_proxy=$(http_proxy) \
--build-arg https_proxy=$(https_proxy) \
--build-arg no_proxy=$(no_proxy) \
--build-arg SONIC_VERSION_CACHE=$(SONIC_VERSION_CACHE) \
--build-arg SONIC_VERSION_CONTROL_COMPONENTS=$(SONIC_VERSION_CONTROL_COMPONENTS) \
$(SLAVE_DIR) \
$(SPLIT_LOG) $(DOCKER_BASE_LOG)
$(SLAVE_DIR) $(SPLIT_LOG) $(DOCKER_BASE_LOG)

DOCKER_BASE_PULL = docker pull \
$(REGISTRY_SERVER):$(REGISTRY_PORT)$(REGISTRY_SERVER_PATH)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG)
$(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG)

DOCKER_USER_BUILD = docker build --no-cache \
--build-arg user=$(USER) \

@ -479,7 +402,7 @@ DOCKER_SLAVE_BASE_PULL_REGISTRY = \
$(DOCKER_BASE_PULL); \
} && \
{ \
docker tag $(REGISTRY_SERVER):$(REGISTRY_PORT)$(REGISTRY_SERVER_PATH)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) && \
docker tag $(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) && \
$(COLLECT_DOCKER); \
}\

@ -521,13 +444,13 @@ SONIC_BUILD_INSTRUCTION := $(MAKE) \
BUILD_TIMESTAMP=$(BUILD_TIMESTAMP) \
SONIC_IMAGE_VERSION=$(SONIC_IMAGE_VERSION) \
SLAVE_TAG=$(SLAVE_TAG) \
ENABLE_DHCP_GRAPH_SERVICE=$(ENABLE_DHCP_GRAPH_SERVICE) \
ENABLE_ZTP=$(ENABLE_ZTP) \
INCLUDE_PDE=$(INCLUDE_PDE) \
SHUTDOWN_BGP_ON_START=$(SHUTDOWN_BGP_ON_START) \
INCLUDE_KUBERNETES=$(INCLUDE_KUBERNETES) \
KUBERNETES_VERSION=$(KUBERNETES_VERSION) \
KUBERNETES_CNI_VERSION=$(KUBERNETES_CNI_VERSION) \
KUBERNETES_CRI_TOOLS_VERSION=$(KUBERNETES_CRI_TOOLS_VERSION) \
K8s_GCR_IO_PAUSE_VERSION=$(K8s_GCR_IO_PAUSE_VERSION) \
INCLUDE_KUBERNETES_MASTER=$(INCLUDE_KUBERNETES_MASTER) \
SONIC_ENABLE_PFCWD_ON_START=$(ENABLE_PFCWD_ON_START) \

@ -537,7 +460,6 @@ SONIC_BUILD_INSTRUCTION := $(MAKE) \
MDEBUG=$(MDEBUG) \
PASSWORD=$(PASSWORD) \
USERNAME=$(USERNAME) \
CHANGE_DEFAULT_PASSWORD=$(CHANGE_DEFAULT_PASSWORD) \
SONIC_BUILD_JOBS=$(SONIC_BUILD_JOBS) \
SONIC_USE_DOCKER_BUILDKIT=$(SONIC_USE_DOCKER_BUILDKIT) \
VS_PREPARE_MEM=$(VS_PREPARE_MEM) \

@ -553,25 +475,15 @@ SONIC_BUILD_INSTRUCTION := $(MAKE) \
DOCKER_LOCKFILE_SAVE=$(DOCKER_LOCKFILE_SAVE) \
SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD=$(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD) \
SONIC_INCLUDE_SYSTEM_TELEMETRY=$(INCLUDE_SYSTEM_TELEMETRY) \
SONIC_INCLUDE_SYSTEM_GNMI=$(INCLUDE_SYSTEM_GNMI) \
SONIC_INCLUDE_SYSTEM_EVENTD=$(INCLUDE_SYSTEM_EVENTD) \
INCLUDE_DHCP_RELAY=$(INCLUDE_DHCP_RELAY) \
INCLUDE_DHCP_SERVER=$(INCLUDE_DHCP_SERVER) \
INCLUDE_MACSEC=$(INCLUDE_MACSEC) \
SONIC_INCLUDE_RESTAPI=$(INCLUDE_RESTAPI) \
SONIC_INCLUDE_MUX=$(INCLUDE_MUX) \
ENABLE_TRANSLIB_WRITE=$(ENABLE_TRANSLIB_WRITE) \
ENABLE_NATIVE_WRITE=$(ENABLE_NATIVE_WRITE) \
ENABLE_DIALOUT=$(ENABLE_DIALOUT) \
EXTRA_DOCKER_TARGETS=$(EXTRA_DOCKER_TARGETS) \
BUILD_LOG_TIMESTAMP=$(BUILD_LOG_TIMESTAMP) \
SONIC_ENABLE_IMAGE_SIGNATURE=$(ENABLE_IMAGE_SIGNATURE) \
SONIC_ENABLE_SECUREBOOT_SIGNATURE=$(SONIC_ENABLE_SECUREBOOT_SIGNATURE) \
SECURE_UPGRADE_MODE=$(SECURE_UPGRADE_MODE) \
SECURE_UPGRADE_DEV_SIGNING_KEY=$(SECURE_UPGRADE_DEV_SIGNING_KEY) \
SECURE_UPGRADE_SIGNING_CERT=$(SECURE_UPGRADE_SIGNING_CERT) \
SECURE_UPGRADE_PROD_SIGNING_TOOL=$(SECURE_UPGRADE_PROD_SIGNING_TOOL) \
SECURE_UPGRADE_PROD_TOOL_ARGS=$(SECURE_UPGRADE_PROD_TOOL_ARGS) \
SONIC_DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) \
ENABLE_HOST_SERVICE_ON_START=$(ENABLE_HOST_SERVICE_ON_START) \
SLAVE_DIR=$(SLAVE_DIR) \

@ -580,24 +492,19 @@ SONIC_BUILD_INSTRUCTION := $(MAKE) \
ENABLE_ASAN=$(ENABLE_ASAN) \
SONIC_INCLUDE_BOOTCHART=$(INCLUDE_BOOTCHART) \
SONIC_ENABLE_BOOTCHART=$(ENABLE_BOOTCHART) \
INCLUDE_FIPS=$(INCLUDE_FIPS) \
ENABLE_FIPS_FEATURE=$(ENABLE_FIPS_FEATURE) \
ENABLE_FIPS=$(ENABLE_FIPS) \
SONIC_SLAVE_DOCKER_DRIVER=$(SONIC_SLAVE_DOCKER_DRIVER) \
MIRROR_URLS=$(MIRROR_URLS) \
MIRROR_SECURITY_URLS=$(MIRROR_SECURITY_URLS) \
GZ_COMPRESS_PROGRAM=$(GZ_COMPRESS_PROGRAM) \
MIRROR_SNAPSHOT=$(MIRROR_SNAPSHOT) \
SONIC_VERSION_CONTROL_COMPONENTS=$(SONIC_VERSION_CONTROL_COMPONENTS) \
ONIE_IMAGE_PART_SIZE=$(ONIE_IMAGE_PART_SIZE) \
SONIC_OS_VERSION=$(SONIC_OS_VERSION) \
PIP_HTTP_TIMEOUT=$(PIP_HTTP_TIMEOUT) \
LEGACY_SONIC_MGMT_DOCKER=$(LEGACY_SONIC_MGMT_DOCKER) \
$(SONIC_OVERRIDE_BUILD_VARS)

.PHONY: sonic-slave-build sonic-slave-bash init reset

COLLECT_BUILD_VERSION = { scripts/collect_build_version_files.sh \$$?; }
ifeq ($(filter clean,$(MAKECMDGOALS)),)
COLLECT_BUILD_VERSION = { DBGOPT='$(DBGOPT)' scripts/collect_build_version_files.sh \$$?; }
endif

ifdef SOURCE_FOLDER
DOCKER_RUN += -v $(SOURCE_FOLDER):/var/$(USER)/src

@ -612,9 +519,6 @@ endif

export MIRROR_URLS
export MIRROR_SECURITY_URLS
export MIRROR_SNAPSHOT
export SONIC_VERSION_CONTROL_COMPONENTS
export PIP_HTTP_TIMEOUT

%:: | sonic-build-hooks
ifneq ($(filter y, $(MULTIARCH_QEMU_ENVIRON) $(CROSS_BUILD_ENVIRON)),)

@ -641,6 +545,8 @@ sonic-build-hooks:
$(Q)pushd src/sonic-build-hooks; TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) $(MAKE) all; popd
$(Q)mkdir -p $(SLAVE_DIR)/buildinfo
$(Q)cp src/sonic-build-hooks/buildinfo/sonic-build-hooks* $(SLAVE_DIR)/buildinfo
$(Q)[ "$(MULTIARCH_QEMU_ENVIRON)" == y ] && scripts/build_mirror_config.sh $(SLAVE_DIR) amd64 $(BLDENV)
$(Q)scripts/build_mirror_config.sh $(SLAVE_DIR) $(CONFIGURED_ARCH) $(BLDENV)

sonic-slave-base-build : | sonic-build-hooks
ifeq ($(MULTIARCH_QEMU_ENVIRON), y)

@ -668,28 +574,23 @@ init :

.ONESHELL : reset
reset :
$(Q)echo && (
if [ -z "$(UNATTENDED)" ]; then
echo -n "Warning! All local changes will be lost. Proceed? [y/N]: "
@read ans
else
ans=y
fi
if [ $$ans == y ]; then
echo "Resetting local repository. Please wait...";
sudo rm -rf fsroot*;
if [ "$(MULTIARCH_QEMU_ENVIRON)" == y ] && [[ "$(CONFIGURED_ARCH)" == "armhf" || "$(CONFIGURED_ARCH)" == "arm64" ]]; then
echo "Stopping march $(CONFIGURED_ARCH) docker"
sudo kill -9 `sudo cat /var/run/march/docker.pid` || true
sudo rm -f /var/run/march/docker.pid || true
fi
git clean -xfdf;
git reset --hard;
git submodule foreach --recursive 'git clean -xfdf || true';
git submodule foreach --recursive 'git reset --hard || true';
git submodule foreach --recursive 'git remote update || true';
git submodule update --init --recursive;
echo "Reset complete!";
else
echo "Reset aborted";
fi )
$(Q)echo && echo -n "Warning! All local changes will be lost. Proceed? [y/N]: "
$(Q)read ans && (
if [ $$ans == y ]; then
echo "Resetting local repository. Please wait...";
sudo rm -rf fsroot*;
if [ "$(MULTIARCH_QEMU_ENVIRON)" == y ] && [[ "$(CONFIGURED_ARCH)" == "armhf" || "$(CONFIGURED_ARCH)" == "arm64" ]]; then
echo "Stopping march $(CONFIGURED_ARCH) docker"
sudo kill -9 `sudo cat /var/run/march/docker.pid` || true
sudo rm -f /var/run/march/docker.pid || true
fi
git clean -xfdf;
git reset --hard;
git submodule foreach --recursive 'git clean -xfdf || true';
git submodule foreach --recursive 'git reset --hard || true';
git submodule foreach --recursive 'git remote update || true';
git submodule update --init --recursive;
echo "Reset complete!";
else
echo "Reset aborted";
fi )
357 README.md

@ -1,3 +1,9 @@
*static analysis*:

[![Total alerts](https://img.shields.io/lgtm/alerts/g/Azure/sonic-buildimage.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-buildimage/alerts/)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Azure/sonic-buildimage.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-buildimage/context:python)

*master builds*:

[![Barefoot](https://dev.azure.com/mssonic/build/_apis/build/status/barefoot/Azure.sonic-buildimage.official.barefoot?branchName=master&label=Barefoot)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=146&branchName=master)

@ -7,34 +13,9 @@
[![Innovium](https://dev.azure.com/mssonic/build/_apis/build/status/innovium/Azure.sonic-buildimage.official.innovium?branchName=master&label=Innovium)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=148&branchName=master)
[![Mellanox](https://dev.azure.com/mssonic/build/_apis/build/status/mellanox/Azure.sonic-buildimage.official.mellanox?branchName=master&label=Mellanox)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=139&branchName=master)
[![Marvell(armhf)](https://dev.azure.com/mssonic/build/_apis/build/status/marvell/Azure.sonic-buildimage.official.marvell-armhf?branchName=master&label=Marvell-armhf)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=141&branchName=master)
[![Marvell(arm64)](https://dev.azure.com/mssonic/build/_apis/build/status/marvell/Azure.sonic-buildimage.official.marvell-arm64?branchName=master&label=Marvell-arm64)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=999&branchName=master)
[![Nephos](https://dev.azure.com/mssonic/build/_apis/build/status/nephos/Azure.sonic-buildimage.official.nephos?branchName=master&label=Nephos)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=149&branchName=master)
[![VS](https://dev.azure.com/mssonic/build/_apis/build/status/vs/Azure.sonic-buildimage.official.vs?branchName=master&label=VS)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=142&branchName=master)

*202305 builds*:

[![Barefoot](https://dev.azure.com/mssonic/build/_apis/build/status/barefoot/Azure.sonic-buildimage.official.barefoot?branchName=202205&label=Barefoot)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=146&branchName=202305)
[![Broadcom](https://dev.azure.com/mssonic/build/_apis/build/status/broadcom/Azure.sonic-buildimage.official.broadcom?branchName=202305&label=Broadcom)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=138&branchName=202305)
[![Centec](https://dev.azure.com/mssonic/build/_apis/build/status/centec/Azure.sonic-buildimage.official.centec?branchName=202305&label=Centec)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=143&branchName=202305)
[![Centec(arm64)](https://dev.azure.com/mssonic/build/_apis/build/status/centec/Azure.sonic-buildimage.official.centec-arm64?branchName=202305&label=Centec-arm64)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=140&branchName=202305)
[![Innovium](https://dev.azure.com/mssonic/build/_apis/build/status/innovium/Azure.sonic-buildimage.official.innovium?branchName=202305&label=Innovium)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=148&branchName=202305)
[![Mellanox](https://dev.azure.com/mssonic/build/_apis/build/status/mellanox/Azure.sonic-buildimage.official.mellanox?branchName=202305&label=Mellanox)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=139&branchName=202305)
[![Marvell(armhf)](https://dev.azure.com/mssonic/build/_apis/build/status/marvell/Azure.sonic-buildimage.official.marvell-armhf?branchName=202305&label=Marvell-armhf)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=141&branchName=202305)
[![Nephos](https://dev.azure.com/mssonic/build/_apis/build/status/nephos/Azure.sonic-buildimage.official.nephos?branchName=202305&label=Nephos)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=149&branchName=202305)
[![VS](https://dev.azure.com/mssonic/build/_apis/build/status/vs/Azure.sonic-buildimage.official.vs?branchName=202305&label=VS)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=142&branchName=202305)

*202211 builds*:

[![Barefoot](https://dev.azure.com/mssonic/build/_apis/build/status/barefoot/Azure.sonic-buildimage.official.barefoot?branchName=202205&label=Barefoot)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=146&branchName=202211)
[![Broadcom](https://dev.azure.com/mssonic/build/_apis/build/status/broadcom/Azure.sonic-buildimage.official.broadcom?branchName=202211&label=Broadcom)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=138&branchName=202211)
[![Centec](https://dev.azure.com/mssonic/build/_apis/build/status/centec/Azure.sonic-buildimage.official.centec?branchName=202211&label=Centec)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=143&branchName=202211)
[![Centec(arm64)](https://dev.azure.com/mssonic/build/_apis/build/status/centec/Azure.sonic-buildimage.official.centec-arm64?branchName=202211&label=Centec-arm64)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=140&branchName=202211)
[![Innovium](https://dev.azure.com/mssonic/build/_apis/build/status/innovium/Azure.sonic-buildimage.official.innovium?branchName=202211&label=Innovium)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=148&branchName=202211)
[![Mellanox](https://dev.azure.com/mssonic/build/_apis/build/status/mellanox/Azure.sonic-buildimage.official.mellanox?branchName=202211&label=Mellanox)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=139&branchName=202211)
[![Marvell(armhf)](https://dev.azure.com/mssonic/build/_apis/build/status/marvell/Azure.sonic-buildimage.official.marvell-armhf?branchName=202211&label=Marvell-armhf)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=141&branchName=202211)
[![Nephos](https://dev.azure.com/mssonic/build/_apis/build/status/nephos/Azure.sonic-buildimage.official.nephos?branchName=202211&label=Nephos)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=149&branchName=202211)
[![VS](https://dev.azure.com/mssonic/build/_apis/build/status/vs/Azure.sonic-buildimage.official.vs?branchName=202211&label=VS)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=142&branchName=202211)

*202205 builds*:

[![Barefoot](https://dev.azure.com/mssonic/build/_apis/build/status/barefoot/Azure.sonic-buildimage.official.barefoot?branchName=202205&label=Barefoot)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=146&branchName=202205)

@ -47,7 +28,7 @@
[![Nephos](https://dev.azure.com/mssonic/build/_apis/build/status/nephos/Azure.sonic-buildimage.official.nephos?branchName=202205&label=Nephos)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=149&branchName=202205)
[![VS](https://dev.azure.com/mssonic/build/_apis/build/status/vs/Azure.sonic-buildimage.official.vs?branchName=202205&label=VS)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=142&branchName=202205)

*202111 builds*:

[![Barefoot](https://dev.azure.com/mssonic/build/_apis/build/status/barefoot/Azure.sonic-buildimage.official.barefoot?branchName=202111&label=Barefoot)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=146&branchName=202111)
[![Broadcom](https://dev.azure.com/mssonic/build/_apis/build/status/broadcom/Azure.sonic-buildimage.official.broadcom?branchName=202111&label=Broadcom)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=138&branchName=202111)

@ -80,315 +61,237 @@
[![Nephos](https://dev.azure.com/mssonic/build/_apis/build/status/nephos/Azure.sonic-buildimage.official.nephos?branchName=201911&label=Nephos)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=149&branchName=201911)
[![VS](https://dev.azure.com/mssonic/build/_apis/build/status/vs/Azure.sonic-buildimage.official.vs?branchName=201911&label=VS)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=142&branchName=201911)

*201811 builds*:

[![Broadcom](https://dev.azure.com/mssonic/build/_apis/build/status/broadcom/Azure.sonic-buildimage.official.broadcom?branchName=201811&label=Broadcom)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=138&branchName=201811)
[![Mellanox](https://dev.azure.com/mssonic/build/_apis/build/status/mellanox/Azure.sonic-buildimage.official.mellanox?branchName=201811&label=Mellanox)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=139&branchName=201811)
[![Innovium](https://dev.azure.com/mssonic/build/_apis/build/status/innovium/Azure.sonic-buildimage.official.innovium?branchName=201811&label=Innovium)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=148&branchName=201811)
[![Nephos](https://dev.azure.com/mssonic/build/_apis/build/status/nephos/Azure.sonic-buildimage.official.nephos?branchName=201811&label=Nephos)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=149&branchName=201811)
[![VS](https://dev.azure.com/mssonic/build/_apis/build/status/vs/Azure.sonic-buildimage.official.vs?branchName=201811&label=VS)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=142&branchName=201811)

# sonic-buildimage

## Build SONiC Switch Images

# Description

Following are the instructions on how to build an [(ONIE)](https://github.com/opencomputeproject/onie)
compatible network operating system (NOS) installer image for network switches,
and also how to build docker images running inside the NOS.
Note that SONiC images are build per ASIC platform.
Switches using the same ASIC platform share a common image.
For a list of supported switches and ASIC, please refer to this [list](https://github.com/sonic-net/SONiC/wiki/Supported-Devices-and-Platforms)

# Hardware

Any server can be a build image server as long as it has:

* Multiple cores to increase build speed
* Plenty of RAM (less than 8 GiB is likely to cause issues)
* 300G of free disk space
* KVM Virtualization Support.

> Note: If you are in a VM, make sure you have support for nested virtualization.
> Some cases (e.g. building OVS image) also requires extra configuration
> options to expose the full KVM interface to the VM
> (e.g. [the KVM paravirtualization support on VirtualBox](https://www.virtualbox.org/manual/ch10.html#gimproviders)).

A good choice of OS for building SONiC is currently Ubuntu 20.04.

## Prerequisites

* Install pip and jinja in host build machine, execute below commands
if j2/j2cli is not available:

```shell
sudo apt install -y python3-pip
pip3 install --user j2cli
sudo pip3 install j2cli
```
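If it is unclear whether the j2 CLI ended up on the PATH, a quick throwaway render is an easy check; the template and variable names below are made up purely for illustration:

```shell
# Render a one-line template to confirm the j2 CLI works
echo 'Hello {{ name }}!' > /tmp/hello.j2
echo '{"name": "SONiC"}' > /tmp/hello.json
j2 /tmp/hello.j2 /tmp/hello.json   # expected output: Hello SONiC!
```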
|
||||
|
||||
* Install [Docker](https://docs.docker.com/engine/install/) and configure your
|
||||
system to allow running the 'docker' command without 'sudo':
|
||||
* Add current user to the docker group: `sudo gpasswd -a ${USER} docker`
|
||||
* Log out and log back in so that your group membership is re-evaluated
|
||||
* If you are using Linux kernel 5.3 or newer, then you must use Docker 20.10.10 or newer. This is because older Docker versions did not allow the `clone3` syscall, which is now used in Bookworm.
|
||||
|
||||
> Note: If a previous installation of Docker using snap was present on the
|
||||
> system, remove it and also remove docker from snap before reinstallating docker.
|
||||
> This will avoid [known bugs that falsely report read-only filesystems issues](https://stackoverflow.com/questions/52526219/docker-mkdir-read-only-file-system)
|
||||
> during the build process.
|
||||
* Install [Docker](https://docs.docker.com/engine/install/) and configure your system to allow running the 'docker' command without 'sudo':
|
||||
* Add current user to the docker group: `sudo gpasswd -a ${USER} docker`
|
||||
* Log out and log back in so that your group membership is re-evaluated
|
||||
|
||||
## Clone the repository with all the git submodules
|
||||
|
||||
To clone the code repository recursively:
|
||||
|
||||
```shell
|
||||
git clone --recurse-submodules https://github.com/sonic-net/sonic-buildimage.git
|
||||
```
|
||||
git clone --recurse-submodules https://github.com/sonic-net/sonic-buildimage.git
|
||||
|
||||
## Usage
|
||||
|
||||
To build SONiC installer image and docker images, run the following commands:
|
||||
|
||||
```shell
|
||||
# Ensure the 'overlay' module is loaded on your development system
|
||||
sudo modprobe overlay
|
||||
# Ensure the 'overlay' module is loaded on your development system
|
||||
sudo modprobe overlay
|
||||
|
||||
# Enter the source directory
|
||||
cd sonic-buildimage
|
||||
# Enter the source directory
|
||||
cd sonic-buildimage
|
||||
|
||||
# (Optional) Checkout a specific branch. By default, it uses master branch.
|
||||
# For example, to checkout the branch 201911, use "git checkout 201911"
|
||||
git checkout [branch_name]
|
||||
# (Optional) Checkout a specific branch. By default, it uses master branch. For example, to checkout the branch 201911, use "git checkout 201911"
|
||||
git checkout [branch_name]
|
||||
|
||||
# Execute make init once after cloning the repo,
|
||||
# or after fetching remote repo with submodule updates
|
||||
make init
|
||||
# Execute make init once after cloning the repo, or after fetching remote repo with submodule updates
|
||||
make init
|
||||
|
||||
# Execute make configure once to configure ASIC
|
||||
make configure PLATFORM=[ASIC_VENDOR]
|
||||
# Execute make configure once to configure ASIC
|
||||
make configure PLATFORM=[ASIC_VENDOR]
|
||||
|
||||
# Build SONiC image with 4 jobs in parallel.
|
||||
# Note: You can set this higher, but 4 is a good number for most cases
|
||||
# and is well-tested.
|
||||
make SONIC_BUILD_JOBS=4 all
|
||||
```
|
||||
# Build SONiC image with 4 jobs in parallel.
|
||||
# Note: You can set this higher, but 4 is a good number for most cases
|
||||
# and is well-tested.
|
||||
make SONIC_BUILD_JOBS=4 all
|
||||
|
||||
The supported ASIC vendors are:
|
||||
The supported ASIC vendors are:
|
||||
|
||||
* PLATFORM=barefoot
|
||||
* PLATFORM=broadcom
|
||||
* PLATFORM=marvell
|
||||
* PLATFORM=mellanox
|
||||
* PLATFORM=cavium
|
||||
* PLATFORM=centec
|
||||
* PLATFORM=nephos
|
||||
* PLATFORM=innovium
|
||||
* PLATFORM=vs
|
||||
- PLATFORM=barefoot
|
||||
- PLATFORM=broadcom
|
||||
- PLATFORM=marvell
|
||||
- PLATFORM=mellanox
|
||||
- PLATFORM=cavium
|
||||
- PLATFORM=centec
|
||||
- PLATFORM=nephos
|
||||
- PLATFORM=innovium
|
||||
- PLATFORM=vs
|
||||
|
||||
## Usage for ARM Architecture
|
||||
|
||||
```shell
|
||||
ARM build has dependency in docker version 18. If docker version is 19, downgrade to 18 with:
|
||||
```
|
||||
sudo apt-get install --allow-downgrades -y docker-ce=5:18.09.0~3-0~ubuntu-xenial
|
||||
sudo apt-get install --allow-downgrades -y docker-ce-cli=5:18.09.0~3-0~ubuntu-xenial
|
||||
```
|
||||
|
||||
To build Arm32 bit for (ARMHF) platform
|
||||
|
||||
```shell
|
||||
# Execute make configure once to configure ASIC and ARCH
|
||||
make configure PLATFORM=[ASIC_VENDOR] PLATFORM_ARCH=armhf
|
||||
make target/sonic-[ASIC_VENDER]-armhf.bin
|
||||
```
|
||||
# Execute make configure once to configure ASIC and ARCH
|
||||
|
||||
_example:_
|
||||
make configure PLATFORM=[ASIC_VENDOR] PLATFORM_ARCH=armhf
|
||||
|
||||
```shell
|
||||
make configure PLATFORM=marvell-armhf PLATFORM_ARCH=armhf
|
||||
make target/sonic-marvell-armhf.bin
|
||||
```
|
||||
make target/sonic-[ASIC_VENDER]-armhf.bin
|
||||
|
||||
To build Arm32 bit for (ARMHF) Marvell platform on amd64 host for debian buster
|
||||
using cross-compilation, run the following commands:
|
||||
# example:
|
||||
|
||||
```shell
|
||||
# Execute make configure once to configure ASIC and ARCH for cross-compilation build
|
||||
make configure PLATFORM=marvell-armhf PLATFORM_ARCH=armhf
|
||||
|
||||
NOJESSIE=1 NOSTRETCH=1 BLDENV=buster CROSS_BLDENV=1 \
|
||||
make configure PLATFORM=marvell-armhf PLATFORM_ARCH=armhf
|
||||
make target/sonic-marvell-armhf.bin
|
||||
|
||||
# Execute Arm32 build using cross-compilation environment
|
||||
To build Arm32 bit for (ARMHF) Marvell platform on amd64 host for debian buster using cross-compilation, run the following commands:
|
||||
|
||||
NOJESSIE=1 NOSTRETCH=1 BLDENV=buster CROSS_BLDENV=1 make target/sonic-marvell-armhf.bin
|
||||
```
|
||||
# Execute make configure once to configure ASIC and ARCH for cross-compilation build
|
||||
|
||||
NOJESSIE=1 NOSTRETCH=1 BLDENV=buster CROSS_BLDENV=1 make configure PLATFORM=marvell-armhf PLATFORM_ARCH=armhf
|
||||
|
||||
# Execute Arm32 build using cross-compilation environment
|
||||
|
||||
NOJESSIE=1 NOSTRETCH=1 BLDENV=buster CROSS_BLDENV=1 make target/sonic-marvell-armhf.bin
|
||||
|
||||
Running the above Arm32 build using cross-compilation instead of qemu emulator drastically reduces the build time.
|
||||
|
||||
Running the above Arm32 build using cross-compilation instead of qemu emulator
|
||||
drastically reduces the build time.
|
||||
|
||||
To build Arm64 bit for platform
|
||||
|
||||
```shell
|
||||
# Execute make configure once to configure ASIC and ARCH
|
||||
# Execute make configure once to configure ASIC and ARCH
|
||||
|
||||
make configure PLATFORM=[ASIC_VENDOR] PLATFORM_ARCH=arm64
|
||||
make configure PLATFORM=[ASIC_VENDOR] PLATFORM_ARCH=arm64
|
||||
|
||||
# example:
|
||||
# example:
|
||||
|
||||
make configure PLATFORM=marvell-arm64 PLATFORM_ARCH=arm64
|
||||
|
||||
make configure PLATFORM=marvell-arm64 PLATFORM_ARCH=arm64
|
||||
```
|
||||
|
||||
**NOTE**:
|
||||
|
||||
* Recommend reserving at least 100G free space to build one platform
|
||||
with a single job.
|
||||
The build process will use more disk if you are setting `SONIC_BUILD_JOBS`
|
||||
to more than 1.
|
||||
* If Docker's workspace folder, `/var/lib/docker`,
|
||||
resides on a partition without sufficient free space,
|
||||
you may encounter an error like the following during a Docker container build job:
|
||||
- Recommend reserving at least 100G free space to build one platform with a single job. The build process will use more disk if you are setting `SONIC_BUILD_JOBS` to more than 1.
|
||||
- If Docker's workspace folder, `/var/lib/docker`, resides on a partition without sufficient free space, you may encounter an error like the following during a Docker container build job:
|
||||
|
||||
`/usr/bin/tar: /path/to/sonic-buildimage/<some_file>:
|
||||
Cannot write: No space left on device`
|
||||
`/usr/bin/tar: /path/to/sonic-buildimage/<some_file>: Cannot write: No space left on device`
|
||||
|
||||
The solution is to [move the directory](https://www.ibm.com/docs/en/z-logdata-analytics/5.1.0?topic=compose-relocating-docker-root-directory)
|
||||
to a partition with more free space.
|
||||
* Use
|
||||
`http_proxy=[your_proxy] https_proxy=[your_proxy] no_proxy=[your_no_proxy] make`
|
||||
to enable http(s) proxy in the build process.
|
||||
* Add your user account to `docker` group and use your user account to make.
|
||||
`root` or `sudo` are not supported.
|
||||
The solution is to [move the directory](https://linuxconfig.org/how-to-move-docker-s-default-var-lib-docker-to-another-directory-on-ubuntu-debian-linux) to a partition with more free space.
|
||||
- Use `http_proxy=[your_proxy] https_proxy=[your_proxy] no_proxy=[your_no_proxy] make` to enable http(s) proxy in the build process.
|
||||
- Add your user account to `docker` group and use your user account to make. `root` or `sudo` are not supported.
|
||||
|
||||
The SONiC installer contains all docker images needed.
|
||||
SONiC uses one image for all devices of a same ASIC vendor.
|
||||
The SONiC installer contains all docker images needed. SONiC uses one image for all devices of a same ASIC vendor.
|
||||
|
||||
For Broadcom ASIC, we build ONIE and EOS image.
|
||||
EOS image is used for Arista devices,
|
||||
ONIE image is used for all other Broadcom ASIC based devices.
|
||||
For Broadcom ASIC, we build ONIE and EOS image. EOS image is used for Arista devices, ONIE image is used for all other Broadcom ASIC based devices.
|
||||
|
||||
```shell
|
||||
make configure PLATFORM=broadcom
|
||||
# build debian stretch required targets
|
||||
BLDENV=stretch make stretch
|
||||
# build ONIE image
|
||||
make target/sonic-broadcom.bin
|
||||
# build EOS image
|
||||
make target/sonic-aboot-broadcom.swi
|
||||
```
|
||||
make configure PLATFORM=broadcom
|
||||
# build debian stretch required targets
|
||||
BLDENV=stretch make stretch
|
||||
# build ONIE image
|
||||
make target/sonic-broadcom.bin
|
||||
# build EOS image
|
||||
make target/sonic-aboot-broadcom.swi
|
||||
|
||||
You may find the rules/config file useful.
|
||||
It contains configuration options for the build process,
|
||||
like adding more verbosity or showing dependencies,
|
||||
username and password for base image etc.
|
||||
You may find the rules/config file useful. It contains configuration options for the build process, like adding more verbosity or showing dependencies, username and password for base image etc.
|
||||
|
||||
Every docker image is built and saved to the target/ directory. So, for instance, to build only docker-database, execute:

```shell
make target/docker-database.gz
```

The same goes for debian packages, which are under target/debs/:

```shell
make target/debs/swss_1.0.0_amd64.deb
```

Every target has a clean target, so in order to clean swss, execute:

```shell
make target/debs/swss_1.0.0_amd64.deb-clean
```

It is recommended to use clean targets to clean all packages that are built together, like dev packages for instance. To become more familiar with the build process and make changes to it, it is recommended to read this short [Documentation](README.buildsystem.md).
## Build debug dockers and debug SONiC installer image

The SONiC build system supports building dockers and the ONIE image with debug tools and debug symbols, to help with live & core debugging. For details refer to the [SONiC Buildimage Guide](https://github.com/sonic-net/sonic-buildimage/blob/master/README.buildsystem.md); a short sketch follows.
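As a rough illustration (the authoritative flag names live in rules/config and the Buildimage Guide; `INSTALL_DEBUG_TOOLS` is assumed here), a debug build looks like:

```shell
# Build an installer whose dockers include debug symbols and debug tools.
# Confirm the flag name in rules/config for your branch before relying on it.
make configure PLATFORM=broadcom
INSTALL_DEBUG_TOOLS=y make target/sonic-broadcom.bin
# The same flag can be applied to an individual docker target, e.g.:
INSTALL_DEBUG_TOOLS=y make target/docker-orchagent.gz
```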
## SAI Version

Please refer to the [SONiC roadmap](https://github.com/sonic-net/SONiC/wiki/Sonic-Roadmap-Planning) for the SAI version used in each SONiC release.
## Notes

- If you are running make for the first time, a sonic-slave-${USER} docker image will be built automatically. This may take a while, but it is a one-time action, so please be patient (see the sketch after this list for working inside that container).
- The root user account is disabled. However, the created user can `sudo`.
- The target directory is `./target`, containing the NOS installer image and docker images.
- sonic-generic.bin: SONiC switch installer image (ONIE compatible)
- sonic-aboot.bin: SONiC switch installer image (Aboot compatible)
- docker-base.gz: base docker image that the other docker images are built from, only used in the build process (gzip tar archive)
- docker-database.gz: docker image for the in-memory key-value store, used for inter-process communication (gzip tar archive)
- docker-fpm.gz: docker image for quagga with the fpm module enabled (gzip tar archive)
- docker-orchagent.gz: docker image for SWitch State Service (SWSS) (gzip tar archive)
- docker-syncd-brcm.gz: docker image for the daemon that syncs the database and the Broadcom switch ASIC (gzip tar archive)
- docker-syncd-cavm.gz: docker image for the daemon that syncs the database and the Cavium switch ASIC (gzip tar archive)
- docker-syncd-mlnx.gz: docker image for the daemon that syncs the database and the Mellanox switch ASIC (gzip tar archive)
- docker-syncd-nephos.gz: docker image for the daemon that syncs the database and the Nephos switch ASIC (gzip tar archive)
- docker-syncd-invm.gz: docker image for the daemon that syncs the database and the Innovium switch ASIC (gzip tar archive)
- docker-sonic-p4.gz: all-in-one docker image for the P4 software switch (gzip tar archive)
- docker-sonic-vs.gz: all-in-one docker image for the software virtual switch (gzip tar archive)
- docker-sonic-mgmt.gz: docker image for [managing, configuring and monitoring SONiC](https://github.com/sonic-net/sonic-mgmt) (gzip tar archive)
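If you want to poke around inside that build container rather than wait for full image targets, the helper targets below are commonly available in this repository's Makefile; treat this as a sketch and verify they exist on your branch:

```shell
make sonic-slave-build   # build (or refresh) the sonic-slave-${USER} image
make sonic-slave-bash    # open an interactive shell inside the build container
```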
## SONiC Image Azure Pipelines

All SONiC project build pipelines can be found at the [Download Portal for SONiC Images](https://sonic-build.azurewebsites.net).
## Contribution Guide

All contributors must sign a contribution license agreement before contributions can be accepted. Visit [EasyCLA - Linux Foundation](https://easycla.lfx.linuxfoundation.org).

## GitHub Workflow

We're following basic GitHub Flow. If you have no idea what we're talking about, check out [GitHub's official guide](https://guides.github.com/introduction/flow/). Note that merge is only performed by the repository maintainer.

Guide for performing commits:

* Isolate each commit to one component/bugfix/issue/feature
* Use a standard commit message format:

> [component/folder touched]: Description intent of your changes
>
> [List of changes]
>
> Signed-off-by: Your Name your@email.com

For example:

> swss-common: Stabilize the ConsumerTable
>
> * Fixing autoreconf
> * Fixing unit-tests by adding checkers and initialize the DB before start
> * Adding the ability to select from multiple channels
> * Health-Monitor - The idea of the patch is that if something went wrong with the notification channel,
>   we will have the option to know about it (Query the LLEN table length).
>
> Signed-off-by: user@dev.null

* Each developer should fork this repository and [add the team as a Contributor](https://help.github.com/articles/adding-collaborators-to-a-personal-repository)
* Push your changes to your private fork and do "pull-request" to this repository
* Use a pull request to do code review
* Use issues to keep track of what is going on

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
@@ -28,7 +28,6 @@ resources:
  - repository: sonic-mgmt
    type: github
    name: sonic-net/sonic-mgmt
    ref: master
    endpoint: sonic-net
  - repository: buildimage
    type: github
@@ -42,12 +41,7 @@ variables:
- name: CACHE_MODE
  value: rcache
- name: ENABLE_FIPS
  value: n
- name: BUILD_BRANCH
  ${{ if eq(variables['Build.Reason'], 'PullRequest') }}:
    value: $(System.PullRequest.TargetBranch)
  ${{ else }}:
    value: $(Build.SourceBranchName)
  value: y

stages:
- stage: BuildVS
@@ -55,7 +49,7 @@ stages:
  jobs:
  - template: .azure-pipelines/azure-pipelines-build.yml
    parameters:
      buildOptions: 'USERNAME=admin SONIC_BUILD_JOBS=$(nproc) BUILD_MULTIASIC_KVM=y INCLUDE_DHCP_SERVER=y ${{ variables.VERSION_CONTROL_OPTIONS }}'
      buildOptions: 'USERNAME=admin SONIC_BUILD_JOBS=$(nproc) BUILD_MULTIASIC_KVM=y ${{ variables.VERSION_CONTROL_OPTIONS }}'
      jobGroups:
      - name: vs

@@ -70,28 +64,18 @@ stages:
      - name: broadcom
        variables:
          swi_image: yes
          INCLUDE_RESTAPI: y
      - name: mellanox
        variables:
          dbg_image: yes
          docker_syncd_rpc_image: yes
          platform_rpc: mlnx
      - name: marvell-arm64
        pool: sonicbld-arm64
        variables:
          PLATFORM_ARCH: arm64
      - name: marvell-armhf
        pool: sonicbld-armhf
        timeoutInMinutes: 1200
        variables:
          PLATFORM_ARCH: armhf
          INCLUDE_RESTAPI: y

- stage: Test
  dependsOn: BuildVS
  condition: and(succeeded(), and(ne(stageDependencies.BuildVS.outputs['vs.SetVar.SKIP_VSTEST'], 'YES'), in(dependencies.BuildVS.result, 'Succeeded', 'SucceededWithIssues')))
  variables:
  - group: SONiC-Elastictest
  - group: Testbed-Tools
  - name: inventory
    value: veos_vtb
  - name: testbed_file
@@ -118,30 +102,27 @@ stages:
          sudo rm -rf ../*.deb
        displayName: "Cleanup"

      - task: DownloadPipelineArtifact@2
        inputs:
          source: specific
          project: build
          pipeline: 9
          artifact: sonic-swss-common.amd64.ubuntu20_04
          runVersion: 'latestFromBranch'
          runBranch: 'refs/heads/master'
        displayName: "Download sonic swss common deb packages"

      - task: DownloadPipelineArtifact@2
        inputs:
          artifact: sonic-buildimage.vs
        displayName: "Download sonic-buildimage.vs artifact"

      - script: |
          sudo src/sonic-swss-common/.azure-pipelines/build_and_install_module.sh
        displayName: "Install kernel modules"

      - script: |
          set -x
          sudo apt-get update
          sudo apt-get install -y make libtool m4 autoconf dh-exec debhelper cmake pkg-config \
              libhiredis-dev libnl-3-dev libnl-genl-3-dev libnl-route-3-dev libnl-nf-3-dev swig \
              libpython2.7-dev libboost-dev libboost-serialization-dev uuid-dev libzmq5 libzmq3-dev python3-pip \
              cmake libgtest-dev libgmock-dev libyang-dev nlohmann-json3-dev
          sudo pip3 install pytest
          cd src/sonic-swss-common
          ./autogen.sh
          dpkg-buildpackage -rfakeroot -us -uc -b -j$(nproc)
          sudo dpkg -i --force-confask,confnew ../libswsscommon_*.deb
          sudo dpkg -i ../python3-swsscommon_*.deb
        displayName: "Compile sonic swss common"

      - script: |
          sudo apt-get install libyang0.16 -y
          sudo dpkg -i --force-confask,confnew ../libswsscommon_1.0.0_amd64.deb
          sudo dpkg -i ../python3-swsscommon_1.0.0_amd64.deb
          sudo docker load -i ../target/docker-sonic-vs.gz
          docker tag docker-sonic-vs:latest docker-sonic-vs:$(Build.BuildNumber)
          username=$(id -un)
@@ -160,115 +141,226 @@ stages:
            testResultsFiles: '**/tr.xml'
            testRunTitle: vstest

  - job: t0_elastictest
    pool: ubuntu-20.04
    displayName: "kvmtest-t0 by Elastictest"
    timeoutInMinutes: 240
  - job: t0_part1
    pool: sonictest
    displayName: "kvmtest-t0-part1"
    timeoutInMinutes: 400
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_CLASSICAL_TEST, 'YES'))
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-elastictest-template.yml@sonic-mgmt
    - template: .azure-pipelines/run-test-template.yml
      parameters:
        dut: vlab-01
        tbname: vms-kvm-t0
        ptf_name: ptf_vms6-1
        tbtype: t0
        vmtype: ceos
        section: part-1

  - job: t0_part2
    pool: sonictest
    displayName: "kvmtest-t0-part2"
    timeoutInMinutes: 400
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_CLASSICAL_TEST, 'YES'))
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-template.yml
      parameters:
        dut: vlab-01
        tbname: vms-kvm-t0
        ptf_name: ptf_vms6-1
        tbtype: t0
        vmtype: ceos
        section: part-2

  - job: t0_testbedv2
    pool:
      vmImage: 'ubuntu-20.04'
    displayName: "kvmtest-t0 by TestbedV2"
    timeoutInMinutes: 1080
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES'))
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-scheduler-template.yml
      parameters:
        TOPOLOGY: t0
        MIN_WORKER: $(T0_INSTANCE_NUM)
        MAX_WORKER: $(T0_INSTANCE_NUM)
        MGMT_BRANCH: $(BUILD_BRANCH)
        MIN_WORKER: 2
        MAX_WORKER: 3

  - job: t0_2vlans_elastictest
    pool: ubuntu-20.04
    displayName: "kvmtest-t0-2vlans by Elastictest"
    timeoutInMinutes: 240
  - job: t0_2vlans_testbedv2
    pool:
      vmImage: 'ubuntu-20.04'
    displayName: "kvmtest-t0-2vlans by TestbedV2"
    timeoutInMinutes: 1080
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES'))
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-elastictest-template.yml@sonic-mgmt
    - template: .azure-pipelines/run-test-scheduler-template.yml
      parameters:
        TOPOLOGY: t0
        TEST_SET: t0-2vlans
        MIN_WORKER: $(T0_2VLANS_INSTANCE_NUM)
        MAX_WORKER: $(T0_2VLANS_INSTANCE_NUM)
        MGMT_BRANCH: $(BUILD_BRANCH)
        MAX_WORKER: 1
        DEPLOY_MG_EXTRA_PARAMS: "-e vlan_config=two_vlan_a"

  - job: t1_lag_elastictest
    pool: ubuntu-20.04
    displayName: "kvmtest-t1-lag by Elastictest"
    timeoutInMinutes: 240
  - job:
    pool:
      vmImage: 'ubuntu-20.04'
    displayName: "kvmtest-t0"
    dependsOn:
    - t0_part1
    - t0_part2
    - t0_testbedv2
    - t0_2vlans_testbedv2
    condition: always()
    continueOnError: false
    variables:
      resultOfPart1: $[ dependencies.t0_part1.result ]
      resultOfPart2: $[ dependencies.t0_part2.result ]
      resultOfT0TestbedV2: $[ dependencies.t0_testbedv2.result ]
      resultOfT02VlansTestbedV2: $[ dependencies.t0_2vlans_testbedv2.result ]

    steps:
    - script: |
        if [ $(resultOfT0TestbedV2) == "Succeeded" ] && [ $(resultOfT02VlansTestbedV2) == "Succeeded" ]; then
          echo "TestbedV2 t0 passed."
          exit 0
        fi

        if [ $(resultOfPart1) == "Succeeded" ] && [ $(resultOfPart2) == "Succeeded" ]; then
          echo "Classic t0 jobs(both part1 and part2) passed."
          exit 0
        fi

        echo "Both classic and TestbedV2 t0 jobs failed! Please check the detailed information. (Any of them passed, t0 will be considered as passed)"
        exit 1

  - job: t1_lag_classic
    pool: sonictest-t1-lag
    displayName: "kvmtest-t1-lag classic"
    timeoutInMinutes: 400
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_CLASSICAL_TEST, 'YES'))
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-elastictest-template.yml@sonic-mgmt
    - template: .azure-pipelines/run-test-template.yml
      parameters:
        dut: vlab-03
        tbname: vms-kvm-t1-lag
        ptf_name: ptf_vms6-2
        tbtype: t1-lag
        vmtype: ceos

  - job: t1_lag_testbedv2
    pool:
      vmImage: 'ubuntu-20.04'
    displayName: "kvmtest-t1-lag by TestbedV2"
    timeoutInMinutes: 600
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES'))
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-scheduler-template.yml
      parameters:
        TOPOLOGY: t1-lag
        MIN_WORKER: $(T1_LAG_INSTANCE_NUM)
        MAX_WORKER: $(T1_LAG_INSTANCE_NUM)
        MGMT_BRANCH: $(BUILD_BRANCH)
        MIN_WORKER: 2
        MAX_WORKER: 3

  - job: multi_asic_elastictest
    displayName: "kvmtest-multi-asic-t1-lag by Elastictest"
    pool: ubuntu-20.04
    timeoutInMinutes: 240
  - job:
    pool:
      vmImage: 'ubuntu-20.04'
    displayName: "kvmtest-t1-lag"
    dependsOn:
    - t1_lag_classic
    - t1_lag_testbedv2
    condition: always()
    continueOnError: false
    variables:
      resultOfClassic: $[ dependencies.t1_lag_classic.result ]
      resultOfTestbedV2: $[ dependencies.t1_lag_testbedv2.result ]
    steps:
    - template: .azure-pipelines/run-test-elastictest-template.yml@sonic-mgmt
    - script: |
        if [ $(resultOfClassic) == "Succeeded" ] || [ $(resultOfTestbedV2) == "Succeeded" ]; then
          echo "One or both of t1_lag_classic and t1_lag_testbedv2 passed."
          exit 0
        else
          echo "Both t1_lag_classic and t1_lag_testbedv2 failed! Please check the detailed information."
          exit 1
        fi

  - job:
    pool: sonictest-sonic-t0
    displayName: "kvmtest-t0-sonic"
    timeoutInMinutes: 360
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_CLASSICAL_TEST, 'YES'))
    continueOnError: true
    steps:
    - template: .azure-pipelines/run-test-template.yml
      parameters:
        dut: vlab-02
        tbname: vms-kvm-t0-64-32
        ptf_name: ptf_vms6-1
        tbtype: t0-sonic
        vmtype: vsonic

  - job:
    pool: sonictest-ma
    displayName: "kvmtest-multi-asic-t1-lag"
    timeoutInMinutes: 240
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_CLASSICAL_TEST, 'YES'))
    continueOnError: true
    steps:
    - template: .azure-pipelines/run-test-template.yml
      parameters:
        dut: vlab-08
        tbname: vms-kvm-four-asic-t1-lag
        ptf_name: ptf_vms6-4
        tbtype: multi-asic-t1-lag-pr
        image: sonic-4asic-vs.img.gz

  - job: multi_asic_testbedv2
    displayName: "kvmtest-multi-asic-t1-lag by TestbedV2"
    pool:
      vmImage: 'ubuntu-20.04'
    timeoutInMinutes: 1080
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES'))
    continueOnError: true
    steps:
    - template: .azure-pipelines/run-test-scheduler-template.yml
      parameters:
        TOPOLOGY: t1-8-lag
        TEST_SET: multi-asic-t1-lag
        MIN_WORKER: $(MULTI_ASIC_INSTANCE_NUM)
        MAX_WORKER: $(MULTI_ASIC_INSTANCE_NUM)
        MIN_WORKER: 1
        MAX_WORKER: 1
        NUM_ASIC: 4
        MGMT_BRANCH: $(BUILD_BRANCH)

  - job: dualtor_elastictest
    pool: ubuntu-20.04
    displayName: "kvmtest-dualtor-t0 by Elastictest"
    timeoutInMinutes: 240
  - job: dualtor_testbedv2
    pool:
      vmImage: 'ubuntu-20.04'
    displayName: "kvmtest-dualtor-t0 by TestbedV2"
    timeoutInMinutes: 1080
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES'))
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-elastictest-template.yml@sonic-mgmt
    - template: .azure-pipelines/run-test-scheduler-template.yml
      parameters:
        TOPOLOGY: dualtor
        MIN_WORKER: $(T0_DUALTOR_INSTANCE_NUM)
        MAX_WORKER: $(T0_DUALTOR_INSTANCE_NUM)
        MGMT_BRANCH: $(BUILD_BRANCH)
        MIN_WORKER: 1
        MAX_WORKER: 1
        COMMON_EXTRA_PARAMS: "--disable_loganalyzer "

  - job: sonic_t0_elastictest
    displayName: "kvmtest-t0-sonic by Elastictest"
    pool: ubuntu-20.04
    timeoutInMinutes: 240
    continueOnError: false
  - job: sonic_t0_testbedv2
    displayName: "kvmtest-t0-sonic by TestbedV2"
    pool:
      vmImage: 'ubuntu-20.04'
    timeoutInMinutes: 1080
    condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES'))
    continueOnError: true
    steps:
    - template: .azure-pipelines/run-test-elastictest-template.yml@sonic-mgmt
    - template: .azure-pipelines/run-test-scheduler-template.yml
      parameters:
        TOPOLOGY: t0-64-32
        MIN_WORKER: $(T0_SONIC_INSTANCE_NUM)
        MAX_WORKER: $(T0_SONIC_INSTANCE_NUM)
        MIN_WORKER: 1
        MAX_WORKER: 2
        TEST_SET: t0-sonic
        MGMT_BRANCH: $(BUILD_BRANCH)
        COMMON_EXTRA_PARAMS: "--neighbor_type=sonic "
        COMMON_EXTRA_PARAMS: "--neighbor_type=sonic --enable_macsec --macsec_profile=128_SCI,256_XPN_SCI"
        VM_TYPE: vsonic

  - job: dpu_elastictest
    displayName: "kvmtest-dpu by Elastictest"
    timeoutInMinutes: 240
    continueOnError: false
    pool: ubuntu-20.04
    steps:
    - template: .azure-pipelines/run-test-elastictest-template.yml@sonic-mgmt
      parameters:
        TOPOLOGY: dpu
        MIN_WORKER: $(T0_SONIC_INSTANCE_NUM)
        MAX_WORKER: $(T0_SONIC_INSTANCE_NUM)
        MGMT_BRANCH: $(BUILD_BRANCH)

  # - job: wan_elastictest
  #   displayName: "kvmtest-wan by Elastictest"
  #   pool: ubuntu-20.04
  #   timeoutInMinutes: 240
  #   continueOnError: false
  #   steps:
  #   - template: .azure-pipelines/run-test-scheduler-template.yml
  #     parameters:
  #       TOPOLOGY: wan-pub
  #       MIN_WORKER: $(WAN_INSTANCE_NUM)
  #       MAX_WORKER: $(WAN_INSTANCE_NUM)
  #       COMMON_EXTRA_PARAMS: "--skip_sanity "
        SPECIFIED_PARAMS: '{\"test_pretest.py\":[\"--completeness_level=confident\",\"--allow_recover\"],\"test_posttest.py\":[\"--completeness_level=confident\",\"--allow_recover\"]}'
build_debian.sh

@@ -31,9 +31,9 @@ set -x -e
CONFIGURED_ARCH=$([ -f .arch ] && cat .arch || echo amd64)

## docker engine version (with platform)
DOCKER_VERSION=5:24.0.2-1~debian.12~$IMAGE_DISTRO
CONTAINERD_IO_VERSION=1.6.21-1
LINUX_KERNEL_VERSION=6.1.0-11-2
DOCKER_VERSION=5:20.10.14~3-0~debian-$IMAGE_DISTRO
CONTAINERD_IO_VERSION=1.5.11-1
LINUX_KERNEL_VERSION=5.10.0-18-2

## Working directory to prepare the file system
FILESYSTEM_ROOT=./fsroot
@@ -50,8 +50,8 @@ TRUSTED_GPG_DIR=$BUILD_TOOL_PATH/trusted.gpg.d
    echo "Error: Invalid ONIE_IMAGE_PART_SIZE in onie image config file"
    exit 1
}
[ -n "$INSTALLER_PAYLOAD" ] || {
    echo "Error: Invalid INSTALLER_PAYLOAD in onie image config file"
[ -n "$ONIE_INSTALLER_PAYLOAD" ] || {
    echo "Error: Invalid ONIE_INSTALLER_PAYLOAD in onie image config file"
    exit 1
}
[ -n "$FILESYSTEM_SQUASHFS" ] || {
@@ -59,39 +59,29 @@ TRUSTED_GPG_DIR=$BUILD_TOOL_PATH/trusted.gpg.d
    exit 1
}

if [ "$IMAGE_TYPE" = "aboot" ]; then
    TARGET_BOOTLOADER="aboot"
fi

## Check if not a last stage of RFS build
if [[ $RFS_SPLIT_LAST_STAGE != y ]]; then

## Prepare the file system directory
if [[ -d $FILESYSTEM_ROOT ]]; then
    sudo rm -rf $FILESYSTEM_ROOT || die "Failed to clean chroot directory"
fi
mkdir -p $FILESYSTEM_ROOT
mkdir -p $FILESYSTEM_ROOT/$PLATFORM_DIR
mkdir -p $FILESYSTEM_ROOT/$PLATFORM_DIR/grub
touch $FILESYSTEM_ROOT/$PLATFORM_DIR/firsttime

bootloader_packages=""
if [ "$TARGET_BOOTLOADER" != "aboot" ]; then
    mkdir -p $FILESYSTEM_ROOT/$PLATFORM_DIR/grub
    bootloader_packages="grub2-common"
fi

## ensure proc is mounted
sudo mount proc /proc -t proc || true

## make / as a mountpoint in chroot env, needed by dockerd
pushd $FILESYSTEM_ROOT
sudo mount --bind . .
popd

## Build the host debian base system
echo '[INFO] Build host debian base system...'
TARGET_PATH=$TARGET_PATH scripts/build_debian_base_system.sh $CONFIGURED_ARCH $IMAGE_DISTRO $FILESYSTEM_ROOT

# Prepare buildinfo
sudo SONIC_VERSION_CACHE=${SONIC_VERSION_CACHE} \
    DBGOPT="${DBGOPT}" \
    scripts/prepare_debian_image_buildinfo.sh $CONFIGURED_ARCH $IMAGE_DISTRO $FILESYSTEM_ROOT $http_proxy

sudo scripts/prepare_debian_image_buildinfo.sh $CONFIGURED_ARCH $IMAGE_DISTRO $FILESYSTEM_ROOT $http_proxy

sudo chown root:root $FILESYSTEM_ROOT

@@ -120,7 +110,6 @@ sudo LANG=C chroot $FILESYSTEM_ROOT mount
## Pointing apt to public apt mirrors and getting latest packages, needed for latest security updates
scripts/build_mirror_config.sh files/apt $CONFIGURED_ARCH $IMAGE_DISTRO
sudo cp files/apt/sources.list.$CONFIGURED_ARCH $FILESYSTEM_ROOT/etc/apt/sources.list
sudo cp files/apt/apt-retries-count $FILESYSTEM_ROOT/etc/apt/apt.conf.d/
sudo cp files/apt/apt.conf.d/{81norecommends,apt-{clean,gzip-indexes,no-languages},no-check-valid-until} $FILESYSTEM_ROOT/etc/apt/apt.conf.d/

## Note: set lang to prevent locale warnings in your chroot
@@ -131,8 +120,6 @@ echo '[INFO] Install and setup eatmydata'
sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install eatmydata
sudo LANG=C chroot $FILESYSTEM_ROOT ln -s /usr/bin/eatmydata /usr/local/bin/dpkg
echo 'Dir::Bin::dpkg "/usr/local/bin/dpkg";' | sudo tee $FILESYSTEM_ROOT/etc/apt/apt.conf.d/00image-install-eatmydata > /dev/null
## Note: dpkg hook conflict with eatmydata
sudo LANG=C chroot $FILESYSTEM_ROOT rm /usr/local/sbin/dpkg -f

echo '[INFO] Install packages for building image'
sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install makedev psmisc
@@ -148,12 +135,6 @@ if [[ $CONFIGURED_ARCH == armhf || $CONFIGURED_ARCH == arm64 ]]; then
else
    sudo LANG=C chroot $FILESYSTEM_ROOT /bin/bash -c 'cd /dev && MAKEDEV generic'
fi

## docker and mkinitramfs on target system will use pigz/unpigz automatically
if [[ $GZ_COMPRESS_PROGRAM == pigz ]]; then
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install pigz
fi

## Install initramfs-tools and linux kernel
## Note: initramfs-tools recommends depending on busybox, and we really want busybox for
## 1. commands such as touch
@@ -175,8 +156,7 @@ if [[ $CONFIGURED_ARCH == amd64 ]]; then
fi

## Sign the Linux kernel
# note: when flag SONIC_ENABLE_SECUREBOOT_SIGNATURE is enabled the Secure Upgrade flags should be disabled (no_sign) to avoid conflict between the features.
if [ "$SONIC_ENABLE_SECUREBOOT_SIGNATURE" = "y" ] && [ "$SECURE_UPGRADE_MODE" != 'dev' ] && [ "$SECURE_UPGRADE_MODE" != "prod" ]; then
if [ "$SONIC_ENABLE_SECUREBOOT_SIGNATURE" = "y" ]; then
    if [ ! -f $SIGNING_KEY ]; then
        echo "Error: SONiC linux kernel signing key missing"
        exit 1
@@ -256,40 +236,35 @@ sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install apparmor
sudo cp files/image_config/ntp/ntp-apparmor $FILESYSTEM_ROOT/etc/apparmor.d/local/usr.sbin.ntpd
sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install apt-transport-https \
    ca-certificates \
    curl
    curl \
    gnupg2 \
    software-properties-common
if [[ $CONFIGURED_ARCH == armhf ]]; then
    # update ssl ca certificates for secure pem
    sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT c_rehash
fi
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT curl -o /tmp/docker.asc -fsSL https://download.docker.com/linux/debian/gpg
sudo LANG=C chroot $FILESYSTEM_ROOT mv /tmp/docker.asc /etc/apt/trusted.gpg.d/
sudo tee $FILESYSTEM_ROOT/etc/apt/sources.list.d/docker.list >/dev/null <<EOF
deb [arch=$CONFIGURED_ARCH] https://download.docker.com/linux/debian $IMAGE_DISTRO stable
EOF
sudo LANG=C chroot $FILESYSTEM_ROOT add-apt-repository \
    "deb [arch=$CONFIGURED_ARCH] https://download.docker.com/linux/debian $IMAGE_DISTRO stable"
sudo LANG=C chroot $FILESYSTEM_ROOT apt-get update
sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=${CONTAINERD_IO_VERSION}

# Uninstall 'python3-gi' installed as part of 'software-properties-common' to remove debian version of 'PyGObject'
# pip version of 'PyGObject' will be installed during installation of 'sonic-host-services'
sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y remove software-properties-common gnupg2 python3-gi

install_kubernetes () {
    local ver="$1"
    ## Install k8s package from storage
    local storage_prefix="https://sonicstorage.blob.core.windows.net/public/kubernetes"
    sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT curl -o /tmp/cri-tools.deb -fsSL \
        ${storage_prefix}/cri-tools_${KUBERNETES_CRI_TOOLS_VERSION}_${CONFIGURED_ARCH}.deb
    sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT curl -o /tmp/kubernetes-cni.deb -fsSL \
        ${storage_prefix}/kubernetes-cni_${KUBERNETES_CNI_VERSION}_${CONFIGURED_ARCH}.deb
    sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT curl -o /tmp/kubelet.deb -fsSL \
        ${storage_prefix}/kubelet_${ver}_${CONFIGURED_ARCH}.deb
    sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT curl -o /tmp/kubectl.deb -fsSL \
        ${storage_prefix}/kubectl_${ver}_${CONFIGURED_ARCH}.deb
    sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT curl -o /tmp/kubeadm.deb -fsSL \
        ${storage_prefix}/kubeadm_${ver}_${CONFIGURED_ARCH}.deb

    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install -f /tmp/cri-tools.deb
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install -f /tmp/kubernetes-cni.deb
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install -f /tmp/kubelet.deb
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install -f /tmp/kubectl.deb
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install -f /tmp/kubeadm.deb
    sudo LANG=C chroot $FILESYSTEM_ROOT rm -f /tmp/{cri-tools,kubernetes-cni,kubelet,kubeadm,kubectl}.deb
    sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT curl -fsSL \
        https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
        sudo LANG=C chroot $FILESYSTEM_ROOT apt-key add -
    ## Check out the sources list update matches current Debian version
    sudo cp files/image_config/kubernetes/kubernetes.list $FILESYSTEM_ROOT/etc/apt/sources.list.d/
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get update
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install kubelet=${ver}
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install kubectl=${ver}
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install kubeadm=${ver}
}

if [ "$INCLUDE_KUBERNETES" == "y" ]
@@ -297,6 +272,7 @@ then
    ## Install Kubernetes
    echo '[INFO] Install kubernetes'
    install_kubernetes ${KUBERNETES_VERSION}
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install kubernetes-cni=${KUBERNETES_CNI_VERSION}
else
    echo '[INFO] Skipping Install kubernetes'
fi
@@ -307,7 +283,18 @@ then
    echo '[INFO] Install kubernetes master'
    install_kubernetes ${MASTER_KUBERNETES_VERSION}

    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install hyperv-daemons xmlstarlet parted netcat-openbsd
    sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT curl -fsSL \
        https://packages.microsoft.com/keys/microsoft.asc | \
        sudo LANG=C chroot $FILESYSTEM_ROOT apt-key add -
    sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT curl -fsSL \
        https://packages.microsoft.com/keys/msopentech.asc | \
        sudo LANG=C chroot $FILESYSTEM_ROOT apt-key add -
    echo "deb [arch=amd64] https://packages.microsoft.com/repos/azurecore-debian $IMAGE_DISTRO main" | \
        sudo tee $FILESYSTEM_ROOT/etc/apt/sources.list.d/azure.list
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get update
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install hyperv-daemons gnupg xmlstarlet
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install metricsext2
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y remove gnupg
    sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT curl -o /tmp/cri-dockerd.deb -fsSL \
        https://github.com/Mirantis/cri-dockerd/releases/download/v${MASTER_CRI_DOCKERD}/cri-dockerd_${MASTER_CRI_DOCKERD}.3-0.debian-${IMAGE_DISTRO}_amd64.deb
    sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install -f /tmp/cri-dockerd.deb
@@ -376,14 +363,14 @@ sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y in
    gdisk \
    sysfsutils \
    squashfs-tools \
    $bootloader_packages \
    rsyslog \
    grub2-common \
    screen \
    hping3 \
    tcptraceroute \
    mtr-tiny \
    locales \
    cgroup-tools \
    ipmitool \
    ndisc6 \
    makedumpfile \
    conntrack \
@@ -392,20 +379,19 @@ sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y in
    python3-pip \
    python-is-python3 \
    cron \
    libprotobuf32 \
    libgrpc29 \
    libgrpc++1.51 \
    libprotobuf23 \
    libgrpc++1 \
    libgrpc10 \
    haveged \
    fdisk \
    gpg \
    jq \
    auditd \
    linux-perf \
    resolvconf \
    lsof \
    sysstat \
    xxd \
    zstd
    linux-perf

# default rsyslog version is 8.2110.0 which has a bug on log rate limit,
# use backport version
sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -t bullseye-backports -y install rsyslog

# Have systemd create the auditd log directory
sudo mkdir -p ${FILESYSTEM_ROOT}/etc/systemd/system/auditd.service.d
@@ -415,10 +401,6 @@ LogsDirectory=audit
LogsDirectoryMode=0750
EOF

# latest tcpdump control resource access with AppArmor.
# override tcpdump profile to allow tcpdump access TACACS config file.
sudo cp files/apparmor/usr.bin.tcpdump $FILESYSTEM_ROOT/etc/apparmor.d/local/usr.bin.tcpdump

if [[ $CONFIGURED_ARCH == amd64 ]]; then
    ## Pre-install the fundamental packages for amd64 (x86)
    sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y install \
@@ -458,19 +440,15 @@ if [[ $TARGET_BOOTLOADER == grub ]]; then
        GRUB_PKG=grub-efi-arm64-bin
    fi

    sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get install -d -o dir::cache=/var/cache/apt \
    sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y download \
        $GRUB_PKG

    sudo cp $FILESYSTEM_ROOT/var/cache/apt/archives/grub*.deb $FILESYSTEM_ROOT/$PLATFORM_DIR/grub
    sudo mv $FILESYSTEM_ROOT/grub*.deb $FILESYSTEM_ROOT/$PLATFORM_DIR/grub
fi

## Disable kexec supported reboot which was installed by default
sudo sed -i 's/LOAD_KEXEC=true/LOAD_KEXEC=false/' $FILESYSTEM_ROOT/etc/default/kexec

# Ensure that 'logrotate-config.service' is set as a dependency to start before 'logrotate.service'.
sudo mkdir $FILESYSTEM_ROOT/etc/systemd/system/logrotate.service.d
sudo cp files/image_config/logrotate/logrotateOverride.conf $FILESYSTEM_ROOT/etc/systemd/system/logrotate.service.d/logrotateOverride.conf

## Remove sshd host keys, and will regenerate on first sshd start
sudo rm -f $FILESYSTEM_ROOT/etc/ssh/ssh_host_*_key*
sudo cp files/sshd/host-ssh-keygen.sh $FILESYSTEM_ROOT/usr/local/bin/
@@ -496,10 +474,10 @@ ins #comment before /files/etc/ssh/sshd_config/ClientAliveInterval
set /files/etc/ssh/sshd_config/#comment[following-sibling::*[1][self::ClientAliveInterval]] "Close inactive client sessions after 5 minutes"
rm /files/etc/ssh/sshd_config/MaxAuthTries
set /files/etc/ssh/sshd_config/MaxAuthTries 3
rm /files/etc/ssh/sshd_config/Banner
set /files/etc/ssh/sshd_config/Banner /etc/issue
rm /files/etc/ssh/sshd_config/LogLevel
set /files/etc/ssh/sshd_config/LogLevel VERBOSE
rm /files/etc/ssh/sshd_config/Banner
set /files/etc/ssh/sshd_config/Banner /etc/issue
save
quit
EOF
@@ -535,21 +513,20 @@ done < files/image_config/sysctl/sysctl-net.conf

sudo augtool --autosave "$sysctl_net_cmd_string" -r $FILESYSTEM_ROOT

# Specify that we want to explicitly install Python packages into the system environment, and risk breakages
sudo cp files/image_config/pip/pip.conf $FILESYSTEM_ROOT/etc/pip.conf
# Upgrade pip via PyPI and uninstall the Debian version
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install --upgrade pip
sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get purge -y python3-pip

# For building Python packages
sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y install python3-setuptools python3-wheel
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install 'setuptools==49.6.00'
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install 'wheel==0.35.1'

# docker Python API package is needed by Ansible docker module as well as some SONiC applications
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install 'docker==6.1.1'
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install 'docker==5.0.3'

# Install scapy
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install 'scapy==2.4.4'

# The option --no-build-isolation can be removed when upgrading PyYAML to 6.0.1
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install 'PyYAML==5.4.1' --no-build-isolation

## Note: keep pip installed for maintainance purpose

# Install GCC, needed for building/installing some Python packages
@@ -572,27 +549,32 @@ sudo cp files/dhcp/sethostname6 $FILESYSTEM_ROOT/etc/dhcp/dhclient-exit-hooks.d/
sudo cp files/dhcp/graphserviceurl $FILESYSTEM_ROOT/etc/dhcp/dhclient-exit-hooks.d/
sudo cp files/dhcp/snmpcommunity $FILESYSTEM_ROOT/etc/dhcp/dhclient-exit-hooks.d/
sudo cp files/dhcp/vrf $FILESYSTEM_ROOT/etc/dhcp/dhclient-exit-hooks.d/
if [ -f files/image_config/ntp/ntpsec ]; then
    sudo cp ./files/image_config/ntp/ntpsec $FILESYSTEM_ROOT/etc/init.d/
if [ -f files/image_config/ntp/ntp ]; then
    sudo cp ./files/image_config/ntp/ntp $FILESYSTEM_ROOT/etc/init.d/
fi

if [ -f files/image_config/ntp/ntp-systemd-wrapper ]; then
    sudo cp ./files/image_config/ntp/ntp-systemd-wrapper $FILESYSTEM_ROOT/usr/libexec/ntpsec/
    sudo mkdir -p $FILESYSTEM_ROOT/usr/lib/ntp/
    sudo cp ./files/image_config/ntp/ntp-systemd-wrapper $FILESYSTEM_ROOT/usr/lib/ntp/
fi

## Version file part 1
## Version file
sudo mkdir -p $FILESYSTEM_ROOT/etc/sonic
if [ -f files/image_config/sonic_release ]; then
    sudo cp files/image_config/sonic_release $FILESYSTEM_ROOT/etc/sonic/
fi

# Default users info
export password_expire="$( [[ "$CHANGE_DEFAULT_PASSWORD" == "y" ]] && echo true || echo false )"
export username="${USERNAME}"
export password="$(sudo grep ^${USERNAME} $FILESYSTEM_ROOT/etc/shadow | cut -d: -f2)"
j2 files/build_templates/default_users.json.j2 | sudo tee $FILESYSTEM_ROOT/etc/sonic/default_users.json
sudo LANG=c chroot $FILESYSTEM_ROOT chmod 600 /etc/sonic/default_users.json
sudo LANG=c chroot $FILESYSTEM_ROOT chown root:shadow /etc/sonic/default_users.json
export build_version="${SONIC_IMAGE_VERSION}"
export debian_version="$(cat $FILESYSTEM_ROOT/etc/debian_version)"
export kernel_version="${kversion}"
export asic_type="${sonic_asic_platform}"
export asic_subtype="${TARGET_MACHINE}"
export commit_id="$(git rev-parse --short HEAD)"
export branch="$(git rev-parse --abbrev-ref HEAD)"
export release="$(if [ -f $FILESYSTEM_ROOT/etc/sonic/sonic_release ]; then cat $FILESYSTEM_ROOT/etc/sonic/sonic_release; fi)"
export build_date="$(date -u)"
export build_number="${BUILD_NUMBER:-0}"
export built_by="$USER@$BUILD_HOSTNAME"
j2 files/build_templates/sonic_version.yml.j2 | sudo tee $FILESYSTEM_ROOT/etc/sonic/sonic_version.yml

## Copy over clean-up script
sudo cp ./files/scripts/core_cleanup.py $FILESYSTEM_ROOT/usr/bin/core_cleanup.py
@@ -606,60 +588,6 @@ if [[ ! -f './asic_config_checksum' ]]; then
fi
sudo cp ./asic_config_checksum $FILESYSTEM_ROOT/etc/sonic/asic_config_checksum

## Check if not a last stage of RFS build
fi

if [[ $RFS_SPLIT_FIRST_STAGE == y ]]; then
    echo '[INFO] Finished with RFS first stage'
    echo '[INFO] Umount all'

    ## Display all process details access /proc
    sudo LANG=C chroot $FILESYSTEM_ROOT fuser -vm /proc
    ## Kill the processes
    sudo LANG=C chroot $FILESYSTEM_ROOT fuser -km /proc || true
    ## Wait fuser fully kill the processes
    sudo timeout 15s bash -c 'until LANG=C chroot $0 umount /proc; do sleep 1; done' $FILESYSTEM_ROOT || true

    sudo rm -f $TARGET_PATH/$RFS_SQUASHFS_NAME
    sudo mksquashfs $FILESYSTEM_ROOT $TARGET_PATH/$RFS_SQUASHFS_NAME -Xcompression-level 1

    exit 0
fi

if [[ $RFS_SPLIT_LAST_STAGE == y ]]; then
    echo '[INFO] RFS build: second stage'

    ## ensure proc is mounted
    sudo mount proc /proc -t proc || true

    sudo fuser -vm $FILESYSTEM_ROOT || true
    sudo rm -rf $FILESYSTEM_ROOT
    sudo unsquashfs -d $FILESYSTEM_ROOT $TARGET_PATH/$RFS_SQUASHFS_NAME

    ## make / as a mountpoint in chroot env, needed by dockerd
    pushd $FILESYSTEM_ROOT
    sudo mount --bind . .
    popd

    trap_push 'sudo LANG=C chroot $FILESYSTEM_ROOT umount /proc || true'
    sudo LANG=C chroot $FILESYSTEM_ROOT mount proc /proc -t proc
fi

## Version file part 2
export build_version="${SONIC_IMAGE_VERSION}"
export debian_version="$(cat $FILESYSTEM_ROOT/etc/debian_version)"
export kernel_version="${kversion}"
export asic_type="${sonic_asic_platform}"
export asic_subtype="${TARGET_MACHINE}"
export commit_id="$(git rev-parse --short HEAD)"
export branch="$(git rev-parse --abbrev-ref HEAD)"
export release="$(if [ -f $FILESYSTEM_ROOT/etc/sonic/sonic_release ]; then cat $FILESYSTEM_ROOT/etc/sonic/sonic_release; fi)"
export build_date="$(date -u)"
export build_number="${BUILD_NUMBER:-0}"
export built_by="$USER@$BUILD_HOSTNAME"
export sonic_os_version="${SONIC_OS_VERSION}"
j2 files/build_templates/sonic_version.yml.j2 | sudo tee $FILESYSTEM_ROOT/etc/sonic/sonic_version.yml

if [ -f sonic_debian_extension.sh ]; then
    ./sonic_debian_extension.sh $FILESYSTEM_ROOT $PLATFORM_DIR $IMAGE_DISTRO
fi
@@ -694,103 +622,24 @@ then

fi

## Set FIPS runtime default option
sudo LANG=C chroot $FILESYSTEM_ROOT /bin/bash -c "mkdir -p /etc/fips"
sudo LANG=C chroot $FILESYSTEM_ROOT /bin/bash -c "echo 0 > /etc/fips/fips_enable"

# #################
# secure boot
# #################
if [[ $SECURE_UPGRADE_MODE == 'dev' || $SECURE_UPGRADE_MODE == "prod" && $SONIC_ENABLE_SECUREBOOT_SIGNATURE != 'y' ]]; then
    # note: SONIC_ENABLE_SECUREBOOT_SIGNATURE is a feature that signing just kernel,
    # SECURE_UPGRADE_MODE is signing all the boot component including kernel.
    # its required to do not enable both features together to avoid conflicts.
    echo "Secure Boot support build stage: Starting .."

    # debian secure boot dependecies
    sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y install \
        shim-unsigned \
        grub-efi

    if [ ! -f $SECURE_UPGRADE_SIGNING_CERT ]; then
        echo "Error: SONiC SECURE_UPGRADE_SIGNING_CERT=$SECURE_UPGRADE_SIGNING_CERT key missing"
        exit 1
    fi

    if [[ $SECURE_UPGRADE_MODE == 'dev' ]]; then
        # development signing & verification

        if [ ! -f $SECURE_UPGRADE_DEV_SIGNING_KEY ]; then
            echo "Error: SONiC SECURE_UPGRADE_DEV_SIGNING_KEY=$SECURE_UPGRADE_DEV_SIGNING_KEY key missing"
            exit 1
        fi

        sudo ./scripts/signing_secure_boot_dev.sh -a $CONFIGURED_ARCH \
            -r $FILESYSTEM_ROOT \
            -l $LINUX_KERNEL_VERSION \
            -c $SECURE_UPGRADE_SIGNING_CERT \
            -p $SECURE_UPGRADE_DEV_SIGNING_KEY
    elif [[ $SECURE_UPGRADE_MODE == "prod" ]]; then
        # Here Vendor signing should be implemented
        OUTPUT_SEC_BOOT_DIR=$FILESYSTEM_ROOT/boot

        if [ ! -f $sonic_su_prod_signing_tool ]; then
            echo "Error: SONiC sonic_su_prod_signing_tool=$sonic_su_prod_signing_tool script missing"
            exit 1
        fi

        sudo $sonic_su_prod_signing_tool -a $CONFIGURED_ARCH \
            -r $FILESYSTEM_ROOT \
            -l $LINUX_KERNEL_VERSION \
            -o $OUTPUT_SEC_BOOT_DIR \
            $SECURE_UPGRADE_PROD_TOOL_ARGS

        # verifying all EFI files and kernel modules in $OUTPUT_SEC_BOOT_DIR
        sudo ./scripts/secure_boot_signature_verification.sh -e $OUTPUT_SEC_BOOT_DIR \
            -c $SECURE_UPGRADE_SIGNING_CERT \
            -k $FILESYSTEM_ROOT

        # verifying vmlinuz file.
        sudo ./scripts/secure_boot_signature_verification.sh -e $FILESYSTEM_ROOT/boot/vmlinuz-${LINUX_KERNEL_VERSION}-${CONFIGURED_ARCH} \
            -c $SECURE_UPGRADE_SIGNING_CERT \
            -k $FILESYSTEM_ROOT
    fi
    echo "Secure Boot support build stage: END."
fi

## Update initramfs
sudo chroot $FILESYSTEM_ROOT update-initramfs -u
## Convert initrd image to u-boot format
if [[ $TARGET_BOOTLOADER == uboot ]]; then
    INITRD_FILE=initrd.img-${LINUX_KERNEL_VERSION}-${CONFIGURED_ARCH}
    KERNEL_FILE=vmlinuz-${LINUX_KERNEL_VERSION}-${CONFIGURED_ARCH}
    if [[ $CONFIGURED_ARCH == armhf ]]; then
        INITRD_FILE=initrd.img-${LINUX_KERNEL_VERSION}-armmp
        sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -A arm -O linux -T ramdisk -C gzip -d /boot/$INITRD_FILE /boot/u${INITRD_FILE}
        ## Overwriting the initrd image with uInitrd
        sudo LANG=C chroot $FILESYSTEM_ROOT mv /boot/u${INITRD_FILE} /boot/$INITRD_FILE
    elif [[ $CONFIGURED_ARCH == arm64 ]]; then
        if [[ $CONFIGURED_PLATFORM == pensando ]]; then
            ## copy device tree file into boot (XXX: need to compile dtb from dts)
            sudo cp -v $PLATFORM_DIR/pensando/elba-asic-psci.dtb $FILESYSTEM_ROOT/boot/
            ## make kernel as gzip file
            sudo LANG=C chroot $FILESYSTEM_ROOT gzip /boot/${KERNEL_FILE}
            sudo LANG=C chroot $FILESYSTEM_ROOT mv /boot/${KERNEL_FILE}.gz /boot/${KERNEL_FILE}
            ## Convert initrd image to u-boot format
            sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -A arm64 -O linux -T ramdisk -C gzip -d /boot/$INITRD_FILE /boot/u${INITRD_FILE}
            ## Overwriting the initrd image with uInitrd
            sudo LANG=C chroot $FILESYSTEM_ROOT mv /boot/u${INITRD_FILE} /boot/$INITRD_FILE
        else
            sudo cp -v $PLATFORM_DIR/${sonic_asic_platform}-${CONFIGURED_ARCH}/sonic_fit.its $FILESYSTEM_ROOT/boot/
            sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -f /boot/sonic_fit.its /boot/sonic_${CONFIGURED_ARCH}.fit
        fi
        sudo cp -v $PLATFORM_DIR/${sonic_asic_platform}-${CONFIGURED_ARCH}/sonic_fit.its $FILESYSTEM_ROOT/boot/
        sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -f /boot/sonic_fit.its /boot/sonic_${CONFIGURED_ARCH}.fit
    fi
fi

# Collect host image version files before cleanup
SONIC_VERSION_CACHE=${SONIC_VERSION_CACHE} \
    DBGOPT="${DBGOPT}" \
    scripts/collect_host_image_version_files.sh $CONFIGURED_ARCH $IMAGE_DISTRO $TARGET_PATH $FILESYSTEM_ROOT
scripts/collect_host_image_version_files.sh $TARGET_PATH $FILESYSTEM_ROOT

# Remove GCC
sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y remove gcc
@@ -818,48 +667,21 @@ sudo LANG=C chroot $FILESYSTEM_ROOT fuser -vm /proc
## Kill the processes
sudo LANG=C chroot $FILESYSTEM_ROOT fuser -km /proc || true
## Wait fuser fully kill the processes
sudo timeout 15s bash -c 'until LANG=C chroot $0 umount /proc; do sleep 1; done' $FILESYSTEM_ROOT || true
sleep 15
sudo LANG=C chroot $FILESYSTEM_ROOT umount /proc || true

## Prepare empty directory to trigger mount move in initramfs-tools/mount_loop_root, implemented by patching
sudo mkdir $FILESYSTEM_ROOT/host

if [[ "$CHANGE_DEFAULT_PASSWORD" == "y" ]]; then
    ## Expire default password for exitsing users that can do login
    default_users=$(cat $FILESYSTEM_ROOT/etc/passwd | grep "/home"| grep ":/bin/bash\|:/bin/sh" | awk -F ":" '{print $1}' 2> /dev/null)
    for user in $default_users
    do
        sudo LANG=C chroot $FILESYSTEM_ROOT passwd -e ${user}
    done
fi

## Compress most file system into squashfs file
sudo rm -f $INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS
sudo rm -f $ONIE_INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS
## Output the file system total size for diag purpose
## Note: -x to skip directories on different file systems, such as /proc
sudo du -hsx $FILESYSTEM_ROOT
sudo mkdir -p $FILESYSTEM_ROOT/var/lib/docker

## Clear DNS configuration inherited from the build server
sudo rm -f $FILESYSTEM_ROOT/etc/resolvconf/resolv.conf.d/original
sudo cp files/image_config/resolv-config/resolv.conf.head $FILESYSTEM_ROOT/etc/resolvconf/resolv.conf.d/head

## Optimize filesystem size
if [ "$BUILD_REDUCE_IMAGE_SIZE" = "y" ]; then
    sudo scripts/build-optimize-fs-size.py "$FILESYSTEM_ROOT" \
        --image-type "$IMAGE_TYPE" \
        --hardlinks var/lib/docker \
        --hardlinks usr/share/sonic/device \
        --remove-docs \
        --remove-mans \
        --remove-licenses
fi

sudo cp files/image_config/resolv-config/resolv.conf $FILESYSTEM_ROOT/etc/resolv.conf
sudo mksquashfs $FILESYSTEM_ROOT $FILESYSTEM_SQUASHFS -comp zstd -b 1M -e boot -e var/lib/docker -e $PLATFORM_DIR

## Reduce /boot permission
sudo chmod -R go-wx $FILESYSTEM_ROOT/boot

# Ensure admin gid is 1000
gid_user=$(sudo LANG=C chroot $FILESYSTEM_ROOT id -g $USERNAME) || gid_user="none"
if [ "${gid_user}" != "1000" ]; then
@@ -876,8 +698,8 @@ if [[ $MULTIARCH_QEMU_ENVIRON == y || $CROSS_BUILD_ENVIRON == y ]]; then
fi

## Compress docker files
pushd $FILESYSTEM_ROOT && sudo tar -I $GZ_COMPRESS_PROGRAM -cf $OLDPWD/$FILESYSTEM_DOCKERFS -C ${DOCKERFS_PATH}var/lib/docker .; popd
pushd $FILESYSTEM_ROOT && sudo tar czf $OLDPWD/$FILESYSTEM_DOCKERFS -C ${DOCKERFS_PATH}var/lib/docker .; popd

## Compress together with /boot, /var/lib/docker and $PLATFORM_DIR as an installer payload zip file
pushd $FILESYSTEM_ROOT && sudo tar -I $GZ_COMPRESS_PROGRAM -cf platform.tar.gz -C $PLATFORM_DIR . && sudo zip -n .gz $OLDPWD/$INSTALLER_PAYLOAD -r boot/ platform.tar.gz; popd
sudo zip -g -n .squashfs:.gz $INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS $FILESYSTEM_DOCKERFS
pushd $FILESYSTEM_ROOT && sudo tar czf platform.tar.gz -C $PLATFORM_DIR . && sudo zip -n .gz $OLDPWD/$ONIE_INSTALLER_PAYLOAD -r boot/ platform.tar.gz; popd
sudo zip -g -n .squashfs:.gz $ONIE_INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS $FILESYSTEM_DOCKERFS
@@ -101,8 +101,7 @@ fi

## Save the docker image in a gz file
mkdir -p target
command -v pigz > /dev/null && GZ_COMPRESS_PROGRAM=pigz || GZ_COMPRESS_PROGRAM=gzip
docker save $docker_image_name | $GZ_COMPRESS_PROGRAM -c > target/$docker_image_name.gz
docker save $docker_image_name | gzip -c > target/$docker_image_name.gz

if [ -n "$1" ]; then
    ./push_docker.sh target/$docker_image_name.gz $@ $docker_image_tag
@ -18,8 +18,8 @@ fi
echo "Error: Invalid ONIE_IMAGE_PART_SIZE in onie image config file"
exit 1
}
[ -n "$INSTALLER_PAYLOAD" ] || {
echo "Error: Invalid INSTALLER_PAYLOAD in onie image config file"
[ -n "$ONIE_INSTALLER_PAYLOAD" ] || {
echo "Error: Invalid ONIE_INSTALLER_PAYLOAD in onie image config file"
exit 1
}

@ -56,10 +56,10 @@ generate_kvm_image()
exit 1
}

$GZ_COMPRESS_PROGRAM $KVM_IMAGE_DISK
gzip $KVM_IMAGE_DISK

[ -r $KVM_IMAGE_DISK.gz ] || {
echo "Error : $GZ_COMPRESS_PROGRAM $KVM_IMAGE_DISK failed!"
echo "Error : gzip $KVM_IMAGE_DISK failed!"
exit 1
}

@ -86,7 +86,7 @@ generate_onie_installer_image()
## Note: Don't leave blank between lines. It is single line command.
./onie-mk-demo.sh $CONFIGURED_ARCH $TARGET_MACHINE $TARGET_PLATFORM-$TARGET_MACHINE-$ONIEIMAGE_VERSION \
installer platform/$TARGET_MACHINE/platform.conf $output_file OS $IMAGE_VERSION $ONIE_IMAGE_PART_SIZE \
$INSTALLER_PAYLOAD $SECURE_UPGRADE_SIGNING_CERT $SECURE_UPGRADE_DEV_SIGNING_KEY
$ONIE_INSTALLER_PAYLOAD
}

# Generate asic-specific device list

@ -139,11 +139,7 @@ elif [ "$IMAGE_TYPE" = "raw" ]; then
## Run the installer
## The 'build' install mode of the installer is used to generate this dump.
sudo chmod a+x $tmp_output_onie_image
sudo ./$tmp_output_onie_image || {
## Failure during 'build' install mode of the installer results in an incomplete raw image.
## Delete the incomplete raw image.
sudo rm -f $OUTPUT_RAW_IMAGE
}
sudo ./$tmp_output_onie_image
rm $tmp_output_onie_image

[ -r $OUTPUT_RAW_IMAGE ] || {

@ -151,7 +147,15 @@ elif [ "$IMAGE_TYPE" = "raw" ]; then
exit 1
}

echo "The raw image is in $OUTPUT_RAW_IMAGE"
gzip $OUTPUT_RAW_IMAGE

[ -r $OUTPUT_RAW_IMAGE.gz ] || {
echo "Error : gzip $OUTPUT_RAW_IMAGE failed!"
exit 1
}

mv $OUTPUT_RAW_IMAGE.gz $OUTPUT_RAW_IMAGE
echo "The compressed raw image is in $OUTPUT_RAW_IMAGE"

elif [ "$IMAGE_TYPE" = "kvm" ]; then

@ -175,7 +179,7 @@ elif [ "$IMAGE_TYPE" = "aboot" ]; then
sudo rm -f $OUTPUT_ABOOT_IMAGE
sudo rm -f $ABOOT_BOOT_IMAGE
## Add main payload
cp $INSTALLER_PAYLOAD $OUTPUT_ABOOT_IMAGE
cp $ONIE_INSTALLER_PAYLOAD $OUTPUT_ABOOT_IMAGE
## Add Aboot boot0 file
j2 -f env files/Aboot/boot0.j2 ./onie-image.conf > files/Aboot/boot0
sed -i -e "s/%%IMAGE_VERSION%%/$IMAGE_VERSION/g" files/Aboot/boot0

@ -198,12 +202,12 @@ elif [ "$IMAGE_TYPE" = "aboot" ]; then
zip -g $OUTPUT_ABOOT_IMAGE .platforms_asic

if [ "$ENABLE_FIPS" = "y" ]; then
echo "sonic_fips=1" >> kernel-cmdline-append
echo "sonic_fips=1" > kernel-cmdline
else
echo "sonic_fips=0" >> kernel-cmdline-append
echo "sonic_fips=0" > kernel-cmdline
fi
zip -g $OUTPUT_ABOOT_IMAGE kernel-cmdline-append
rm kernel-cmdline-append
zip -g $OUTPUT_ABOOT_IMAGE kernel-cmdline
rm kernel-cmdline

zip -g $OUTPUT_ABOOT_IMAGE $ABOOT_BOOT_IMAGE
rm $ABOOT_BOOT_IMAGE

@ -213,38 +217,6 @@ elif [ "$IMAGE_TYPE" = "aboot" ]; then
[ -f "$CA_CERT" ] && cp "$CA_CERT" "$TARGET_CA_CERT"
./scripts/sign_image.sh -i "$OUTPUT_ABOOT_IMAGE" -k "$SIGNING_KEY" -c "$SIGNING_CERT" -a "$TARGET_CA_CERT"
fi
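The Aboot branch above writes a one-line kernel-cmdline(-append) file and folds it into the installer archive with zip -g. A rough Python equivalent of that append-to-an-existing-archive step, with a placeholder archive name (a sketch, not the build script):

"""Sketch: append a small kernel-cmdline member to an existing installer archive,
as the `zip -g $OUTPUT_ABOOT_IMAGE kernel-cmdline` step does. Paths are placeholders."""
import zipfile

def add_kernel_cmdline(archive_path: str, enable_fips: bool) -> None:
    content = "sonic_fips=1\n" if enable_fips else "sonic_fips=0\n"
    # Open in append mode so existing members (payload, boot0, ...) are untouched.
    with zipfile.ZipFile(archive_path, "a", compression=zipfile.ZIP_DEFLATED) as z:
        z.writestr("kernel-cmdline", content)

# add_kernel_cmdline("sonic-aboot-example.swi", enable_fips=False)  # example path only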
elif [ "$IMAGE_TYPE" = "dsc" ]; then
echo "Build DSC installer"

dsc_installer_dir=files/dsc
dsc_installer=$dsc_installer_dir/install_debian
dsc_installer_manifest=$dsc_installer_dir/MANIFEST

mkdir -p `dirname $OUTPUT_DSC_IMAGE`
sudo rm -f $OUTPUT_DSC_IMAGE

source ./onie-image.conf

j2 $dsc_installer.j2 > $dsc_installer
export installer_sha=$(sha512sum "$dsc_installer" | awk '{print $1}')

export build_date=$(date -u)
export build_user=$(id -un)
export installer_payload_sha=$(sha512sum "$INSTALLER_PAYLOAD" | awk '{print $1}')
j2 $dsc_installer_manifest.j2 > $dsc_installer_manifest

cp $INSTALLER_PAYLOAD $dsc_installer_dir
tar cf $OUTPUT_DSC_IMAGE -C files/dsc $(basename $dsc_installer_manifest) $INSTALLER_PAYLOAD $(basename $dsc_installer)

echo "Build ONIE installer"
mkdir -p `dirname $OUTPUT_ONIE_IMAGE`
sudo rm -f $OUTPUT_ONIE_IMAGE

generate_device_list "./installer/platforms_asic"

generate_onie_installer_image

else
echo "Error: Non supported image type $IMAGE_TYPE"
exit 1
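The DSC manifest step records sha512 digests of the installer script and payload via sha512sum. A small Python sketch of the same digest computation; the file paths are placeholders, not taken from the build:

"""Sketch of the sha512 fields the MANIFEST template consumes.
File names are placeholders; the real build hashes $dsc_installer and $INSTALLER_PAYLOAD."""
import hashlib

def sha512_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha512()
    with open(path, "rb") as f:
        # Stream in chunks so a multi-hundred-MB payload is not loaded into memory.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# installer_sha = sha512_of("files/dsc/install_debian")
# installer_payload_sha = sha512_of("example-payload.tar")  # placeholder payload name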
@ -11,7 +11,6 @@ def main():
parser = argparse.ArgumentParser(description='test_login cmdline parser')
parser.add_argument('-u', default="admin", help='login user name')
parser.add_argument('-P', default="YourPaSsWoRd", help='login password')
parser.add_argument('-N', default="Test@2022", help='new password')
parser.add_argument('-p', type=int, default=9000, help='local port')

args = parser.parse_args()

@ -21,7 +20,6 @@ def main():
cmd_prompt = "{}@sonic:~\$ $".format(args.u)
grub_selection = "The highlighted entry will be executed"
firsttime_prompt = 'firsttime_exit'
passwd_change_prompt = ['Current password:', 'New password:', 'Retype new password:']

i = 0
while True:

@ -38,6 +36,7 @@ def main():
# select default SONiC Image
p.expect(grub_selection)
p.sendline()

# bootup sonic image
while True:
i = p.expect([login_prompt, passwd_prompt, firsttime_prompt, cmd_prompt])

@ -47,30 +46,6 @@ def main():
elif i == 1:
# send password
p.sendline(args.P)
# Check for password change prompt
try:
p.expect('Current password:', timeout=2)
except pexpect.TIMEOUT:
break
else:
# send old password for password prompt
p.sendline(args.P)
p.expect(passwd_change_prompt[1])
# send new password
p.sendline(args.N)
p.expect(passwd_change_prompt[2])
# retype new password
p.sendline(args.N)
time.sleep(1)
# Restore default password
p.sendline('passwd {}'.format(args.u))
p.expect(passwd_change_prompt[0])
p.sendline(args.N)
p.expect(passwd_change_prompt[1])
p.sendline(args.P)
p.expect(passwd_change_prompt[2])
p.sendline(args.P)
break
elif i == 2:
# fix a login timeout issue, caused by the login_prompt message mixed with the output message of the rc.local
time.sleep(1)
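The removed hunk above drives a forced first-login password change over the console with pexpect. A condensed, self-contained sketch of that interaction pattern; the prompt strings follow the diff, while the spawn target and port are placeholders:

"""Sketch of the pexpect flow in the hunk: answer the 'Current password:' chain
with the old and new passwords. The telnet target is a placeholder; the real test
attaches to the KVM serial console."""
import pexpect

def change_password(p: pexpect.spawn, old: str, new: str) -> None:
    p.sendline(old)                      # reply to 'Current password:'
    p.expect('New password:')
    p.sendline(new)
    p.expect('Retype new password:')
    p.sendline(new)

# p = pexpect.spawn("telnet 127.0.0.1 9000")   # placeholder port, as with '-p 9000'
# try:
#     p.expect('Current password:', timeout=2)
#     change_password(p, "YourPaSsWoRd", "Test@2022")
# except pexpect.TIMEOUT:
#     pass  # no forced password change on this image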
@ -1,8 +1,8 @@
import os
import struct
import subprocess
from mmap import *
from sonic_py_common import device_info
from sonic_py_common.general import getstatusoutput_noshell

HOST_CHK_CMD = ["docker"]
EMPTY_STRING = ""

@ -14,11 +14,7 @@ class APIHelper():
(self.platform, self.hwsku) = device_info.get_platform_and_hwsku()

def is_host(self):
try:
status, output = getstatusoutput_noshell(HOST_CHK_CMD)
return status == 0
except Exception:
return False
return subprocess.call(HOST_CHK_CMD) == 0

def pci_get_value(self, resource, offset):
status = True
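This hunk toggles is_host() between sonic_py_common's getstatusoutput_noshell wrapped in an exception guard and a bare subprocess.call. A minimal runnable sketch of the guarded pattern using only the standard library (illustrative, not the platform module itself):

"""Sketch of the guarded is_host() variant shown in the hunk: run `docker` without a
shell and treat any failure (missing binary, permission error) as "not on the host"."""
import subprocess

HOST_CHK_CMD = ["docker"]

def is_host() -> bool:
    try:
        # check=False: a non-zero exit simply means we are not on the host OS.
        result = subprocess.run(HOST_CHK_CMD, stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL, check=False)
        return result.returncode == 0
    except Exception:
        return False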
@ -187,15 +187,9 @@ class LedControl(LedControlBase):
lanes = swss.get(
swss.APPL_DB, self.PORT_TABLE_PREFIX + port_name, 'lanes')

# SonicV2Connector.get() will return None when key does not exist.
if lanes:
lanes_len = len(lanes.split(','))
else:
lanes_len = 0

# SONiC port nums are 0-based and increment by 4
# Arista QSFP indices are 1-based and increment by 1
return (((sonic_port_num/4) + 1), sonic_port_num % 4, lanes_len)
return (((sonic_port_num/4) + 1), sonic_port_num % 4, len(lanes.split(',')))

# Concrete implementation of port_link_state_change() method

@ -154,16 +154,9 @@ class LedControl(LedControlBase):
lanes = swss.get(
swss.APPL_DB, self.PORT_TABLE_PREFIX + port_name, 'lanes')

# SonicV2Connector.get() will return None when key does not exist.
if lanes:
lanes_len = len(lanes.split(','))
else:
lanes_len = 0

# SONiC port nums are 0-based and increment by 4
# Arista QSFP indices are 1-based and increment by 1
return (((sonic_port_num/4) + 1), sonic_port_num % 4, lanes_len)
return (((sonic_port_num/4) + 1), sonic_port_num % 4, len(lanes.split(',')))

# Concrete implementation of port_link_state_change() method
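The two hunks above switch the lane count between a guarded lookup, which treats a missing APPL_DB key as zero lanes, and a direct len(lanes.split(',')). A small self-contained sketch of the guarded mapping, with the swss database access stubbed by a plain dict since it is not available outside the switch:

"""Sketch of the None-guarded lane count from the longer variant of the hunk.
The swss/APPL_DB access is replaced by a dict so the example runs anywhere."""

PORT_TABLE = {"Ethernet4": "13,14,15,16"}  # placeholder APPL_DB contents

def qsfp_index_and_lanes(sonic_port_num: int, port_name: str):
    lanes = PORT_TABLE.get(port_name)            # SonicV2Connector.get() returns None if absent
    lanes_len = len(lanes.split(',')) if lanes else 0
    # SONiC port numbers are 0-based in steps of 4; Arista QSFP indices are 1-based.
    return (sonic_port_num // 4 + 1, sonic_port_num % 4, lanes_len)

print(qsfp_index_and_lanes(4, "Ethernet4"))      # -> (2, 0, 4)
print(qsfp_index_and_lanes(8, "Ethernet8"))      # -> (3, 0, 0), key missing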
@ -1,17 +1,7 @@
|
||||
{
|
||||
"chassis": {
|
||||
"name": "DCS-7050QX-32",
|
||||
"components": [
|
||||
{
|
||||
"name": "Scd(addr=0000:04:00.0)"
|
||||
},
|
||||
{
|
||||
"name": "Ucd90120A(addr=6-004e)"
|
||||
},
|
||||
{
|
||||
"name": "Ucd90160(addr=10-004e)"
|
||||
}
|
||||
],
|
||||
"components": [],
|
||||
"fans": [
|
||||
{
|
||||
"name": "fan1"
|
||||
@ -63,43 +53,37 @@
|
||||
"psus": [
|
||||
{
|
||||
"name": "psu1",
|
||||
"fans": [
|
||||
{
|
||||
"name": "psu1/1",
|
||||
"speed": {
|
||||
"controllable": false
|
||||
}
|
||||
}
|
||||
]
|
||||
"fans": []
|
||||
},
|
||||
{
|
||||
"name": "psu2",
|
||||
"fans": [
|
||||
{
|
||||
"name": "psu2/1",
|
||||
"speed": {
|
||||
"controllable": false
|
||||
}
|
||||
}
|
||||
]
|
||||
"fans": []
|
||||
}
|
||||
],
|
||||
"thermals": [
|
||||
{
|
||||
"name": "Cpu temp sensor",
|
||||
"controllable": false
|
||||
"name": "Cpu temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Board sensor",
|
||||
"controllable": false
|
||||
"name": "Board sensor"
|
||||
},
|
||||
{
|
||||
"name": "Front-panel temp sensor",
|
||||
"controllable": false
|
||||
"name": "Front-panel temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Rear temp sensor",
|
||||
"controllable": false
|
||||
"name": "Rear temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 1 inlet temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 1 internal sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 2 inlet temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 2 internal sensor"
|
||||
}
|
||||
],
|
||||
"sfps": [
|
||||
|
@ -0,0 +1,144 @@
|
||||
{
|
||||
"interfaces": {
|
||||
"Ethernet0": {
|
||||
"default_brkout_mode": "1x10G",
|
||||
"port_type": "RJ45"
|
||||
},
|
||||
"Ethernet1": {
|
||||
"default_brkout_mode": "1x10G",
|
||||
"port_type": "RJ45"
|
||||
},
|
||||
"Ethernet2": {
|
||||
"default_brkout_mode": "1x10G",
|
||||
"port_type": "RJ45"
|
||||
},
|
||||
"Ethernet3": {
|
||||
"default_brkout_mode": "1x1G",
|
||||
"port_type": "RJ45"
|
||||
},
|
||||
"Ethernet4": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet8": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet12": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet16": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet20": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet24": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet28": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet32": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet36": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet40": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet44": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet48": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet52": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet56": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet60": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet64": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet68": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet72": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet76": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet80": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet84": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet88": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet92": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet96": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet100": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet104": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet108": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet112": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet116": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet120": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
},
|
||||
"Ethernet124": {
|
||||
"default_brkout_mode": "1x40G",
|
||||
"port_type": "QSFP+"
|
||||
}
|
||||
}
|
||||
}
|
@ -1,36 +1,36 @@
|
||||
# name lanes alias index speed
|
||||
Ethernet0 9 Ethernet1 1 10000
|
||||
Ethernet1 10 Ethernet2 2 10000
|
||||
Ethernet2 11 Ethernet3 3 10000
|
||||
Ethernet3 12 Ethernet4 4 1000
|
||||
Ethernet4 13,14,15,16 Ethernet6/1 6 40000
|
||||
Ethernet8 17,18,19,20 Ethernet7/1 7 40000
|
||||
Ethernet12 21,22,23,24 Ethernet8/1 8 40000
|
||||
Ethernet16 29,30,31,32 Ethernet9/1 9 40000
|
||||
Ethernet20 25,26,27,28 Ethernet10/1 10 40000
|
||||
Ethernet24 33,34,35,36 Ethernet11/1 11 40000
|
||||
Ethernet28 37,38,39,40 Ethernet12/1 12 40000
|
||||
Ethernet32 45,46,47,48 Ethernet13/1 13 40000
|
||||
Ethernet36 41,42,43,44 Ethernet14/1 14 40000
|
||||
Ethernet40 49,50,51,52 Ethernet15/1 15 40000
|
||||
Ethernet44 53,54,55,56 Ethernet16/1 16 40000
|
||||
Ethernet48 69,70,71,72 Ethernet17/1 17 40000
|
||||
Ethernet52 65,66,67,68 Ethernet18/1 18 40000
|
||||
Ethernet56 73,74,75,76 Ethernet19/1 19 40000
|
||||
Ethernet60 77,78,79,80 Ethernet20/1 20 40000
|
||||
Ethernet64 93,94,95,96 Ethernet21/1 21 40000
|
||||
Ethernet68 89,90,91,92 Ethernet22/1 22 40000
|
||||
Ethernet72 97,98,99,100 Ethernet23/1 23 40000
|
||||
Ethernet76 101,102,103,104 Ethernet24/1 24 40000
|
||||
Ethernet80 109,110,111,112 Ethernet25/1 25 40000
|
||||
Ethernet84 105,106,107,108 Ethernet26/1 26 40000
|
||||
Ethernet88 121,122,123,124 Ethernet27/1 27 40000
|
||||
Ethernet92 125,126,127,128 Ethernet28/1 28 40000
|
||||
Ethernet96 61,62,63,64 Ethernet29 29 40000
|
||||
Ethernet100 57,58,59,60 Ethernet30 30 40000
|
||||
Ethernet104 81,82,83,84 Ethernet31 31 40000
|
||||
Ethernet108 85,86,87,88 Ethernet32 32 40000
|
||||
Ethernet112 117,118,119,120 Ethernet33 33 40000
|
||||
Ethernet116 113,114,115,116 Ethernet34 34 40000
|
||||
Ethernet120 1,2,3,4 Ethernet35 35 40000
|
||||
Ethernet124 5,6,7,8 Ethernet36 36 40000
|
||||
# name lanes alias index
|
||||
Ethernet0 9 Ethernet1 1
|
||||
Ethernet1 10 Ethernet2 2
|
||||
Ethernet2 11 Ethernet3 3
|
||||
Ethernet3 12 Ethernet4 4
|
||||
Ethernet4 13,14,15,16 Ethernet6/1 6
|
||||
Ethernet8 17,18,19,20 Ethernet7/1 7
|
||||
Ethernet12 21,22,23,24 Ethernet8/1 8
|
||||
Ethernet16 29,30,31,32 Ethernet9/1 9
|
||||
Ethernet20 25,26,27,28 Ethernet10/1 10
|
||||
Ethernet24 33,34,35,36 Ethernet11/1 11
|
||||
Ethernet28 37,38,39,40 Ethernet12/1 12
|
||||
Ethernet32 45,46,47,48 Ethernet13/1 13
|
||||
Ethernet36 41,42,43,44 Ethernet14/1 14
|
||||
Ethernet40 49,50,51,52 Ethernet15/1 15
|
||||
Ethernet44 53,54,55,56 Ethernet16/1 16
|
||||
Ethernet48 69,70,71,72 Ethernet17/1 17
|
||||
Ethernet52 65,66,67,68 Ethernet18/1 18
|
||||
Ethernet56 73,74,75,76 Ethernet19/1 19
|
||||
Ethernet60 77,78,79,80 Ethernet20/1 20
|
||||
Ethernet64 93,94,95,96 Ethernet21/1 21
|
||||
Ethernet68 89,90,91,92 Ethernet22/1 22
|
||||
Ethernet72 97,98,99,100 Ethernet23/1 23
|
||||
Ethernet76 101,102,103,104 Ethernet24/1 24
|
||||
Ethernet80 109,110,111,112 Ethernet25/1 25
|
||||
Ethernet84 105,106,107,108 Ethernet26/1 26
|
||||
Ethernet88 121,122,123,124 Ethernet27/1 27
|
||||
Ethernet92 125,126,127,128 Ethernet28/1 28
|
||||
Ethernet96 61,62,63,64 Ethernet29 29
|
||||
Ethernet100 57,58,59,60 Ethernet30 30
|
||||
Ethernet104 81,82,83,84 Ethernet31 31
|
||||
Ethernet108 85,86,87,88 Ethernet32 32
|
||||
Ethernet112 117,118,119,120 Ethernet33 33
|
||||
Ethernet116 113,114,115,116 Ethernet34 34
|
||||
Ethernet120 1,2,3,4 Ethernet35 35
|
||||
Ethernet124 5,6,7,8 Ethernet36 36
|
||||
|
@ -1,23 +1,7 @@
|
||||
{
|
||||
"chassis": {
|
||||
"name": "DCS-7050QX-32S",
|
||||
"components": [
|
||||
{
|
||||
"name": "Aboot()"
|
||||
},
|
||||
{
|
||||
"name": "Scd(addr=0000:02:00.0)"
|
||||
},
|
||||
{
|
||||
"name": "Ucd90120A(addr=4-004e)"
|
||||
},
|
||||
{
|
||||
"name": "Ucd90120A(addr=8-004e)"
|
||||
},
|
||||
{
|
||||
"name": "CrowSysCpld(addr=2-0023)"
|
||||
}
|
||||
],
|
||||
"components": [],
|
||||
"fans": [],
|
||||
"fan_drawers": [
|
||||
{
|
||||
@ -56,47 +40,46 @@
|
||||
"psus": [
|
||||
{
|
||||
"name": "psu1",
|
||||
"fans": [
|
||||
{
|
||||
"name": "psu1/1",
|
||||
"speed": {
|
||||
"controllable": false
|
||||
}
|
||||
}
|
||||
]
|
||||
"fans": []
|
||||
},
|
||||
{
|
||||
"name": "psu2",
|
||||
"fans": [
|
||||
{
|
||||
"name": "psu2/1",
|
||||
"speed": {
|
||||
"controllable": false
|
||||
}
|
||||
}
|
||||
]
|
||||
"fans": []
|
||||
}
|
||||
],
|
||||
"thermals": [
|
||||
{
|
||||
"name": "Cpu temp sensor",
|
||||
"controllable": false
|
||||
"name": "Cpu temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Cpu board temp sensor",
|
||||
"controllable": false
|
||||
"name": "Cpu board temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Back-panel temp sensor",
|
||||
"controllable": false
|
||||
"name": "Back-panel temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Board Sensor",
|
||||
"controllable": false
|
||||
"name": "Board Sensor"
|
||||
},
|
||||
{
|
||||
"name": "Front-panel temp sensor",
|
||||
"controllable": false
|
||||
"name": "Front-panel temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 1 hotspot sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 1 inlet temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 1 exhaust temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 2 hotspot sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 2 inlet temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 2 exhaust temp sensor"
|
||||
}
|
||||
],
|
||||
"sfps": [
|
||||
@ -218,12 +201,6 @@
|
||||
"1x40G[10G]": [
|
||||
"Ethernet5/1"
|
||||
],
|
||||
"3x10G(3)+1x1G(1)": [
|
||||
"Ethernet1",
|
||||
"Ethernet2",
|
||||
"Ethernet3",
|
||||
"Ethernet4"
|
||||
],
|
||||
"2x20G[10G]": [
|
||||
"Ethernet5/1",
|
||||
"Ethernet5/3"
|
||||
|
@ -11,9 +11,6 @@ bus "i2c-7" "SCD 0000:02:00.0 SMBus master 0 bus 5"
|
||||
chip "k10temp-pci-00c3"
|
||||
label temp1 "Cpu temp sensor"
|
||||
|
||||
chip "fam15h_power-pci-00c4"
|
||||
ignore power1
|
||||
|
||||
chip "max6658-i2c-2-4c"
|
||||
label temp1 "Board temp sensor"
|
||||
set temp1_max 55
|
||||
@ -37,13 +34,7 @@ chip "pmbus-i2c-6-58"
|
||||
label temp2 "Power supply 1 inlet temp sensor"
|
||||
label temp3 "Power supply 1 sensor"
|
||||
|
||||
ignore fan2
|
||||
ignore fan3
|
||||
|
||||
chip "pmbus-i2c-5-58"
|
||||
label temp1 "Power supply 2 hotspot sensor"
|
||||
label temp2 "Power supply 2 inlet temp sensor"
|
||||
label temp3 "Power supply 2 sensor"
|
||||
|
||||
ignore fan2
|
||||
ignore fan3
|
||||
|
@ -1,6 +1,4 @@
|
||||
{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}
|
||||
{% set different_dscp_to_tc_map = true %}
|
||||
{% set different_tc_to_queue_map = true %}
|
||||
{%- macro generate_dscp_to_tc_map() %}
|
||||
"DSCP_TO_TC_MAP": {
|
||||
"AZURE": {
|
||||
@ -69,73 +67,7 @@
|
||||
"62": "1",
|
||||
"63": "1"
|
||||
},
|
||||
"AZURE_UPLINK": {
|
||||
"0" : "1",
|
||||
"1" : "1",
|
||||
"2" : "2",
|
||||
"3" : "3",
|
||||
"4" : "4",
|
||||
"5" : "1",
|
||||
"6" : "6",
|
||||
"7" : "1",
|
||||
"8" : "0",
|
||||
"9" : "1",
|
||||
"10": "1",
|
||||
"11": "1",
|
||||
"12": "1",
|
||||
"13": "1",
|
||||
"14": "1",
|
||||
"15": "1",
|
||||
"16": "1",
|
||||
"17": "1",
|
||||
"18": "1",
|
||||
"19": "1",
|
||||
"20": "1",
|
||||
"21": "1",
|
||||
"22": "1",
|
||||
"23": "1",
|
||||
"24": "1",
|
||||
"25": "1",
|
||||
"26": "1",
|
||||
"27": "1",
|
||||
"28": "1",
|
||||
"29": "1",
|
||||
"30": "1",
|
||||
"31": "1",
|
||||
"32": "1",
|
||||
"33": "8",
|
||||
"34": "1",
|
||||
"35": "1",
|
||||
"36": "1",
|
||||
"37": "1",
|
||||
"38": "1",
|
||||
"39": "1",
|
||||
"40": "1",
|
||||
"41": "1",
|
||||
"42": "1",
|
||||
"43": "1",
|
||||
"44": "1",
|
||||
"45": "1",
|
||||
"46": "5",
|
||||
"47": "1",
|
||||
"48": "7",
|
||||
"49": "1",
|
||||
"50": "1",
|
||||
"51": "1",
|
||||
"52": "1",
|
||||
"53": "1",
|
||||
"54": "1",
|
||||
"55": "1",
|
||||
"56": "1",
|
||||
"57": "1",
|
||||
"58": "1",
|
||||
"59": "1",
|
||||
"60": "1",
|
||||
"61": "1",
|
||||
"62": "1",
|
||||
"63": "1"
|
||||
},
|
||||
"AZURE_TUNNEL": {
|
||||
"AZURE_TUNNEL": {
|
||||
"0" : "1",
|
||||
"1" : "1",
|
||||
"2" : "1",
|
||||
@ -242,17 +174,6 @@
|
||||
"7": "7",
|
||||
"8": "1"
|
||||
},
|
||||
"AZURE_UPLINK": {
|
||||
"0": "0",
|
||||
"1": "1",
|
||||
"2": "2",
|
||||
"3": "3",
|
||||
"4": "4",
|
||||
"5": "5",
|
||||
"6": "6",
|
||||
"7": "7",
|
||||
"8": "1"
|
||||
},
|
||||
"AZURE_TUNNEL": {
|
||||
"0": "0",
|
||||
"1": "1",
|
||||
|
@ -1,24 +1,16 @@
|
||||
{# Construct config.bcm to include additional soc properties per specific device metadata requirement #}
|
||||
{%- set map_prio = '' -%}
|
||||
{%- set pfcwd_sock = '' -%}
|
||||
{%- if DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['subtype'] is defined -%}
|
||||
{%- set switch_subtype = DEVICE_METADATA['localhost']['subtype'] -%}
|
||||
{%- if 'dualtor' in switch_subtype.lower() %}
|
||||
{%- set map_prio = 'sai_remap_prio_on_tnl_egress=1' -%}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- if SYSTEM_DEFAULTS is defined and 'tunnel_qos_remap' in SYSTEM_DEFAULTS and SYSTEM_DEFAULTS['tunnel_qos_remap']['status'] == 'enabled' -%}
|
||||
{%- set pfcwd_sock =
|
||||
'hybrid_pfc_deadlock_enable=1
|
||||
pfc_deadlock_seq_control=1
|
||||
sai_pfc_dlr_init_capability=1' -%}
|
||||
{%- endif %}
|
||||
sai_load_hw_config=/etc/bcm/flex/bcm56870_a0_premium_issu/b870.6.4.1/
|
||||
l3_alpm_hit_skip=1
|
||||
sai_adjust_acl_drop_in_rx_drop=1
|
||||
sai_verify_incoming_chksum=0
|
||||
{{ map_prio }}
|
||||
{{ pfcwd_sock }}
|
||||
host_as_route_disable=1
|
||||
use_all_splithorizon_groups=1
|
||||
riot_enable=1
|
||||
|
@ -1,6 +1,4 @@
|
||||
{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}
|
||||
{% set different_dscp_to_tc_map = true %}
|
||||
{% set different_tc_to_queue_map = true %}
|
||||
{%- macro generate_dscp_to_tc_map() %}
|
||||
"DSCP_TO_TC_MAP": {
|
||||
"AZURE": {
|
||||
@ -69,73 +67,7 @@
|
||||
"62": "1",
|
||||
"63": "1"
|
||||
},
|
||||
"AZURE_UPLINK": {
|
||||
"0" : "1",
|
||||
"1" : "1",
|
||||
"2" : "2",
|
||||
"3" : "3",
|
||||
"4" : "4",
|
||||
"5" : "1",
|
||||
"6" : "6",
|
||||
"7" : "1",
|
||||
"8" : "0",
|
||||
"9" : "1",
|
||||
"10": "1",
|
||||
"11": "1",
|
||||
"12": "1",
|
||||
"13": "1",
|
||||
"14": "1",
|
||||
"15": "1",
|
||||
"16": "1",
|
||||
"17": "1",
|
||||
"18": "1",
|
||||
"19": "1",
|
||||
"20": "1",
|
||||
"21": "1",
|
||||
"22": "1",
|
||||
"23": "1",
|
||||
"24": "1",
|
||||
"25": "1",
|
||||
"26": "1",
|
||||
"27": "1",
|
||||
"28": "1",
|
||||
"29": "1",
|
||||
"30": "1",
|
||||
"31": "1",
|
||||
"32": "1",
|
||||
"33": "8",
|
||||
"34": "1",
|
||||
"35": "1",
|
||||
"36": "1",
|
||||
"37": "1",
|
||||
"38": "1",
|
||||
"39": "1",
|
||||
"40": "1",
|
||||
"41": "1",
|
||||
"42": "1",
|
||||
"43": "1",
|
||||
"44": "1",
|
||||
"45": "1",
|
||||
"46": "5",
|
||||
"47": "1",
|
||||
"48": "7",
|
||||
"49": "1",
|
||||
"50": "1",
|
||||
"51": "1",
|
||||
"52": "1",
|
||||
"53": "1",
|
||||
"54": "1",
|
||||
"55": "1",
|
||||
"56": "1",
|
||||
"57": "1",
|
||||
"58": "1",
|
||||
"59": "1",
|
||||
"60": "1",
|
||||
"61": "1",
|
||||
"62": "1",
|
||||
"63": "1"
|
||||
},
|
||||
"AZURE_TUNNEL": {
|
||||
"AZURE_TUNNEL": {
|
||||
"0" : "1",
|
||||
"1" : "1",
|
||||
"2" : "1",
|
||||
@ -242,17 +174,6 @@
|
||||
"7": "7",
|
||||
"8": "1"
|
||||
},
|
||||
"AZURE_UPLINK": {
|
||||
"0": "0",
|
||||
"1": "1",
|
||||
"2": "2",
|
||||
"3": "3",
|
||||
"4": "4",
|
||||
"5": "5",
|
||||
"6": "6",
|
||||
"7": "7",
|
||||
"8": "1"
|
||||
},
|
||||
"AZURE_TUNNEL": {
|
||||
"0": "0",
|
||||
"1": "1",
|
||||
|
@ -1,24 +1,16 @@
|
||||
{# Construct config.bcm to include additional soc properties per specific device metadata requirement #}
|
||||
{%- set map_prio = '' -%}
|
||||
{%- set pfcwd_sock = '' -%}
|
||||
{%- if DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['subtype'] is defined -%}
|
||||
{%- set switch_subtype = DEVICE_METADATA['localhost']['subtype'] -%}
|
||||
{%- if 'dualtor' in switch_subtype.lower() %}
|
||||
{%- set map_prio = 'sai_remap_prio_on_tnl_egress=1' -%}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- if SYSTEM_DEFAULTS is defined and 'tunnel_qos_remap' in SYSTEM_DEFAULTS and SYSTEM_DEFAULTS['tunnel_qos_remap']['status'] == 'enabled' -%}
|
||||
{%- set pfcwd_sock =
|
||||
'hybrid_pfc_deadlock_enable=1
|
||||
pfc_deadlock_seq_control=1
|
||||
sai_pfc_dlr_init_capability=1' -%}
|
||||
{%- endif %}
|
||||
sai_load_hw_config=/etc/bcm/flex/bcm56870_a0_premium_issu/b870.6.4.1/
|
||||
l3_alpm_hit_skip=1
|
||||
sai_adjust_acl_drop_in_rx_drop=1
|
||||
sai_verify_incoming_chksum=0
|
||||
{{ map_prio }}
|
||||
{{ pfcwd_sock }}
|
||||
host_as_route_disable=1
|
||||
use_all_splithorizon_groups=1
|
||||
riot_enable=1
|
||||
|
@ -3,7 +3,7 @@
|
||||
"DCS-7050CX3-32S": {
|
||||
"component": {
|
||||
"Aboot()": {},
|
||||
"Scd(addr=0000:02:00.0)": {},
|
||||
"Scd(addr=0000:00:18.7)": {},
|
||||
"Ucd90120A(addr=3-004e)": {},
|
||||
"Ucd90120A(addr=16-004e)": {},
|
||||
"CrowSysCpld(addr=2-0023)": {}
|
||||
|
@ -10,9 +10,6 @@ bus "i2c-14" "SCD 0000:02:00.0 SMBus master 1 bus 4"
|
||||
chip "k10temp-pci-00c3"
|
||||
label temp1 "Cpu temp sensor"
|
||||
|
||||
chip "fam15h_power-pci-00c4"
|
||||
ignore power1
|
||||
|
||||
chip "max6658-i2c-2-4c"
|
||||
label temp1 "Cpu board temp sensor"
|
||||
set temp1_max 75
|
||||
@ -36,13 +33,7 @@ chip "pmbus-i2c-13-58"
|
||||
label temp2 "Power supply 1 inlet temp sensor"
|
||||
label temp3 "Power supply 1 sensor"
|
||||
|
||||
ignore fan2
|
||||
ignore fan3
|
||||
|
||||
chip "pmbus-i2c-14-58"
|
||||
label temp1 "Power supply 2 hotspot sensor"
|
||||
label temp2 "Power supply 2 inlet temp sensor"
|
||||
label temp3 "Power supply 2 sensor"
|
||||
|
||||
ignore fan2
|
||||
ignore fan3
|
||||
|
@ -152,6 +152,11 @@
|
||||
id: '1466'
|
||||
name: 'Host bridge: Advanced Micro Devices, Inc. [AMD] Family 17h (Models 00h-0fh)
|
||||
Data Fabric: Device 18h; Function 6'
|
||||
- bus: '00'
|
||||
dev: '18'
|
||||
fn: '7'
|
||||
id: '0001'
|
||||
name: 'Host bridge: Arastra Inc. Device 0001'
|
||||
- bus: '01'
|
||||
dev: '00'
|
||||
fn: '0'
|
||||
|
@ -1,2 +1,2 @@
|
||||
SYNCD_SHM_SIZE=512m
|
||||
SYNCD_SHM_SIZE=256m
|
||||
is_ltsw_chip=1
|
||||
|
@ -152,6 +152,11 @@
|
||||
id: '1466'
|
||||
name: 'Host bridge: Advanced Micro Devices, Inc. [AMD] Family 17h (Models 00h-0fh)
|
||||
Data Fabric: Device 18h; Function 6'
|
||||
- bus: '00'
|
||||
dev: '18'
|
||||
fn: '7'
|
||||
id: '0001'
|
||||
name: 'Host bridge: Arastra Inc. Device 0001'
|
||||
- bus: '01'
|
||||
dev: '00'
|
||||
fn: '0'
|
||||
|
@ -1,2 +1,2 @@
|
||||
SYNCD_SHM_SIZE=512m
|
||||
SYNCD_SHM_SIZE=256m
|
||||
is_ltsw_chip=1
|
||||
|
@ -64,6 +64,11 @@
|
||||
id: '1578'
|
||||
name: 'Encryption controller: Advanced Micro Devices, Inc. [AMD] Carrizo Platform
|
||||
Security Processor'
|
||||
- bus: '00'
|
||||
dev: 09
|
||||
fn: '0'
|
||||
id: '0001'
|
||||
name: 'Host bridge: Arastra Inc. Device 0001'
|
||||
- bus: '00'
|
||||
dev: 09
|
||||
fn: '2'
|
||||
|
@ -64,6 +64,11 @@
|
||||
id: '1578'
|
||||
name: 'Encryption controller: Advanced Micro Devices, Inc. [AMD] Carrizo Platform
|
||||
Security Processor'
|
||||
- bus: '00'
|
||||
dev: 09
|
||||
fn: '0'
|
||||
id: '0001'
|
||||
name: 'Host bridge: Arastra Inc. Device 0001'
|
||||
- bus: '00'
|
||||
dev: 09
|
||||
fn: '2'
|
||||
|
@ -449,4 +449,3 @@ serdes_preemphasis_109=0x145c00
|
||||
|
||||
mmu_init_config="MSFT-TH-Tier1"
|
||||
phy_an_lt_msft=1
|
||||
phy_unlos_msft=1
|
||||
|
@ -1,23 +1,7 @@
|
||||
{
|
||||
"chassis": {
|
||||
"name": "DCS-7060CX-32S",
|
||||
"components": [
|
||||
{
|
||||
"name": "Aboot()"
|
||||
},
|
||||
{
|
||||
"name": "Scd(addr=0000:02:00.0)"
|
||||
},
|
||||
{
|
||||
"name": "Ucd90120A(addr=4-004e)"
|
||||
},
|
||||
{
|
||||
"name": "Ucd90120A(addr=8-004e)"
|
||||
},
|
||||
{
|
||||
"name": "CrowSysCpld(addr=2-0023)"
|
||||
}
|
||||
],
|
||||
"components": [],
|
||||
"fans": [],
|
||||
"fan_drawers": [
|
||||
{
|
||||
@ -56,55 +40,52 @@
|
||||
"psus": [
|
||||
{
|
||||
"name": "psu1",
|
||||
"fans": [
|
||||
{
|
||||
"name": "psu1/1",
|
||||
"speed": {
|
||||
"controllable": false
|
||||
}
|
||||
}
|
||||
]
|
||||
"fans": []
|
||||
},
|
||||
{
|
||||
"name": "psu2",
|
||||
"fans": [
|
||||
{
|
||||
"name": "psu2/1",
|
||||
"speed": {
|
||||
"controllable": false
|
||||
}
|
||||
}
|
||||
]
|
||||
"fans": []
|
||||
}
|
||||
],
|
||||
"thermals": [
|
||||
{
|
||||
"name": "Cpu temp sensor",
|
||||
"controllable": false
|
||||
"name": "Cpu temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Cpu board temp sensor",
|
||||
"controllable": false
|
||||
"name": "Cpu board temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Back-panel temp sensor",
|
||||
"controllable": false
|
||||
"name": "Back-panel temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Board sensor",
|
||||
"controllable": false
|
||||
"name": "Board sensor"
|
||||
},
|
||||
{
|
||||
"name": "Switch chip left sensor",
|
||||
"controllable": false
|
||||
"name": "Switch chip left sensor"
|
||||
},
|
||||
{
|
||||
"name": "Switch chip right sensor",
|
||||
"controllable": false
|
||||
"name": "Switch chip right sensor"
|
||||
},
|
||||
{
|
||||
"name": "Front-panel temp sensor",
|
||||
"controllable": false
|
||||
"name": "Front-panel temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 1 hotspot sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 1 inlet temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 1 exhaust temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 2 hotspot sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 2 inlet temp sensor"
|
||||
},
|
||||
{
|
||||
"name": "Power supply 2 exhaust temp sensor"
|
||||
}
|
||||
],
|
||||
"sfps": [
|
||||
|
@ -1,13 +0,0 @@
|
||||
{
|
||||
"chassis": {
|
||||
"DCS-7060CX-32S": {
|
||||
"component": {
|
||||
"Aboot()": {},
|
||||
"Scd(addr=0000:02:00.0)": {},
|
||||
"Ucd90120A(addr=4-004e)": {},
|
||||
"Ucd90120A(addr=8-004e)": {},
|
||||
"CrowSysCpld(addr=2-0023)": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -11,9 +11,6 @@ bus "i2c-7" "SCD 0000:02:00.0 SMBus master 0 bus 5"
|
||||
chip "k10temp-pci-00c3"
|
||||
label temp1 "Cpu temp sensor"
|
||||
|
||||
chip "fam15h_power-pci-00c4"
|
||||
ignore power1
|
||||
|
||||
chip "max6697-i2c-2-1a"
|
||||
label temp1 "Board sensor"
|
||||
set temp1_max 95
|
||||
@ -48,13 +45,7 @@ chip "pmbus-i2c-6-58"
|
||||
label temp2 "Power supply 1 inlet temp sensor"
|
||||
label temp3 "Power supply 1 exhaust temp sensor"
|
||||
|
||||
ignore fan2
|
||||
ignore fan3
|
||||
|
||||
chip "pmbus-i2c-5-58"
|
||||
label temp1 "Power supply 2 hotspot sensor"
|
||||
label temp2 "Power supply 2 inlet temp sensor"
|
||||
label temp3 "Power supply 2 exhaust temp sensor"
|
||||
|
||||
ignore fan2
|
||||
ignore fan3
|
||||
|
@ -16,8 +16,6 @@ miim_intr_enable.0=0
|
||||
module_64ports.0=1
|
||||
multicast_l2_range.0=511
|
||||
oversubscribe_mode=1
|
||||
sai_tunnel_global_sip_mask_enable=1
|
||||
bcm_tunnel_term_compatible_mode=1
|
||||
parity_correction=1
|
||||
parity_enable=1
|
||||
pbmp_xport_xe.0=0x3ffffffffffffffffffffffffffffffffffffffe
|
||||
|
@ -186,6 +186,11 @@
|
||||
id: 6f37
|
||||
name: 'Performance counters: Intel Corporation Xeon E7 v4/Xeon E5 v4/Xeon E3 v4/Xeon
|
||||
D R3 QPI Link 0/1 (rev 03)'
|
||||
- bus: ff
|
||||
dev: 0b
|
||||
fn: '3'
|
||||
id: '0001'
|
||||
name: 'System peripheral: Arastra Inc. Device 0001 (rev 03)'
|
||||
- bus: ff
|
||||
dev: 0c
|
||||
fn: '0'
|
||||
|
@ -16,8 +16,6 @@ miim_intr_enable.0=0
|
||||
module_64ports.0=1
|
||||
multicast_l2_range.0=511
|
||||
oversubscribe_mode=1
|
||||
sai_tunnel_global_sip_mask_enable=1
|
||||
bcm_tunnel_term_compatible_mode=1
|
||||
parity_correction=1
|
||||
parity_enable=1
|
||||
pbmp_xport_xe.0=0x3ffffffffffffffffffffffffffffffffffffffe
|
||||
|
@ -1,104 +0,0 @@
|
||||
{
|
||||
"interfaces": {
|
||||
"Ethernet0": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet8": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet16": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet24": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet32": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet40": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet48": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet56": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet64": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet72": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet80": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet88": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet96": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet104": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet112": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet120": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet128": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet136": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet144": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet152": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet160": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet168": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet176": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet184": {
|
||||
"default_brkout_mode": "2x200G[100G,50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet192": {
|
||||
"default_brkout_mode": "4x100G[50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet200": {
|
||||
"default_brkout_mode": "4x100G[50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet208": {
|
||||
"default_brkout_mode": "4x100G[50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet216": {
|
||||
"default_brkout_mode": "4x100G[50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet224": {
|
||||
"default_brkout_mode": "4x100G[50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet232": {
|
||||
"default_brkout_mode": "4x100G[50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet240": {
|
||||
"default_brkout_mode": "4x100G[50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet248": {
|
||||
"default_brkout_mode": "4x100G[50G,40G,25G,10G]"
|
||||
},
|
||||
"Ethernet256": {
|
||||
"default_brkout_mode": "1x10G",
|
||||
"fec": "none"
|
||||
}
|
||||
}
|
||||
}
|
@ -1 +0,0 @@
|
||||
SAI_INIT_CONFIG_FILE=/usr/share/sonic/hwsku/th4-a7060dx5-32-200Gx48-100Gx32.config.bcm
|
@ -1,879 +0,0 @@
|
||||
# configuration yaml file
|
||||
# device:
|
||||
# <unit>:
|
||||
# <table>:
|
||||
# ?
|
||||
# <key_fld_1>: <value>
|
||||
# <key_fld_2>: <value>
|
||||
# ...
|
||||
# <key_fld_n>: <value>
|
||||
# :
|
||||
# <data_fld_1>: <value>
|
||||
# <data_fld_2>: <value>
|
||||
# ...
|
||||
# <data_fld_n>: <value>
|
||||
#
|
||||
|
||||
---
|
||||
bcm_device:
|
||||
0:
|
||||
global:
|
||||
pktio_mode: 1
|
||||
vlan_flooding_l2mc_num_reserved: 0
|
||||
ipv6_lpm_128b_enable: 1
|
||||
shared_block_mask_section: uc_bc
|
||||
skip_protocol_default_entries: 1
|
||||
# LTSW uses value 1 for ALPM combined mode
|
||||
l3_alpm_template: 1
|
||||
l3_alpm_hit_skip: 1
|
||||
sai_feat_tail_timestamp : 1
|
||||
sai_field_group_auto_prioritize: 1
|
||||
#l3_intf_vlan_split_egress for MTU at L3IF
|
||||
l3_intf_vlan_split_egress : 1
|
||||
|
||||
# vxlan
|
||||
l3_alpm_template: 1
|
||||
riot_overlay_l3_egress_mem_size: 16384
|
||||
riot_overlay_l3_intf_mem_size: 4096
|
||||
l3_ecmp_member_first_lkup_mem_size: 12288
|
||||
bcm_tunnel_term_compatible_mode: 1
|
||||
shared_l2_tunnel: 1
|
||||
sai_tunnel_support: 10
|
||||
sai_tunnel_underlay_route_mode: 2
|
||||
sai_tunnel_ecmp_sharing_mode: 0 # change to 2 if SAI_NEXT_HOP_GROUP_ATTR_LEVEL_1 used
|
||||
|
||||
# bfd
|
||||
#bfd_enable: 1
|
||||
#bfd_sha1_keys: 5
|
||||
#bfd_num_sessions: 2048
|
||||
#bfd_simple_password_keys: 5
|
||||
#num_queues_pci: 47
|
||||
#num_queues_uc0: 1
|
||||
#bfd_feature_enable: 1
|
||||
#bfd_use_endpoint_id_as_discriminator: 1
|
||||
#bfd_tx_raw_ingress_enable: 1
|
||||
#sai_eapp_config_file: "/usr/share/sonic/device/x86_64-broadcom_common/eapps/eapp_config.json"
|
||||
---
|
||||
device:
|
||||
0:
|
||||
PC_SERDES_CONFIG:
|
||||
PKG_SWAP_BYPASS: 1
|
||||
|
||||
PC_PM_CORE:
|
||||
?
|
||||
PC_PM_ID: 1
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x67235401
|
||||
TX_LANE_MAP: 0x2731465
|
||||
RX_POLARITY_FLIP: 0x00
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 2
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x31752064
|
||||
TX_LANE_MAP: 0x76025314
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 3
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x67241503
|
||||
TX_LANE_MAP: 0x13650274
|
||||
RX_POLARITY_FLIP: 0x00
|
||||
TX_POLARITY_FLIP: 0x01
|
||||
?
|
||||
PC_PM_ID: 4
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x46025713
|
||||
TX_LANE_MAP: 0x31650274
|
||||
RX_POLARITY_FLIP: 0x00
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 5
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x62713054
|
||||
TX_LANE_MAP: 0x21703465
|
||||
RX_POLARITY_FLIP: 0x00
|
||||
TX_POLARITY_FLIP: 0xd8
|
||||
?
|
||||
PC_PM_ID: 6
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x13460257
|
||||
TX_LANE_MAP: 0x64137502
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 7
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x43512670
|
||||
TX_LANE_MAP: 0x2571364
|
||||
RX_POLARITY_FLIP: 0x00
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 8
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x14270356
|
||||
TX_LANE_MAP: 0x64237501
|
||||
RX_POLARITY_FLIP: 0xfb
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 9
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x76140235
|
||||
TX_LANE_MAP: 0x74036521
|
||||
RX_POLARITY_FLIP: 0xc1
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 10
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x47125603
|
||||
TX_LANE_MAP: 0x30451276
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 11
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x76041235
|
||||
TX_LANE_MAP: 0x74036512
|
||||
RX_POLARITY_FLIP: 0x40
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 12
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x27145603
|
||||
TX_LANE_MAP: 0x21450376
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 13
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x2735146
|
||||
TX_LANE_MAP: 0x3657421
|
||||
RX_POLARITY_FLIP: 0x00
|
||||
TX_POLARITY_FLIP: 0xc8
|
||||
?
|
||||
PC_PM_ID: 14
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x47306521
|
||||
TX_LANE_MAP: 0x12643075
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0x33
|
||||
?
|
||||
PC_PM_ID: 15
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x20157436
|
||||
TX_LANE_MAP: 0x56237104
|
||||
RX_POLARITY_FLIP: 0x08
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 16
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x14270356
|
||||
TX_LANE_MAP: 0x76325014
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 49
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x56704312
|
||||
TX_LANE_MAP: 0x32641570
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 50
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x21564073
|
||||
TX_LANE_MAP: 0x50314762
|
||||
RX_POLARITY_FLIP: 0x00
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 51
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x36715402
|
||||
TX_LANE_MAP: 0x32641075
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 52
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x51462073
|
||||
TX_LANE_MAP: 0x5127634
|
||||
RX_POLARITY_FLIP: 0x69
|
||||
TX_POLARITY_FLIP: 0x08
|
||||
?
|
||||
PC_PM_ID: 53
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x73204651
|
||||
TX_LANE_MAP: 0x32640175
|
||||
RX_POLARITY_FLIP: 0x69
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 54
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x5134627
|
||||
TX_LANE_MAP: 0x60137254
|
||||
RX_POLARITY_FLIP: 0x00
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 55
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x57420163
|
||||
TX_LANE_MAP: 0x32651074
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 56
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x5134627
|
||||
TX_LANE_MAP: 0x10524376
|
||||
RX_POLARITY_FLIP: 0x05
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 57
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x73520146
|
||||
TX_LANE_MAP: 0x31650274
|
||||
RX_POLARITY_FLIP: 0x3d
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 58
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x16072354
|
||||
TX_LANE_MAP: 0x51634270
|
||||
RX_POLARITY_FLIP: 0x01
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 59
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x72530164
|
||||
TX_LANE_MAP: 0x32506147
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 60
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x4371256
|
||||
TX_LANE_MAP: 0x21673054
|
||||
RX_POLARITY_FLIP: 0x00
|
||||
TX_POLARITY_FLIP: 0xcc
|
||||
?
|
||||
PC_PM_ID: 61
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x67241503
|
||||
TX_LANE_MAP: 0x12730465
|
||||
RX_POLARITY_FLIP: 0x20
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 62
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x73625140
|
||||
TX_LANE_MAP: 0x12640573
|
||||
RX_POLARITY_FLIP: 0x00
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
?
|
||||
PC_PM_ID: 63
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x23674015
|
||||
TX_LANE_MAP: 0x57206143
|
||||
RX_POLARITY_FLIP: 0xff
|
||||
TX_POLARITY_FLIP: 0xff
|
||||
?
|
||||
PC_PM_ID: 64
|
||||
CORE_INDEX: 0
|
||||
:
|
||||
RX_LANE_MAP_AUTO: 0
|
||||
TX_LANE_MAP_AUTO: 0
|
||||
RX_POLARITY_FLIP_AUTO: 0
|
||||
TX_POLARITY_FLIP_AUTO: 0
|
||||
RX_LANE_MAP: 0x57314260
|
||||
TX_LANE_MAP: 0x1735264
|
||||
RX_POLARITY_FLIP: 0x10
|
||||
TX_POLARITY_FLIP: 0x00
|
||||
...
|
||||
---
|
||||
device:
|
||||
0:
|
||||
PC_PORT_PHYS_MAP:
|
||||
?
|
||||
PORT_ID: 0
|
||||
:
|
||||
PC_PHYS_PORT_ID: 0
|
||||
?
|
||||
PORT_ID: 1
|
||||
:
|
||||
PC_PHYS_PORT_ID: 1
|
||||
?
|
||||
PORT_ID: 2
|
||||
:
|
||||
PC_PHYS_PORT_ID: 3
|
||||
?
|
||||
PORT_ID: 5
|
||||
:
|
||||
PC_PHYS_PORT_ID: 5
|
||||
?
|
||||
PORT_ID: 6
|
||||
:
|
||||
PC_PHYS_PORT_ID: 7
|
||||
?
|
||||
PORT_ID: 9
|
||||
:
|
||||
PC_PHYS_PORT_ID: 9
|
||||
?
|
||||
PORT_ID: 10
|
||||
:
|
||||
PC_PHYS_PORT_ID: 11
|
||||
?
|
||||
PORT_ID: 13
|
||||
:
|
||||
PC_PHYS_PORT_ID: 13
|
||||
?
|
||||
PORT_ID: 14
|
||||
:
|
||||
PC_PHYS_PORT_ID: 15
|
||||
?
|
||||
PORT_ID: 17
|
||||
:
|
||||
PC_PHYS_PORT_ID: 17
|
||||
?
|
||||
PORT_ID: 18
|
||||
:
|
||||
PC_PHYS_PORT_ID: 19
|
||||
?
|
||||
PORT_ID: 21
|
||||
:
|
||||
PC_PHYS_PORT_ID: 21
|
||||
?
|
||||
PORT_ID: 22
|
||||
:
|
||||
PC_PHYS_PORT_ID: 23
|
||||
?
|
||||
PORT_ID: 25
|
||||
:
|
||||
PC_PHYS_PORT_ID: 25
|
||||
?
|
||||
PORT_ID: 26
|
||||
:
|
||||
PC_PHYS_PORT_ID: 27
|
||||
?
|
||||
PORT_ID: 29
|
||||
:
|
||||
PC_PHYS_PORT_ID: 29
|
||||
?
|
||||
PORT_ID: 30
|
||||
:
|
||||
PC_PHYS_PORT_ID: 31
|
||||
?
|
||||
PORT_ID: 33
|
||||
:
|
||||
PC_PHYS_PORT_ID: 259
|
||||
?
|
||||
PORT_ID: 34
|
||||
:
|
||||
PC_PHYS_PORT_ID: 33
|
||||
?
|
||||
PORT_ID: 35
|
||||
:
|
||||
PC_PHYS_PORT_ID: 35
|
||||
?
|
||||
PORT_ID: 38
|
||||
:
|
||||
PC_PHYS_PORT_ID: 37
|
||||
?
|
||||
PORT_ID: 39
|
||||
:
|
||||
PC_PHYS_PORT_ID: 39
|
||||
?
|
||||
PORT_ID: 42
|
||||
:
|
||||
PC_PHYS_PORT_ID: 41
|
||||
?
|
||||
PORT_ID: 43
|
||||
:
|
||||
PC_PHYS_PORT_ID: 43
|
||||
?
|
||||
PORT_ID: 46
|
||||
:
|
||||
PC_PHYS_PORT_ID: 45
|
||||
?
|
||||
PORT_ID: 47
|
||||
:
|
||||
PC_PHYS_PORT_ID: 47
|
||||
?
|
||||
PORT_ID: 50
|
||||
:
|
||||
PC_PHYS_PORT_ID: 258
|
||||
?
|
||||
PORT_ID: 51
|
||||
:
|
||||
PC_PHYS_PORT_ID: 49
|
||||
?
|
||||
PORT_ID: 52
|
||||
:
|
||||
PC_PHYS_PORT_ID: 51
|
||||
?
|
||||
PORT_ID: 55
|
||||
:
|
||||
PC_PHYS_PORT_ID: 53
|
||||
?
|
||||
PORT_ID: 56
|
||||
:
|
||||
PC_PHYS_PORT_ID: 55
|
||||
?
|
||||
PORT_ID: 59
|
||||
:
|
||||
PC_PHYS_PORT_ID: 57
|
||||
?
|
||||
PORT_ID: 60
|
||||
:
|
||||
PC_PHYS_PORT_ID: 59
|
||||
?
|
||||
PORT_ID: 63
|
||||
:
|
||||
PC_PHYS_PORT_ID: 61
|
||||
?
|
||||
PORT_ID: 64
|
||||
:
|
||||
PC_PHYS_PORT_ID: 63
|
||||
?
|
||||
PORT_ID: 67
|
||||
:
|
||||
PC_PHYS_PORT_ID: 260
|
||||
?
|
||||
PORT_ID: 204
|
||||
:
|
||||
PC_PHYS_PORT_ID: 193
|
||||
?
|
||||
PORT_ID: 205
|
||||
:
|
||||
PC_PHYS_PORT_ID: 195
|
||||
?
|
||||
PORT_ID: 208
|
||||
:
|
||||
PC_PHYS_PORT_ID: 197
|
||||
?
|
||||
PORT_ID: 209
|
||||
:
|
||||
PC_PHYS_PORT_ID: 199
|
||||
?
|
||||
PORT_ID: 212
|
||||
:
|
||||
PC_PHYS_PORT_ID: 201
|
||||
?
|
||||
PORT_ID: 213
|
||||
:
|
||||
PC_PHYS_PORT_ID: 203
|
||||
?
|
||||
PORT_ID: 216
|
||||
:
|
||||
PC_PHYS_PORT_ID: 205
|
||||
?
|
||||
PORT_ID: 217
|
||||
:
|
||||
PC_PHYS_PORT_ID: 207
|
||||
?
|
||||
PORT_ID: 221
|
||||
:
|
||||
PC_PHYS_PORT_ID: 209
|
||||
?
|
||||
PORT_ID: 222
|
||||
:
|
||||
PC_PHYS_PORT_ID: 211
|
||||
?
|
||||
PORT_ID: 225
|
||||
:
|
||||
PC_PHYS_PORT_ID: 213
|
||||
?
|
||||
PORT_ID: 226
|
||||
:
|
||||
PC_PHYS_PORT_ID: 215
|
||||
?
|
||||
PORT_ID: 229
|
||||
:
|
||||
PC_PHYS_PORT_ID: 217
|
||||
?
|
||||
PORT_ID: 230
|
||||
:
|
||||
PC_PHYS_PORT_ID: 219
|
||||
?
|
||||
PORT_ID: 233
|
||||
:
|
||||
PC_PHYS_PORT_ID: 221
|
||||
?
|
||||
PORT_ID: 234
|
||||
:
|
||||
PC_PHYS_PORT_ID: 223
|
||||
?
|
||||
PORT_ID: 237
|
||||
:
|
||||
                PC_PHYS_PORT_ID: 265
            ?
                PORT_ID: 238
            :
                PC_PHYS_PORT_ID: 225
            ?
                PORT_ID: 239
            :
                PC_PHYS_PORT_ID: 226
            ?
                PORT_ID: 240
            :
                PC_PHYS_PORT_ID: 227
            ?
                PORT_ID: 241
            :
                PC_PHYS_PORT_ID: 228
            ?
                PORT_ID: 242
            :
                PC_PHYS_PORT_ID: 229
            ?
                PORT_ID: 243
            :
                PC_PHYS_PORT_ID: 230
            ?
                PORT_ID: 244
            :
                PC_PHYS_PORT_ID: 231
            ?
                PORT_ID: 245
            :
                PC_PHYS_PORT_ID: 232
            ?
                PORT_ID: 246
            :
                PC_PHYS_PORT_ID: 233
            ?
                PORT_ID: 247
            :
                PC_PHYS_PORT_ID: 234
            ?
                PORT_ID: 248
            :
                PC_PHYS_PORT_ID: 235
            ?
                PORT_ID: 249
            :
                PC_PHYS_PORT_ID: 236
            ?
                PORT_ID: 250
            :
                PC_PHYS_PORT_ID: 237
            ?
                PORT_ID: 251
            :
                PC_PHYS_PORT_ID: 238
            ?
                PORT_ID: 252
            :
                PC_PHYS_PORT_ID: 239
            ?
                PORT_ID: 253
            :
                PC_PHYS_PORT_ID: 240
            ?
                PORT_ID: 255
            :
                PC_PHYS_PORT_ID: 241
            ?
                PORT_ID: 256
            :
                PC_PHYS_PORT_ID: 242
            ?
                PORT_ID: 257
            :
                PC_PHYS_PORT_ID: 243
            ?
                PORT_ID: 258
            :
                PC_PHYS_PORT_ID: 244
            ?
                PORT_ID: 259
            :
                PC_PHYS_PORT_ID: 245
            ?
                PORT_ID: 260
            :
                PC_PHYS_PORT_ID: 246
            ?
                PORT_ID: 261
            :
                PC_PHYS_PORT_ID: 247
            ?
                PORT_ID: 262
            :
                PC_PHYS_PORT_ID: 248
            ?
                PORT_ID: 263
            :
                PC_PHYS_PORT_ID: 249
            ?
                PORT_ID: 264
            :
                PC_PHYS_PORT_ID: 250
            ?
                PORT_ID: 265
            :
                PC_PHYS_PORT_ID: 251
            ?
                PORT_ID: 266
            :
                PC_PHYS_PORT_ID: 252
            ?
                PORT_ID: 267
            :
                PC_PHYS_PORT_ID: 253
            ?
                PORT_ID: 268
            :
                PC_PHYS_PORT_ID: 254
            ?
                PORT_ID: 269
            :
                PC_PHYS_PORT_ID: 255
            ?
                PORT_ID: 270
            :
                PC_PHYS_PORT_ID: 256
...
---
device:
    0:
        PC_PORT:
            ?
                PORT_ID: 0
            :
                &port_mode_10g
                ENABLE: 1
                SPEED: 10000
                NUM_LANES: 1
            ?
                PORT_ID: [[50, 50]]
            :
                ENABLE: 0
                MAX_FRAME_SIZE: 9416
                SPEED: 10000
                NUM_LANES: 1
            ?
                PORT_ID: [[1, 2],
                          [5, 6],
                          [9, 10],
                          [13, 14],
                          [17, 18],
                          [21, 22],
                          [25, 26],
                          [29, 30],
                          [34, 35],
                          [38, 39],
                          [42, 43],
                          [46, 47],
                          [51, 52],
                          [55, 56],
                          [59, 60],
                          [63, 64],
                          [204, 205],
                          [208, 209],
                          [212, 213],
                          [216, 217],
                          [221, 222],
                          [225, 226],
                          [229, 230],
                          [233, 234]]
            :
                ENABLE: 0
                SPEED: 200000
                NUM_LANES: 4
                FEC_MODE: PC_FEC_RS544_2XN
                MAX_FRAME_SIZE: 9416
            ?
                PORT_ID: [[238, 241],
                          [242, 245],
                          [246, 249],
                          [250, 253],
                          [255, 258],
                          [259, 262],
                          [263, 266],
                          [267, 270]]
            :
                ENABLE: 0
                SPEED: 100000
                NUM_LANES: 2
                FEC_MODE: PC_FEC_RS544
                MAX_FRAME_SIZE: 9416
...
---
device:
    0:
        # Per pipe flex counter configuration
        CTR_EFLEX_CONFIG:
            CTR_ING_EFLEX_OPERMODE_PIPEUNIQUE: 0
            CTR_EGR_EFLEX_OPERMODE_PIPEUNIQUE: 0

        # Per pipe flex state configuration
        #FLEX_STATE_CONFIG:
        #    FLEX_STATE_ING_OPERMODE_PIPEUNIQUE: 0
        #    FLEX_STATE_EGR_OPERMODE_PIPEUNIQUE: 1

        # Lossy vs Lossless mode
        TM_THD_CONFIG:
            THRESHOLD_MODE: LOSSLESS

        # IFP mode
        FP_CONFIG:
            FP_ING_OPERMODE: GLOBAL_PIPE_AWARE
...
@ -1,114 +0,0 @@
#name        lanes            alias          index  speed   fec
Ethernet0    1                Ethernet1/1    1      25000
Ethernet1    5                Ethernet1/2    1      25000
Ethernet2    3                Ethernet1/3    1      25000
Ethernet3    7                Ethernet1/4    1      25000
Ethernet8    9                Ethernet2/1    2      25000
Ethernet9    13               Ethernet2/2    2      25000
Ethernet10   11               Ethernet2/3    2      25000
Ethernet11   15               Ethernet2/4    2      25000
Ethernet16   17               Ethernet3/1    3      25000
Ethernet17   21               Ethernet3/2    3      25000
Ethernet18   19               Ethernet3/3    3      25000
Ethernet19   23               Ethernet3/4    3      25000
Ethernet24   25               Ethernet4/1    4      25000
Ethernet25   29               Ethernet4/2    4      25000
Ethernet26   27               Ethernet4/3    4      25000
Ethernet27   31               Ethernet4/4    4      25000
Ethernet32   33               Ethernet5/1    5      25000
Ethernet33   37               Ethernet5/2    5      25000
Ethernet34   35               Ethernet5/3    5      25000
Ethernet35   39               Ethernet5/4    5      25000
Ethernet40   41               Ethernet6/1    6      25000
Ethernet41   45               Ethernet6/2    6      25000
Ethernet42   43               Ethernet6/3    6      25000
Ethernet43   47               Ethernet6/4    6      25000
Ethernet48   49               Ethernet7/1    7      25000
Ethernet49   53               Ethernet7/2    7      25000
Ethernet50   51               Ethernet7/3    7      25000
Ethernet51   55               Ethernet7/4    7      25000
Ethernet56   57               Ethernet8/1    8      25000
Ethernet57   61               Ethernet8/2    8      25000
Ethernet58   59               Ethernet8/3    8      25000
Ethernet59   63               Ethernet8/4    8      25000
Ethernet64   65               Ethernet9/1    9      25000
Ethernet65   69               Ethernet9/2    9      25000
Ethernet66   67               Ethernet9/3    9      25000
Ethernet67   71               Ethernet9/4    9      25000
Ethernet72   73               Ethernet10/1   10     25000
Ethernet73   77               Ethernet10/2   10     25000
Ethernet74   75               Ethernet10/3   10     25000
Ethernet75   79               Ethernet10/4   10     25000
Ethernet80   81               Ethernet11/1   11     25000
Ethernet81   85               Ethernet11/2   11     25000
Ethernet82   83               Ethernet11/3   11     25000
Ethernet83   87               Ethernet11/4   11     25000
Ethernet88   89               Ethernet12/1   12     25000
Ethernet89   93               Ethernet12/2   12     25000
Ethernet90   91               Ethernet12/3   12     25000
Ethernet91   95               Ethernet12/4   12     25000
Ethernet96   97               Ethernet13/1   13     25000
Ethernet97   101              Ethernet13/2   13     25000
Ethernet98   99               Ethernet13/3   13     25000
Ethernet99   103              Ethernet13/4   13     25000
Ethernet104  105              Ethernet14/1   14     25000
Ethernet105  109              Ethernet14/2   14     25000
Ethernet106  107              Ethernet14/3   14     25000
Ethernet107  111              Ethernet14/4   14     25000
Ethernet112  113              Ethernet15/1   15     25000
Ethernet113  117              Ethernet15/2   15     25000
Ethernet114  115              Ethernet15/3   15     25000
Ethernet115  119              Ethernet15/4   15     25000
Ethernet120  121              Ethernet16/1   16     25000
Ethernet121  125              Ethernet16/2   16     25000
Ethernet122  123              Ethernet16/3   16     25000
Ethernet123  127              Ethernet16/4   16     25000
Ethernet128  385              Ethernet17/1   17     25000
Ethernet129  389              Ethernet17/2   17     25000
Ethernet130  387              Ethernet17/3   17     25000
Ethernet131  391              Ethernet17/4   17     25000
Ethernet136  393              Ethernet18/1   18     25000
Ethernet137  397              Ethernet18/2   18     25000
Ethernet138  395              Ethernet18/3   18     25000
Ethernet139  399              Ethernet18/4   18     25000
Ethernet144  401              Ethernet19/1   19     25000
Ethernet145  405              Ethernet19/2   19     25000
Ethernet146  403              Ethernet19/3   19     25000
Ethernet147  407              Ethernet19/4   19     25000
Ethernet152  409              Ethernet20/1   20     25000
Ethernet153  413              Ethernet20/2   20     25000
Ethernet154  411              Ethernet20/3   20     25000
Ethernet155  415              Ethernet20/4   20     25000
Ethernet160  417              Ethernet21/1   21     25000
Ethernet161  421              Ethernet21/2   21     25000
Ethernet162  419              Ethernet21/3   21     25000
Ethernet163  423              Ethernet21/4   21     25000
Ethernet168  425              Ethernet22/1   22     25000
Ethernet169  429              Ethernet22/2   22     25000
Ethernet170  427              Ethernet22/3   22     25000
Ethernet171  431              Ethernet22/4   22     25000
Ethernet176  433              Ethernet23/1   23     25000
Ethernet177  437              Ethernet23/2   23     25000
Ethernet178  435              Ethernet23/3   23     25000
Ethernet179  439              Ethernet23/4   23     25000
Ethernet184  441              Ethernet24/1   24     25000
Ethernet185  445              Ethernet24/2   24     25000
Ethernet186  443              Ethernet24/3   24     25000
Ethernet187  447              Ethernet24/4   24     25000
Ethernet192  449,450,451,452  Ethernet25/1   25     100000  rs
Ethernet196  453,454,455,456  Ethernet25/5   25     100000  rs
Ethernet200  457,458,459,460  Ethernet26/1   26     100000  rs
Ethernet204  461,462,463,464  Ethernet26/5   26     100000  rs
Ethernet208  465,466,467,468  Ethernet27/1   27     100000  rs
Ethernet212  469,470,471,472  Ethernet27/5   27     100000  rs
Ethernet216  473,474,475,476  Ethernet28/1   28     100000  rs
Ethernet220  477,478,479,480  Ethernet28/5   28     100000  rs
Ethernet224  481,482,483,484  Ethernet29/1   29     200000  rs
Ethernet228  485,486,487,488  Ethernet29/5   29     200000  rs
Ethernet232  489,490,491,492  Ethernet30/1   30     200000  rs
Ethernet236  493,494,495,496  Ethernet30/5   30     200000  rs
Ethernet240  497,498,499,500  Ethernet31/1   31     200000  rs
Ethernet244  501,502,503,504  Ethernet31/5   31     200000  rs
Ethernet248  505,506,507,508  Ethernet32/1   32     200000  rs
Ethernet252  509,510,511,512  Ethernet32/5   32     200000  rs
Ethernet256  513              Ethernet33     33     10000   none
@ -1 +0,0 @@
SAI_INIT_CONFIG_FILE=/usr/share/sonic/hwsku/th4-a7060dx5-32-25Gx96-100Gx8-200Gx8.config.bcm
File diff suppressed because it is too large
@ -1 +0,0 @@
../../../common/profiles/th4/gen/BALANCED
@ -1,2 +0,0 @@
{%- set default_topo = 't1' %}
{%- include 'buffers_config.j2' %}
@ -1 +0,0 @@
BALANCED/buffers_defaults_t0.j2
@ -1 +0,0 @@
BALANCED/buffers_defaults_t1.j2
@ -1,104 +0,0 @@
{
    "interfaces": {
        "Ethernet0": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet8": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet16": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet24": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet32": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet40": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet48": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet56": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet64": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet72": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet80": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet88": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet96": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet104": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet112": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet120": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet128": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet136": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet144": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet152": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet160": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet168": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet176": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet184": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet192": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet200": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet208": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet216": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet224": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet232": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet240": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet248": {
            "default_brkout_mode": "1x400G[200G,100G,50G,40G,25G,10G]"
        },
        "Ethernet256": {
            "default_brkout_mode": "1x10G",
            "fec": "none"
        }
    }
}
@ -1 +0,0 @@
BALANCED/pg_profile_lookup.ini
@ -1,34 +0,0 @@
# name       lanes                             alias          index  speed   fec
Ethernet0    1,2,3,4,5,6,7,8                   Ethernet1/1    1      400000  rs
Ethernet8    9,10,11,12,13,14,15,16            Ethernet2/1    2      400000  rs
Ethernet16   17,18,19,20,21,22,23,24           Ethernet3/1    3      400000  rs
Ethernet24   25,26,27,28,29,30,31,32           Ethernet4/1    4      400000  rs
Ethernet32   33,34,35,36,37,38,39,40           Ethernet5/1    5      400000  rs
Ethernet40   41,42,43,44,45,46,47,48           Ethernet6/1    6      400000  rs
Ethernet48   49,50,51,52,53,54,55,56           Ethernet7/1    7      400000  rs
Ethernet56   57,58,59,60,61,62,63,64           Ethernet8/1    8      400000  rs
Ethernet64   65,66,67,68,69,70,71,72           Ethernet9/1    9      400000  rs
Ethernet72   73,74,75,76,77,78,79,80           Ethernet10/1   10     400000  rs
Ethernet80   81,82,83,84,85,86,87,88           Ethernet11/1   11     400000  rs
Ethernet88   89,90,91,92,93,94,95,96           Ethernet12/1   12     400000  rs
Ethernet96   97,98,99,100,101,102,103,104      Ethernet13/1   13     400000  rs
Ethernet104  105,106,107,108,109,110,111,112   Ethernet14/1   14     400000  rs
Ethernet112  113,114,115,116,117,118,119,120   Ethernet15/1   15     400000  rs
Ethernet120  121,122,123,124,125,126,127,128   Ethernet16/1   16     400000  rs
Ethernet128  385,386,387,388,389,390,391,392   Ethernet17/1   17     400000  rs
Ethernet136  393,394,395,396,397,398,399,400   Ethernet18/1   18     400000  rs
Ethernet144  401,402,403,404,405,406,407,408   Ethernet19/1   19     400000  rs
Ethernet152  409,410,411,412,413,414,415,416   Ethernet20/1   20     400000  rs
Ethernet160  417,418,419,420,421,422,423,424   Ethernet21/1   21     400000  rs
Ethernet168  425,426,427,428,429,430,431,432   Ethernet22/1   22     400000  rs
Ethernet176  433,434,435,436,437,438,439,440   Ethernet23/1   23     400000  rs
Ethernet184  441,442,443,444,445,446,447,448   Ethernet24/1   24     400000  rs
Ethernet192  449,450,451,452,453,454,455,456   Ethernet25/1   25     400000  rs
Ethernet200  457,458,459,460,461,462,463,464   Ethernet26/1   26     400000  rs
Ethernet208  465,466,467,468,469,470,471,472   Ethernet27/1   27     400000  rs
Ethernet216  473,474,475,476,477,478,479,480   Ethernet28/1   28     400000  rs
Ethernet224  481,482,483,484,485,486,487,488   Ethernet29/1   29     400000  rs
Ethernet232  489,490,491,492,493,494,495,496   Ethernet30/1   30     400000  rs
Ethernet240  497,498,499,500,501,502,503,504   Ethernet31/1   31     400000  rs
Ethernet248  505,506,507,508,509,510,511,512   Ethernet32/1   32     400000  rs
Ethernet256  513                               Ethernet33     33     10000   none
@ -1 +0,0 @@
{%- include 'qos_config.j2' %}
@ -1 +0,0 @@
SAI_INIT_CONFIG_FILE=/usr/share/sonic/hwsku/th4-a7060dx5-32.config.bcm
File diff suppressed because it is too large
@ -1 +0,0 @@
Arista-7060DX5-32 t1
@ -1 +0,0 @@
../x86_64-arista_common/pcie.yaml
File diff suppressed because it is too large
@ -1 +0,0 @@
broadcom
@ -1,12 +0,0 @@
{
    "chassis": {
        "DCS-7060DX5-32": {
            "component": {
                "Aboot()": {},
                "Scd(addr=0000:00:18.7)": {},
                "Scd(addr=0000:01:00.0)": {},
                "LorikeetSysCpld(addr=13-0023)": {}
            }
        }
    }
}
@ -1,2 +0,0 @@
SYNCD_SHM_SIZE=512m
is_ltsw_chip=1
@ -1 +0,0 @@
../x86_64-arista_common/platform_reboot
@ -1 +0,0 @@
../x86_64-arista_common/plugins/
@ -1 +0,0 @@
../x86_64-arista_common/pmon_daemon_control.json
@ -1,36 +0,0 @@
# libsensors configuration file for DCS-7060DX4-32
# ------------------------------------------------#

bus "i2c-9" "SCD 0000:00:18.7 SMBus master 0 bus 0"
bus "i2c-29" "SCD 0000:01:00.0 SMBus master 1 bus 0"
bus "i2c-32" "SCD 0000:01:00.0 SMBus master 1 bus 3"
bus "i2c-33" "SCD 0000:01:00.0 SMBus master 1 bus 4"

chip "max6658-i2c-9-4c"
    label temp1 "CPU board temp sensor"
    label temp2 "Back-panel temp sensor"

chip "max6581-i2c-29-4d"
    label temp1 "Center Rear"
    label temp2 "Switch board right sensor"
    label temp3 "Switch board left sensor"
    label temp4 "Front-panel temp sensor"
    label temp5 "Switch chip diode 1 sensor"
    label temp6 "Switch chip diode 2 sensor"
    ignore temp7
    ignore temp8

chip "pmbus-i2c-32-58"
    label temp1 "Power supply 1 hotspot sensor"
    label temp2 "Power supply 1 inlet temp sensor"
    label temp3 "Power supply 1 exhaust temp sensor"
    ignore fan2
    ignore fan3

chip "pmbus-i2c-33-58"
    label temp1 "Power supply 2 hotspot sensor"
    label temp2 "Power supply 2 inlet temp sensor"
    label temp3 "Power supply 2 exhaust temp sensor"
    ignore fan2
    ignore fan3
|
../x86_64-arista_common/system_health_monitoring_config.json
Some files were not shown because too many files have changed in this diff