# Starter pipeline
# Start with a minimal pipeline that you can customize to build and deploy your code.
# Add steps that build, run tests, deploy, and more:
# https://aka.ms/yaml

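# CI triggers: build commits pushed to the master and 202012 branches; changes
# limited to the .github directory are excluded.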
trigger:
  branches:
    include:
      - master
      - 202012
  paths:
    exclude:
      - .github

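# PR validation uses the same branch list and the same .github path exclusion.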
pr:
  branches:
    include:
      - master
      - 202012
  paths:
    exclude:
      - .github

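# Build run name: <team project>_<pipeline definition>_<source branch>_<date>.<revision>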
name: $(TeamProject)_$(Build.DefinitionName)_$(SourceBranchName)_$(Date:yyyyMMdd)$(Rev:.r)
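# External repositories used by the templates below: sonic-mgmt is pinned to its
# 202012 branch, while the shared buildimage templates are taken from master.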
resources:
  repositories:
  - repository: sonic-mgmt
    type: github
    name: sonic-net/sonic-mgmt
    endpoint: sonic-net
    ref: refs/heads/202012
  - repository: buildimage
    type: github
    name: sonic-net/sonic-buildimage
    endpoint: sonic-net
    ref: master

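# Pipeline-level variables: shared variable templates from the buildimage repo,
# plus the default cache mode consumed by the build templates.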
variables:
- template: .azure-pipelines/azure-pipelines-repd-build-variables.yml@buildimage
- template: .azure-pipelines/template-variables.yml@buildimage
- name: CACHE_MODE
  value: rcache

stages:
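# Build the virtual switch (vs) image only; the jobGroups parameter overrides the
# template's default job list. The Test stage below consumes this stage's output.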
- stage: BuildVS
  pool: sonicbld
  variables:
    CACHE_MODE: rcache
    VERSION_CONTROL_OPTIONS: 'SONIC_VERSION_CONTROL_COMPONENTS=deb,py2,py3,web,git,docker'
  jobs:
  - template: .azure-pipelines/azure-pipelines-build.yml
    parameters:
      buildOptions: 'USERNAME=admin SONIC_BUILD_JOBS=$(nproc) ${{ variables.VERSION_CONTROL_OPTIONS }}'
      jobGroups:
      - name: vs

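# Build the hardware platform images (broadcom, mellanox, marvell-armhf).
# dependsOn is empty, so this stage runs in parallel with BuildVS.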
- stage: Build
  pool: sonicbld
  dependsOn: []
  variables:
    CACHE_MODE: rcache
    VERSION_CONTROL_OPTIONS: 'SONIC_VERSION_CONTROL_COMPONENTS=deb,py2,py3,web,git,docker'
  jobs:
  - template: .azure-pipelines/azure-pipelines-build.yml
    parameters:
      buildOptions: 'USERNAME=admin SONIC_BUILD_JOBS=$(nproc) ${{ variables.VERSION_CONTROL_OPTIONS }}'
      jobGroups:
      - name: broadcom
      - name: mellanox
      - name: marvell-armhf
        pool: sonicbld-armhf
        timeoutInMinutes: 1200
        variables:
          PLATFORM_ARCH: armhf

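# KVM-based tests. The stage runs only when BuildVS finished successfully (with
# or without issues) and did not set SKIP_VSTEST to YES.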
- stage: Test
  dependsOn: BuildVS
  condition: and(succeeded(), and(ne(stageDependencies.BuildVS.outputs['vs.SetVar.SKIP_VSTEST'], 'YES'), in(dependencies.BuildVS.result, 'Succeeded', 'SucceededWithIssues')))
  variables:
  - group: SONiC-Elastictest
  - name: inventory
    value: veos_vtb
  - name: testbed_file
    value: vtestbed.csv

  jobs:
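  # vstest: install the swss-common debs, load the docker-sonic-vs image built
  # by BuildVS, and run the platform/vs pytest suite on this agent.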
  - job:
    pool: sonictest
    displayName: "vstest"
    timeoutInMinutes: 60
    steps:
    - checkout: self
      clean: true
      submodules: recursive
      displayName: 'Checkout code'

    - task: DownloadPipelineArtifact@2
      inputs:
        source: specific
        project: build
        pipeline: 9
        artifact: sonic-swss-common.amd64.ubuntu20_04
        runVersion: 'latestFromBranch'
        runBranch: 'refs/heads/202012'
      displayName: "Download sonic swss common deb packages"

    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: sonic-buildimage.vs
      displayName: "Download sonic-buildimage.vs artifact"

    - script: |
        set -x
        # Install the downloaded swss-common packages on the agent.
        sudo dpkg -i --force-confask,confnew ../libswsscommon_1.0.0_amd64.deb
        sudo dpkg -i ../python3-swsscommon_1.0.0_amd64.deb
        # Load the vs docker image from the build artifact and tag it per build.
        sudo docker load -i ../target/docker-sonic-vs.gz
        docker tag docker-sonic-vs:latest docker-sonic-vs:$(Build.BuildNumber)
        username=$(id -un)

        # On exit: dump docker/netns state, remove the per-build image, delete
        # leftover test network namespaces, and restore file ownership.
        trap "docker ps; docker images; ip netns list; \
              docker rmi docker-sonic-vs:$(Build.BuildNumber); \
              ip netns list | grep -E [-]srv[0-9]+ | awk '{print $1}' | xargs -I {} sudo ip netns delete {}; \
              sudo chown -R ${username}.${username} .; \
              sudo chown -R ${username}.${username} $(System.DefaultWorkingDirectory)" EXIT
        pushd platform/vs/tests
        sudo py.test -v --junitxml=tr.xml --imgname=docker-sonic-vs:$(Build.BuildNumber)
      displayName: "Run vs tests"

    - task: PublishTestResults@2
      inputs:
        testResultsFiles: '**/tr.xml'
        testRunTitle: vstest

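  # The remaining jobs run kvmtest suites through the Elastictest scheduler
  # template. Worker counts such as $(T0_INSTANCE_NUM) are expected to come from
  # the SONiC-Elastictest variable group; each job is bounded by its 240-minute
  # timeout rather than per-step timeouts.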
  - job: t0_elastictest
    pool: ubuntu-20.04
    displayName: "kvmtest-t0 by Elastictest"
    timeoutInMinutes: 240
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-scheduler-template.yml
      parameters:
        TOPOLOGY: t0
        MIN_WORKER: $(T0_INSTANCE_NUM)
        MAX_WORKER: $(T0_INSTANCE_NUM)
        MGMT_BRANCH: 202012

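  # Same t0 topology, but with the two-VLAN test set and minigraph parameters.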
  - job: t0_2vlans_elastictest
    pool: ubuntu-20.04
    displayName: "kvmtest-t0-2vlans by Elastictest"
    timeoutInMinutes: 240
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-scheduler-template.yml
      parameters:
        TOPOLOGY: t0
        TEST_SET: t0-2vlans
        MIN_WORKER: $(T0_2VLANS_INSTANCE_NUM)
        MAX_WORKER: $(T0_2VLANS_INSTANCE_NUM)
        DEPLOY_MG_EXTRA_PARAMS: "-e vlan_config=two_vlan_a"
        MGMT_BRANCH: 202012

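  # t1-lag topology tests.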
  - job: t1_lag_elastictest
    pool: ubuntu-20.04
    displayName: "kvmtest-t1-lag by Elastictest"
    timeoutInMinutes: 240
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-scheduler-template.yml
      parameters:
        TOPOLOGY: t1-lag
        MIN_WORKER: $(T1_LAG_INSTANCE_NUM)
        MAX_WORKER: $(T1_LAG_INSTANCE_NUM)
        MGMT_BRANCH: 202012

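  # Dual ToR t0 tests; the log analyzer is disabled via COMMON_EXTRA_PARAMS.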
  - job: dualtor_elastictest
    pool: ubuntu-20.04
    displayName: "kvmtest-dualtor-t0 by Elastictest"
    timeoutInMinutes: 240
    continueOnError: false
    steps:
    - template: .azure-pipelines/run-test-scheduler-template.yml
      parameters:
        TOPOLOGY: dualtor
        MIN_WORKER: $(T0_DUALTOR_INSTANCE_NUM)
        MAX_WORKER: $(T0_DUALTOR_INSTANCE_NUM)
        COMMON_EXTRA_PARAMS: "--disable_loganalyzer "
        MGMT_BRANCH: 202012