sonic-buildimage/Makefile.work
jusherma 4d48e6063c
[build] don't require passwordless sudo #11417
Why I did it
Not all build environments have passwordless sudo enabled for all users

How I did it
Instead of using sudo to delete fsroot directories, mount them in a small, temporary docker container and delete them from there
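
A minimal sketch of the approach, with FSROOT_DIR standing in for the fsroot path being cleaned up:

    docker run --rm -v "$FSROOT_DIR":/mount alpine sh -c 'rm -rf /mount/'
    mkdir -p "$FSROOT_DIR"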

How to verify it
Build in an environment where the build user does not have passwordless sudo enabled and confirm that no sudo password prompts are seen
2022-07-26 13:02:46 +08:00

###############################################################################
## Wrapper for starting make inside sonic-slave container
#
# Supported parameters:
#
# * PLATFORM: Specific platform we wish to build images for.
# * BUILD_NUMBER: Desired version-number to pass to the building-system.
# * ENABLE_DHCP_GRAPH_SERVICE: Enables get-graph service to fetch minigraph files
# through http.
# * ENABLE_ZTP: Enables zero touch provisioning.
# * SHUTDOWN_BGP_ON_START: Sets admin-down state for all bgp peerings after restart.
# * INCLUDE_KUBERNETES: Allows including Kubernetes
# * INCLUDE_MUX: Include MUX feature/services for TOR switch.
# * ENABLE_PFCWD_ON_START: Enable PFC Watchdog (PFCWD) on server-facing ports
# * by default for TOR switch.
# * ENABLE_SYNCD_RPC: Enables rpc-based syncd builds.
# * INSTALL_DEBUG_TOOLS: Install debug tools and debug symbol packages.
# * USERNAME: Desired username -- default at rules/config
# * PASSWORD: Desired password -- default at rules/config
# * KEEP_SLAVE_ON: Keeps the slave container up and active after the build process concludes.
# * Note that rm=true is still set, so once the user quits the docker
# * session, the container will be removed.
# * Please note that with the current Stretch build structure, users of the
# * KEEP_SLAVE_ON feature have to be conscious about which docker they
# * stay inside after the build is done.
# * - To stay inside the Jessie docker, issue
# *     make KEEP_SLAVE_ON=yes jessie
# * - To stay inside the Stretch docker, issue
# *     make NOJESSIE=1 KEEP_SLAVE_ON=yes <any target>
# * SOURCE_FOLDER: Host path to be mounted as /var/$(USER)/src; only effective when KEEP_SLAVE_ON=yes.
# * SONIC_BUILD_JOBS: Number of concurrent build jobs to run.
# * VS_PREPARE_MEM: Prepare memory in VS build (drop cache and compact).
# * Default: yes
# * Values: yes, no
# * KERNEL_PROCURE_METHOD: Method of obtaining the kernel Debian package: download or build
# * TELEMETRY_WRITABLE: Enable write/config operations via the gNMI interface.
# * Default: unset
# * Values: y
# * SONIC_DPKG_CACHE_METHOD: Method of obtaining Debian packages from the cache: none or cache
# * SONIC_DPKG_CACHE_SOURCE: Debian package cache location when caching is enabled for Debian packages
# * BUILD_LOG_TIMESTAMP: Set timestamp in the build log (simple/none)
# * DOCKER_EXTRA_OPTS: Extra command line arguments for dockerd running in slave container.
# * ENABLE_AUTO_TECH_SUPPORT: Enable the configuration for event-driven techsupport & coredump mgmt feature
# * Default: y
# * Values: y,n
# * INCLUDE_BOOTCHART: Install SONiC bootchart
# * Default: y
# * Values: y,n
# * ENABLE_BOOTCHART: Enable SONiC bootchart
# * Default: n
# * Values: y,n
#
###############################################################################
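# Example invocation (a typical flow; the vs platform and image target below are
# illustrative):
#   make init
#   make configure PLATFORM=vs
#   make SONIC_BUILD_JOBS=4 KEEP_SLAVE_ON=yes target/sonic-vs.img.gz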
SHELL = /bin/bash
USER := $(shell id -un)
PWD := $(shell pwd)
USER_LC := $(shell echo $(USER) | tr A-Z a-z)
DOCKER_MACHINE := $(shell docker run --rm debian:buster uname -m)
comma := ,
ifeq ($(DOCKER_MACHINE), aarch64)
COMPILE_HOST_ARCH := arm64
else ifeq ($(shell echo $(DOCKER_MACHINE) | grep -qE "armv7l|armv8l" && echo y),y)
COMPILE_HOST_ARCH := armhf
else
COMPILE_HOST_ARCH := amd64
endif
ifeq ($(USER), root)
$(error Add your user account to docker group and use your user account to make. root or sudo are not supported!)
endif
# Check for j2cli availability
J2_VER := $(shell j2 --version 2>&1 | grep j2cli | awk '{printf $$2}')
ifeq ($(J2_VER),)
$(error Please install j2cli (sudo pip install j2cli))
endif
# Check for minimum Docker version on build host
# Note: Using the greater of CE (17.05.0) and EE (17.06.1) versions that support ARG before FROM
docker_min := 17.06.1
docker_min_ver := $(shell echo "$(docker_min)" | awk -F. '{printf("%d%03d%03d\n",$$1,$$2,$$3);}' 2>/dev/null)
docker_ver := $(shell docker info 2>/dev/null | grep -i "server version" | rev | cut -d' ' -f1 | rev | awk -F. '{printf("%d%03d%03d\n",$$1,$$2,$$3);}' 2>/dev/null)
docker_is_valid := $(shell if [[ "$(docker_ver)" -lt $(docker_min_ver) ]] ; then echo "0"; else echo "1"; fi)
ifeq (0,$(docker_is_valid))
$(error SONiC requires Docker version $(docker_min) or later)
endif
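# Example: docker_min (17.06.1) packs to 17006001; a host reporting server version
# 20.10.7 packs to 20010007 and passes the check.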
# Remove lock file in case previous run was forcefully stopped
$(shell rm -f .screen)
MAKEFLAGS += -B
CONFIGURED_ARCH := $(shell [ -f .arch ] && cat .arch || echo $(PLATFORM_ARCH))
CONFIGURED_PLATFORM = $(if $(PLATFORM),$(PLATFORM),$(shell cat .platform 2>/dev/null))
ifeq ($(CONFIGURED_ARCH),)
override CONFIGURED_ARCH = amd64
endif
ifeq ($(PLATFORM_ARCH),)
override PLATFORM_ARCH = $(CONFIGURED_ARCH)
endif
ifeq ($(CONFIGURED_ARCH),amd64)
TARGET_BOOTLOADER = grub
else
TARGET_BOOTLOADER = uboot
endif
ifeq ($(BLDENV), bullseye)
SLAVE_DIR = sonic-slave-bullseye
else ifeq ($(BLDENV), buster)
SLAVE_DIR = sonic-slave-buster
else ifeq ($(BLDENV), stretch)
SLAVE_DIR = sonic-slave-stretch
else
SLAVE_DIR = sonic-slave-jessie
endif
# Define a do-nothing target for rules/config.user so that when
# the file is missing, make won't try to rebuild everything.
rules/config.user:
@echo -n ""
include rules/config
-include rules/config.user
ifneq ($(DEFAULT_CONTAINER_REGISTRY),)
override DEFAULT_CONTAINER_REGISTRY := $(DEFAULT_CONTAINER_REGISTRY)/
endif
ifeq ($(ENABLE_DOCKER_BASE_PULL),)
override ENABLE_DOCKER_BASE_PULL = n
endif
ifeq ($(CONFIGURED_ARCH),amd64)
SLAVE_BASE_IMAGE = $(SLAVE_DIR)
MULTIARCH_QEMU_ENVIRON = n
CROSS_BUILD_ENVIRON = n
else
ifeq ($(CONFIGURED_ARCH), $(COMPILE_HOST_ARCH))
SLAVE_BASE_IMAGE = $(SLAVE_DIR)
MULTIARCH_QEMU_ENVIRON = n
CROSS_BUILD_ENVIRON = n
else ifneq ($(CONFIGURED_ARCH),)
SLAVE_BASE_IMAGE = $(SLAVE_DIR)-march-$(CONFIGURED_ARCH)
ifneq ($(CROSS_BLDENV),)
MULTIARCH_QEMU_ENVIRON = n
CROSS_BUILD_ENVIRON = y
else
MULTIARCH_QEMU_ENVIRON = y
CROSS_BUILD_ENVIRON = n
endif
endif
endif
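# Example: building CONFIGURED_ARCH=arm64 on an amd64 host selects
# SLAVE_BASE_IMAGE=$(SLAVE_DIR)-march-arm64 and, without CROSS_BLDENV, uses the
# qemu-based multiarch environment (MULTIARCH_QEMU_ENVIRON=y).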
SLAVE_IMAGE = $(SLAVE_BASE_IMAGE)-$(USER_LC)
DOCKER_ROOT = $(PWD)/fsroot.docker.$(BLDENV)
# Support the FIPS feature; armhf is not supported yet
ifeq ($(PLATFORM_ARCH),armhf)
ENABLE_FIPS_FEATURE := n
ENABLE_FIPS := n
endif
ifeq ($(ENABLE_FIPS_FEATURE), n)
ifeq ($(ENABLE_FIPS), y)
$(error Cannot set fips config ENABLE_FIPS=y when ENABLE_FIPS_FEATURE=n)
endif
endif
# Generate the version control build info
$(shell SONIC_VERSION_CONTROL_COMPONENTS=$(SONIC_VERSION_CONTROL_COMPONENTS) \
TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) PACKAGE_URL_PREFIX=$(PACKAGE_URL_PREFIX) \
scripts/generate_buildinfo_config.sh)
# Generate the slave Dockerfile, and prepare build info for it
$(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) CROSS_BUILD_ENVIRON=$(CROSS_BUILD_ENVIRON) ENABLE_FIPS_FEATURE=$(ENABLE_FIPS_FEATURE) DOCKER_EXTRA_OPTS=$(DOCKER_EXTRA_OPTS) DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) j2 $(SLAVE_DIR)/Dockerfile.j2 > $(SLAVE_DIR)/Dockerfile)
$(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) CROSS_BUILD_ENVIRON=$(CROSS_BUILD_ENVIRON) j2 $(SLAVE_DIR)/Dockerfile.user.j2 > $(SLAVE_DIR)/Dockerfile.user)
$(shell BUILD_SLAVE=y DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) scripts/prepare_docker_buildinfo.sh $(SLAVE_BASE_IMAGE) $(SLAVE_DIR)/Dockerfile $(CONFIGURED_ARCH) "" $(BLDENV))
# Include the versions in the tag; if the versions change, the slave needs to be rebuilt
SLAVE_BASE_TAG = $(shell cat $(SLAVE_DIR)/Dockerfile $(SLAVE_DIR)/buildinfo/versions/versions-* src/sonic-build-hooks/hooks/* | sha1sum | awk '{print substr($$1,0,11);}')
# Calculate the slave TAG based on $(USER)/$(PWD)/$(CONFIGURED_PLATFORM) to get a unique SHA ID
SLAVE_TAG = $(shell (cat $(SLAVE_DIR)/Dockerfile.user $(SLAVE_DIR)/Dockerfile $(SLAVE_DIR)/buildinfo/versions/versions-* .git/HEAD && echo $(USER)/$(PWD)/$(CONFIGURED_PLATFORM)) \
| sha1sum | awk '{print substr($$1,0,11);}')
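# Example (hypothetical hash): SLAVE_BASE_TAG=8d3a9f1c2b, a short prefix of the SHA-1;
# touching any versions file or build hook changes the hash and forces a slave rebuild.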
OVERLAY_MODULE_CHECK := \
lsmod | grep -q "^overlay " &>/dev/null || \
zgrep -q 'CONFIG_OVERLAY_FS=y' /proc/config.gz &>/dev/null || \
grep -q 'CONFIG_OVERLAY_FS=y' /boot/config-$(shell uname -r) &>/dev/null || \
(echo "ERROR: Module 'overlay' not loaded. Try running 'sudo modprobe overlay'."; exit 1)
BUILD_TIMESTAMP := $(shell date +%Y%m%d\.%H%M%S)
# Create separate Docker lockfiles for saving vs. loading an image.
ifeq ($(DOCKER_LOCKDIR),)
override DOCKER_LOCKDIR := /tmp/docklock
endif
DOCKER_LOCKFILE_SAVE := $(DOCKER_LOCKDIR)/docker_save.lock
$(shell mkdir -m 0777 -p $(DOCKER_LOCKDIR))
$(shell [ -f $(DOCKER_LOCKFILE_SAVE) ] || (touch $(DOCKER_LOCKFILE_SAVE) && chmod 0777 $(DOCKER_LOCKFILE_SAVE)))
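# Delete any stale fsroot contents without requiring sudo: bind-mount $(DOCKER_ROOT) into a
# short-lived alpine container and remove them from inside it (the approach described in
# PR #11417 above).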
$(shell docker run --rm -v $(DOCKER_ROOT)\:/mount alpine sh -c 'rm -rf /mount/')
$(shell mkdir -p $(DOCKER_ROOT))
ifeq ($(DOCKER_BUILDER_MOUNT),)
override DOCKER_BUILDER_MOUNT := "$(PWD):/sonic"
endif
ifeq ($(DOCKER_BUILDER_WORKDIR),)
override DOCKER_BUILDER_WORKDIR := "/sonic"
endif
DOCKER_RUN := docker run --rm=true --privileged --init \
-v $(DOCKER_BUILDER_MOUNT) \
-v "$(DOCKER_LOCKDIR):$(DOCKER_LOCKDIR)" \
-w $(DOCKER_BUILDER_WORKDIR) \
-e "http_proxy=$(http_proxy)" \
-e "https_proxy=$(https_proxy)" \
-e "no_proxy=$(no_proxy)" \
-i$(shell { if [ -t 0 ]; then echo t; fi }) \
$(SONIC_BUILDER_EXTRA_CMDLINE)
# Mount $(DOCKER_ROOT) to /var/lib/docker in the slave container, since an overlay fs is not supported as the dockerd root folder.
ifneq ($(filter $(SONIC_SLAVE_DOCKER_DRIVER),overlay overlay2),)
DOCKER_RUN += -v $(DOCKER_ROOT):/var/lib/docker
endif
ifneq ($(DOCKER_BUILDER_USER_MOUNT),)
DOCKER_RUN += $(foreach mount,$(subst $(comma), ,$(DOCKER_BUILDER_USER_MOUNT)), $(addprefix -v , $(mount)))
endif
ifdef SONIC_BUILD_QUIETER
DOCKER_RUN += -e "SONIC_BUILD_QUIETER=$(SONIC_BUILD_QUIETER)"
endif
ifneq ($(SONIC_DPKG_CACHE_SOURCE),)
DOCKER_RUN += -v "$(SONIC_DPKG_CACHE_SOURCE):/dpkg_cache:rw"
endif
ifeq ($(SONIC_ENABLE_SECUREBOOT_SIGNATURE), y)
ifneq ($(SIGNING_KEY),)
DOCKER_SIGNING_SOURCE := $(shell dirname $(SIGNING_KEY))
DOCKER_RUN += -v "$(DOCKER_SIGNING_SOURCE):$(DOCKER_SIGNING_SOURCE):ro"
endif
ifneq ($(SIGNING_CERT),)
DOCKER_SIGNING_SOURCE := $(shell dirname $(SIGNING_CERT))
DOCKER_RUN += -v "$(DOCKER_SIGNING_SOURCE):$(DOCKER_SIGNING_SOURCE):ro"
endif
endif
# User name and tag for "docker-*" images created by native dockerd mode.
ifeq ($(strip $(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD)),y)
DOCKER_USERNAME = $(USER_LC)
DOCKER_USERTAG = $(SLAVE_TAG)
else
DOCKER_USERNAME = sonic
DOCKER_USERTAG = latest
endif
# Define canned sequence to clean up Docker image cache.
# - These are the remnants from building the runtime Docker images using native (host) Docker daemon.
# - Image naming convention differs on a shared build system vs. non-shared.
# $(docker-image-cleanup)
ifeq ($(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD),y)
define docker-image-cleanup
@for i in $(shell docker images --quiet --filter 'dangling=true') ; do (docker rmi -f $$i &> /dev/null || true) ; done
@for i in $(shell docker images --quiet docker-*$(DOCKER_USERNAME):$(DOCKER_USERTAG)) ; do (docker rmi -f $$i &> /dev/null || true) ; done
endef
else
define docker-image-cleanup
@:
endef
endif
ifeq ($(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD), y)
ifneq ($(MULTIARCH_QEMU_ENVIRON), y)
DOCKER_RUN += -v /var/run/docker.sock:/var/run/docker.sock
endif
endif
ifneq ($(filter y, $(MULTIARCH_QEMU_ENVIRON) $(CROSS_BUILD_ENVIRON)),)
ifeq ($(DOCKER_DATA_ROOT_FOR_MULTIARCH),)
DOCKER_DATA_ROOT_FOR_MULTIARCH := /var/lib/march/docker
endif
# Multiarch docker cannot start the dockerd service because iptables cannot run on a kernel of a different architecture
SONIC_SERVICE_DOCKERD_FOR_MULTIARCH=y
SONIC_NATIVE_DOCKERD_FOR_MUTLIARCH := dockerd --experimental=true --storage-driver=vfs \
--data-root=$(DOCKER_DATA_ROOT_FOR_MULTIARCH) --exec-root=/var/run/march/docker/ \
-H unix:///var/run/march/docker.sock -p /var/run/march/docker.pid
ifneq ($(DOCKER_CONFIG_FILE_FOR_MULTIARCH),)
SONIC_NATIVE_DOCKERD_FOR_MUTLIARCH += --config-file=$(DOCKER_CONFIG_FILE_FOR_MULTIARCH)
endif
DOCKER_RUN += -v /var/run/march/docker.sock:/var/run/docker.sock
DOCKER_RUN += -v /var/run/march/docker.pid:/var/run/docker.pid
DOCKER_RUN += -v /var/run/march/docker:/var/run/docker
DOCKER_RUN += -v $(DOCKER_DATA_ROOT_FOR_MULTIARCH):/var/lib/docker
SONIC_USERFACL_DOCKERD_FOR_MUTLIARCH := setfacl -m user:$(USER):rw /var/run/march/docker.sock
# Override the native config to prevent starting the docker service
SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD=y
DOCKER_MULTIARCH_CHECK := docker run --rm --privileged multiarch/qemu-user-static --reset -p yes --credential yes
DOCKER_SERVICE_SAFE_KILLER := (MARCH_PID=`ps -eo pid,cmd | grep "[0-9] dockerd.*march" | awk '{print $$1}'`; echo "Killing march docker $$MARCH_PID"; [ -z "$$MARCH_PID" ] || sudo kill -9 "$$MARCH_PID";)
DOCKER_SERVICE_MULTIARCH_CHECK := ($(DOCKER_SERVICE_SAFE_KILLER); sudo rm -fr /var/run/march/; (echo "Starting docker march service..."; sudo $(SONIC_NATIVE_DOCKERD_FOR_MUTLIARCH) &) &>/dev/null ; sleep 2; sudo $(SONIC_USERFACL_DOCKERD_FOR_MUTLIARCH);)
# Docker service to load the compiled dockers-*.gz
# Docker 19.0 and above has a path length restriction, so the path is replaced with a soft link under /tmp/
# dockerd also does a mkdir on the provided soft link, so a two-level path "d/d" is used
D_ROOT=/tmp/d/d
SONIC_NATIVE_DOCKERD_FOR_DOCKERFS := rm -fr $(PWD)/dockerfs; mkdir -p $(PWD)/dockerfs; sudo rm -fr /tmp/d; mkdir -p /tmp/d; ln -s -f $(PWD)/dockerfs $(D_ROOT); \
sudo dockerd --storage-driver=overlay2 --iptables=false \
--data-root $(D_ROOT)/var/lib/docker/ --exec-root=$(D_ROOT)/var/run/docker/ \
-H unix://$(D_ROOT)/var/run/docker.sock -p $(D_ROOT)/var/run/docker.pid &
SONIC_USERFACL_DOCKERD_FOR_DOCKERFS := setfacl -m user:$(USER):rw $(D_ROOT)/var/run/docker.sock
DOCKER_SERVICE_DOCKERFS_CHECK := (sudo docker -H unix://$(D_ROOT)/var/run/docker.sock info &> /dev/null && sudo kill -9 `sudo cat $(D_ROOT)/var/run/docker.pid` && false) || (echo "Starting docker build service..."; (sudo $(SONIC_NATIVE_DOCKERD_FOR_DOCKERFS) ) &> /tmp/dockerfs.log ; sleep 1; sudo $(SONIC_USERFACL_DOCKERD_FOR_DOCKERFS);)
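# To inspect the dockerfs daemon manually (using the D_ROOT path defined above):
#   sudo docker -H unix:///tmp/d/d/var/run/docker.sock info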
endif
SPLIT_LOG = | tee
DOCKER_BASE_LOG = $(SLAVE_DIR)/$(SLAVE_BASE_IMAGE)_$(SLAVE_BASE_TAG).log
DOCKER_LOG = $(SLAVE_DIR)/$(SLAVE_IMAGE)_$(SLAVE_TAG).log
DOCKER_BASE_BUILD = docker build --no-cache \
-t $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) \
--build-arg http_proxy=$(http_proxy) \
--build-arg https_proxy=$(https_proxy) \
--build-arg no_proxy=$(no_proxy) \
$(SLAVE_DIR) $(SPLIT_LOG) $(DOCKER_BASE_LOG)
DOCKER_BASE_PULL = docker pull \
$(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG)
DOCKER_BUILD = docker build --no-cache \
--build-arg user=$(USER) \
--build-arg uid=$(shell id -u) \
--build-arg guid=$(shell id -g) \
--build-arg hostname=$(shell echo $$HOSTNAME) \
--build-arg slave_base_tag_ref=$(SLAVE_BASE_TAG) \
-t $(SLAVE_IMAGE):$(SLAVE_TAG) \
-f $(SLAVE_DIR)/Dockerfile.user \
$(SLAVE_DIR) $(SPLIT_LOG) $(DOCKER_LOG)
SONIC_BUILD_INSTRUCTION := make \
-f slave.mk \
PLATFORM=$(PLATFORM) \
PLATFORM_ARCH=$(PLATFORM_ARCH) \
MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) \
CROSS_BUILD_ENVIRON=$(CROSS_BUILD_ENVIRON) \
BUILD_NUMBER=$(BUILD_NUMBER) \
BUILD_TIMESTAMP=$(BUILD_TIMESTAMP) \
SONIC_IMAGE_VERSION=$(SONIC_IMAGE_VERSION) \
SLAVE_TAG=$(SLAVE_TAG) \
ENABLE_DHCP_GRAPH_SERVICE=$(ENABLE_DHCP_GRAPH_SERVICE) \
ENABLE_ZTP=$(ENABLE_ZTP) \
INCLUDE_PDE=$(INCLUDE_PDE) \
SHUTDOWN_BGP_ON_START=$(SHUTDOWN_BGP_ON_START) \
INCLUDE_KUBERNETES=$(INCLUDE_KUBERNETES) \
KUBERNETES_VERSION=$(KUBERNETES_VERSION) \
KUBERNETES_CNI_VERSION=$(KUBERNETES_CNI_VERSION) \
K8s_GCR_IO_PAUSE_VERSION=$(K8s_GCR_IO_PAUSE_VERSION) \
SONIC_ENABLE_PFCWD_ON_START=$(ENABLE_PFCWD_ON_START) \
SONIC_ENABLE_SYNCD_RPC=$(ENABLE_SYNCD_RPC) \
SONIC_INSTALL_DEBUG_TOOLS=$(INSTALL_DEBUG_TOOLS) \
SONIC_SAITHRIFT_V2=$(SAITHRIFT_V2) \
MDEBUG=$(MDEBUG) \
PASSWORD=$(PASSWORD) \
USERNAME=$(USERNAME) \
SONIC_BUILD_JOBS=$(SONIC_BUILD_JOBS) \
SONIC_USE_DOCKER_BUILDKIT=$(SONIC_USE_DOCKER_BUILDKIT) \
VS_PREPARE_MEM=$(VS_PREPARE_MEM) \
KERNEL_PROCURE_METHOD=$(KERNEL_PROCURE_METHOD) \
SONIC_DPKG_CACHE_METHOD=$(SONIC_DPKG_CACHE_METHOD) \
SONIC_DPKG_CACHE_SOURCE=$(SONIC_DPKG_CACHE_SOURCE) \
HTTP_PROXY=$(http_proxy) \
HTTPS_PROXY=$(https_proxy) \
NO_PROXY=$(no_proxy) \
DOCKER_USERNAME=$(DOCKER_USERNAME) \
DOCKER_USERTAG=$(DOCKER_USERTAG) \
DOCKER_LOCKDIR=$(DOCKER_LOCKDIR) \
DOCKER_LOCKFILE_SAVE=$(DOCKER_LOCKFILE_SAVE) \
SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD=$(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD) \
SONIC_INCLUDE_SYSTEM_TELEMETRY=$(INCLUDE_SYSTEM_TELEMETRY) \
INCLUDE_DHCP_RELAY=$(INCLUDE_DHCP_RELAY) \
INCLUDE_MACSEC=$(INCLUDE_MACSEC) \
SONIC_INCLUDE_RESTAPI=$(INCLUDE_RESTAPI) \
SONIC_INCLUDE_MUX=$(INCLUDE_MUX) \
TELEMETRY_WRITABLE=$(TELEMETRY_WRITABLE) \
EXTRA_DOCKER_TARGETS=$(EXTRA_DOCKER_TARGETS) \
BUILD_LOG_TIMESTAMP=$(BUILD_LOG_TIMESTAMP) \
SONIC_ENABLE_IMAGE_SIGNATURE=$(ENABLE_IMAGE_SIGNATURE) \
SONIC_ENABLE_SECUREBOOT_SIGNATURE=$(SONIC_ENABLE_SECUREBOOT_SIGNATURE) \
SONIC_DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) \
ENABLE_HOST_SERVICE_ON_START=$(ENABLE_HOST_SERVICE_ON_START) \
SLAVE_DIR=$(SLAVE_DIR) \
ENABLE_AUTO_TECH_SUPPORT=$(ENABLE_AUTO_TECH_SUPPORT) \
BUILD_MULTIASIC_KVM=$(BUILD_MULTIASIC_KVM) \
ENABLE_ASAN=$(ENABLE_ASAN) \
SONIC_INCLUDE_BOOTCHART=$(INCLUDE_BOOTCHART) \
SONIC_ENABLE_BOOTCHART=$(ENABLE_BOOTCHART) \
ENABLE_FIPS_FEATURE=$(ENABLE_FIPS_FEATURE) \
ENABLE_FIPS=$(ENABLE_FIPS) \
SONIC_SLAVE_DOCKER_DRIVER=$(SONIC_SLAVE_DOCKER_DRIVER) \
$(SONIC_OVERRIDE_BUILD_VARS)
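# Example (hypothetical target): "make PLATFORM=vs target/sonic-vs.img.gz" on the host
# re-invokes make inside the slave container roughly as
#   make -f slave.mk PLATFORM=vs ... target/sonic-vs.img.gz
# with the remaining variables above passed through unchanged.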
.PHONY: sonic-slave-build sonic-slave-bash init reset
.DEFAULT_GOAL := all
%::
ifneq ($(filter y, $(MULTIARCH_QEMU_ENVIRON) $(CROSS_BUILD_ENVIRON)),)
@$(DOCKER_MULTIARCH_CHECK)
ifneq ($(BLDENV), )
@$(DOCKER_SERVICE_MULTIARCH_CHECK)
@$(DOCKER_SERVICE_DOCKERFS_CHECK)
endif
endif
@$(OVERLAY_MODULE_CHECK)
@pushd src/sonic-build-hooks; TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) make all; popd
@cp src/sonic-build-hooks/buildinfo/sonic-build-hooks* $(SLAVE_DIR)/buildinfo
@docker inspect --type image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) &> /dev/null || \
{ [ $(ENABLE_DOCKER_BASE_PULL) == y ] && { echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Pulling...; } && \
$(DOCKER_BASE_PULL) && \
{ docker tag $(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) && \
scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; } } || \
{ echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Building... ; \
$(DOCKER_BASE_BUILD) ; \
scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; }
@docker inspect --type image $(SLAVE_IMAGE):$(SLAVE_TAG) &> /dev/null || \
{ echo Image $(SLAVE_IMAGE):$(SLAVE_TAG) not found. Building... ; \
$(DOCKER_BUILD) ; }
ifeq "$(KEEP_SLAVE_ON)" "yes"
ifdef SOURCE_FOLDER
@$(DOCKER_RUN) -v $(SOURCE_FOLDER):/var/$(USER)/src $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_BUILD_INSTRUCTION) $@; scripts/collect_build_version_files.sh \$$?; /bin/bash"
else
@$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_BUILD_INSTRUCTION) $@; scripts/collect_build_version_files.sh \$$?; /bin/bash"
endif
else
@$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_BUILD_INSTRUCTION) $@; scripts/collect_build_version_files.sh \$$?"
endif
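# Example: "make KEEP_SLAVE_ON=yes target/sonic-vs.img.gz" (hypothetical target) runs the
# build and then drops into a bash shell inside the slave container; because --rm=true is
# set on DOCKER_RUN, the container is removed once that shell exits.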
docker-cleanup:
$(docker-image-cleanup)
sonic-build-hooks:
@pushd src/sonic-build-hooks; TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) make all; popd
@cp src/sonic-build-hooks/buildinfo/sonic-build-hooks* $(SLAVE_DIR)/buildinfo
sonic-slave-base-build : sonic-build-hooks
ifeq ($(MULTIARCH_QEMU_ENVIRON), y)
@$(DOCKER_MULTIARCH_CHECK)
endif
@$(OVERLAY_MODULE_CHECK)
@echo Checking sonic-slave-base image: $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG)
@docker inspect --type image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) &> /dev/null || \
{ [ $(ENABLE_DOCKER_BASE_PULL) == y ] && { echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Pulling...; } && \
$(DOCKER_BASE_PULL) && \
{ docker tag $(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) && \
scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; } } || \
{ echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Building... ; \
$(DOCKER_BASE_BUILD) ; \
scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; }
sonic-slave-build : sonic-slave-base-build
@echo Checking sonic-slave image: $(SLAVE_IMAGE):$(SLAVE_TAG)
@docker inspect --type image $(SLAVE_IMAGE):$(SLAVE_TAG) &> /dev/null || \
{ echo Image $(SLAVE_IMAGE):$(SLAVE_TAG) not found. Building... ; \
$(DOCKER_BUILD) ; }
sonic-slave-bash : sonic-slave-build
@$(DOCKER_RUN) -t $(SLAVE_IMAGE):$(SLAVE_TAG) bash
sonic-slave-run : sonic-slave-build
@$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_RUN_CMDS)"
showtag:
@echo $(SLAVE_IMAGE):$(SLAVE_TAG)
@echo $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG)
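# "make init" checks out all submodules recursively; the foreach below rewrites each
# submodule's .git file so its gitdir pointer is a relative path, keeping it valid when the
# tree is mounted at a different absolute path (e.g. /sonic inside the slave container).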
init :
@git submodule update --init --recursive
@git submodule foreach --recursive '[ -f .git ] && echo "gitdir: $$(realpath --relative-to=. $$(cut -d" " -f2 .git))" > .git'
.ONESHELL : reset
reset :
@echo && echo -n "Warning! All local changes will be lost. Proceed? [y/N]: "
@read ans && (
if [ $$ans == y ]; then
echo "Resetting local repository. Please wait...";
sudo rm -rf fsroot*;
if [ "$(MULTIARCH_QEMU_ENVIRON)" == y ] && [[ "$(CONFIGURED_ARCH)" == "armhf" || "$(CONFIGURED_ARCH)" == "arm64" ]]; then
echo "Stopping march $(CONFIGURED_ARCH) docker"
sudo kill -9 `sudo cat /var/run/march/docker.pid` || true
sudo rm -f /var/run/march/docker.pid || true
fi
git clean -xfdf;
git reset --hard;
git submodule foreach --recursive 'git clean -xfdf || true';
git submodule foreach --recursive 'git reset --hard || true';
git submodule foreach --recursive 'git remote update || true';
git submodule update --init --recursive;
echo "Reset complete!";
else
echo "Reset aborted";
fi )