d2bc825db6
- Why I did it: Fixes #8898. Dockerd for the multiarch build by default uses the host OS config file ("/etc/docker/daemon.json"). The default configuration used on the host OS may not work for containers run inside sonic-slave. The "DOCKER_CONFIG_FILE_FOR_MULTIARCH" variable allows overriding the path to the config file that will be used for the multiarch dockerd. - How I did it: Added "DOCKER_CONFIG_FILE_FOR_MULTIARCH" to the Makefile.work file, which allows overriding the path to the dockerd config file through an environment variable: DOCKER_CONFIG_FILE_FOR_MULTIARCH=${path_to_file}/daemon.json make ... If the env variable is not set, the build system preserves its default behavior. - How to verify it: Set the DOCKER_CONFIG_FILE_FOR_MULTIARCH env variable and run a build. While the build is running, execute the command ps -eo pid,cmd | grep "[0-9] dockerd.*march" and verify that the --config-file parameter is set to the same path that was specified in the DOCKER_CONFIG_FILE_FOR_MULTIARCH variable.
392 lines
19 KiB
Makefile
392 lines
19 KiB
Makefile
###############################################################################
## Wrapper for starting make inside sonic-slave container
#
# Supported parameters:
#
# * PLATFORM: Specific platform we wish to build images for.
# * BUILD_NUMBER: Desired version-number to pass to the building-system.
# * ENABLE_DHCP_GRAPH_SERVICE: Enables get-graph service to fetch minigraph files
#   through http.
# * ENABLE_ZTP: Enables zero touch provisioning.
# * SHUTDOWN_BGP_ON_START: Sets admin-down state for all bgp peerings after restart.
# * INCLUDE_KUBERNETES: Allows including Kubernetes
# * INCLUDE_MUX: Include MUX feature/services for TOR switch.
# * ENABLE_PFCWD_ON_START: Enable PFC Watchdog (PFCWD) on server-facing ports
#   by default for TOR switch.
# * ENABLE_SYNCD_RPC: Enables rpc-based syncd builds.
# * INSTALL_DEBUG_TOOLS: Install debug tools and debug symbol packages.
# * USERNAME: Desired username -- default at rules/config
# * PASSWORD: Desired password -- default at rules/config
# * KEEP_SLAVE_ON: Keeps slave container up and active after building process concludes.
#   Note that rm=true is still set, so once the user quits from the docker
#   session, the docker will be removed.
#   Please note that with the current Stretch build structure,
#   a user of the KEEP_SLAVE_ON feature will have to be conscious
#   about which docker to stay inside after the build is done.
#   - If the user desires to stay inside the Jessie docker, please issue:
#     make KEEP_SLAVE_ON=yes jessie
#   - If the user desires to stay inside the Stretch docker, please issue:
#     make NOJESSIE=1 KEEP_SLAVE_ON=yes <any target>
# * SOURCE_FOLDER: host path to be mounted as /var/$(USER)/src, only effective when KEEP_SLAVE_ON=yes
# * SONIC_BUILD_JOBS: Specifies the number of concurrent build job(s) to run.
# * VS_PREPARE_MEM: Prepare memory in VS build (drop cache and compact).
#   Default: yes
#   Values: yes, no
# * KERNEL_PROCURE_METHOD: Specifies the method of obtaining the kernel Debian package: download or build
# * TELEMETRY_WRITABLE: Enable write/config operations via the gNMI interface.
#   Default: unset
#   Values: y
# * SONIC_DPKG_CACHE_METHOD: Specifies the method of obtaining the Debian packages from cache: none or cache
# * SONIC_DPKG_CACHE_SOURCE: Debian package cache location when cache is enabled for debian packages
# * BUILD_LOG_TIMESTAMP: Set timestamp in the build log (simple/none)
# * DOCKER_EXTRA_OPTS: Extra command line arguments for dockerd running in slave container.
# * DOCKER_CONFIG_FILE_FOR_MULTIARCH: Alternate docker daemon config file for the multiarch dockerd.
# * ENABLE_AUTO_TECH_SUPPORT: Enable the configuration for event-driven techsupport & coredump mgmt feature
#   Default: y
#   Values: y,n
#
###############################################################################
|
|
|
|
# bash is required: recipes and $(shell ...) calls below use [[ ]] tests
# and pushd/popd.
SHELL = /bin/bash

USER := $(shell id -un)
PWD := $(shell pwd)
# Docker image names must be lowercase, so keep a lowercased copy of the
# invoking user's name for per-user image tags.
USER_LC := $(shell echo $(USER) | tr A-Z a-z)
# Machine architecture as seen from inside a container; detects the docker
# host's architecture (which is what matters for choosing slave images).
DOCKER_MACHINE := $(shell docker run --rm debian:buster uname -m)

# Literal comma, needed inside function calls where ',' is an argument separator.
comma := ,

# Map the raw 'uname -m' string onto the Debian architecture names used by
# the build system (arm64 / armhf / amd64).
ifeq ($(DOCKER_MACHINE), aarch64)
COMPILE_HOST_ARCH := arm64
else ifeq ($(shell echo $(DOCKER_MACHINE) | grep -qE "armv7l|armv8l" && echo y),y)
COMPILE_HOST_ARCH := armhf
else
COMPILE_HOST_ARCH := amd64
endif
|
|
|
|
|
|
# Building as root is not supported: the build relies on user-level docker
# group membership and produces files owned by the invoking user.
ifeq ($(USER), root)
$(error Add your user account to docker group and use your user account to make. root or sudo are not supported!)
endif

# Check for j2cli availability
# j2 is used below to render the slave Dockerfile templates; fail early
# with an actionable message when it is missing.
J2_VER := $(shell j2 --version 2>&1 | grep j2cli | awk '{printf $$2}')
ifeq ($(J2_VER),)
$(error Please install j2cli (sudo pip install j2cli))
endif
|
|
|
|
# Check for minimum Docker version on build host
# Note: Using the greater of CE (17.05.0) and EE (17.06.1) versions that support ARG before FROM
docker_min := 17.06.1
# Versions are normalized into zero-padded integers (e.g. 17.06.1 -> 17006001)
# so they can be compared numerically in the shell test below.
docker_min_ver := $(shell echo "$(docker_min)" | awk -F. '{printf("%d%03d%03d\n",$$1,$$2,$$3);}' 2>/dev/null)
docker_ver := $(shell docker info 2>/dev/null | grep -i "server version" | rev | cut -d' ' -f1 | rev | awk -F. '{printf("%d%03d%03d\n",$$1,$$2,$$3);}' 2>/dev/null)
# An empty docker_ver means 'docker info' itself failed (daemon not running,
# or the user lacks access). Previously this fell through to the numeric
# comparison as 0 and produced a misleading "requires Docker version" error;
# report the real problem instead.
ifeq ($(docker_ver),)
$(error Unable to determine Docker server version. Check that the docker daemon is running and accessible by $(USER))
endif
docker_is_valid := $(shell if [[ "$(docker_ver)" -lt $(docker_min_ver) ]] ; then echo "0"; else echo "1"; fi)
ifeq (0,$(docker_is_valid))
$(error SONiC requires Docker version $(docker_min) or later)
endif
|
|
|
|
# Remove lock file in case previous run was forcefully stopped
$(shell rm -f .screen)

# Always treat goals as out of date here; real dependency tracking happens
# in slave.mk inside the container.
MAKEFLAGS += -B

# Target architecture: a persisted .arch file from a previous configure
# wins, otherwise PLATFORM_ARCH, otherwise amd64. PLATFORM_ARCH is then
# synced so both names refer to the same value below.
CONFIGURED_ARCH := $(shell [ -f .arch ] && cat .arch || echo $(PLATFORM_ARCH))
ifeq ($(CONFIGURED_ARCH),)
override CONFIGURED_ARCH = amd64
endif
ifeq ($(PLATFORM_ARCH),)
override PLATFORM_ARCH = $(CONFIGURED_ARCH)
endif
|
|
|
|
# Select the sonic-slave flavor matching the requested Debian build
# environment. Any unrecognized (or unset) BLDENV falls back to the
# jessie slave, preserving the historical default.
ifneq ($(filter $(BLDENV),bullseye buster stretch),)
SLAVE_DIR = sonic-slave-$(BLDENV)
else
SLAVE_DIR = sonic-slave-jessie
endif
|
|
|
|
# Define a do-nothing target for rules/config.user so that when
# the file is missing, make won't try to rebuild everything.
rules/config.user:
	@echo -n ""

include rules/config
# Optional per-user overrides; '-include' tolerates a missing file.
-include rules/config.user

# Pulling a prebuilt slave base image from a registry is opt-in;
# default to building locally.
ifeq ($(ENABLE_DOCKER_BASE_PULL),)
override ENABLE_DOCKER_BASE_PULL = n
endif
|
|
|
|
# Decide which slave base image to use and whether a qemu-emulated
# multiarch environment is needed:
#  - target arch is amd64 or matches the build host arch -> plain slave image
#  - any other (cross) arch -> "-march-<arch>" image run under qemu-user-static
ifeq ($(CONFIGURED_ARCH),amd64)
SLAVE_BASE_IMAGE = $(SLAVE_DIR)
MULTIARCH_QEMU_ENVIRON = n
else
ifeq ($(CONFIGURED_ARCH), $(COMPILE_HOST_ARCH))
SLAVE_BASE_IMAGE = $(SLAVE_DIR)
MULTIARCH_QEMU_ENVIRON = n
else ifneq ($(CONFIGURED_ARCH),)
SLAVE_BASE_IMAGE = $(SLAVE_DIR)-march-$(CONFIGURED_ARCH)
MULTIARCH_QEMU_ENVIRON = y
endif
endif
# Per-user image layered on top of the shared base image.
SLAVE_IMAGE = $(SLAVE_BASE_IMAGE)-$(USER_LC)
|
|
|
|
# Generate the version control build info
# NOTE: these $(shell ...) calls run at parse time, before any target;
# they must happen before the tag computation below reads their output.
$(shell SONIC_VERSION_CONTROL_COMPONENTS=$(SONIC_VERSION_CONTROL_COMPONENTS) \
    TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) PACKAGE_URL_PREFIX=$(PACKAGE_URL_PREFIX) \
    scripts/generate_buildinfo_config.sh)

# Generate the slave Dockerfile, and prepare build info for it
$(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) DOCKER_EXTRA_OPTS=$(DOCKER_EXTRA_OPTS) j2 $(SLAVE_DIR)/Dockerfile.j2 > $(SLAVE_DIR)/Dockerfile)
$(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) j2 $(SLAVE_DIR)/Dockerfile.user.j2 > $(SLAVE_DIR)/Dockerfile.user)
$(shell BUILD_SLAVE=y scripts/prepare_docker_buildinfo.sh $(SLAVE_BASE_IMAGE) $(SLAVE_DIR)/Dockerfile $(CONFIGURED_ARCH) "" $(BLDENV))

# Add the versions in the tag, if the version change, need to rebuild the slave
# Tags are a shortened sha1 over the generated Dockerfiles, pinned package
# versions and build hooks, so any content change re-tags (and therefore
# rebuilds) the slave images.
SLAVE_BASE_TAG = $(shell cat $(SLAVE_DIR)/Dockerfile $(SLAVE_DIR)/buildinfo/versions/versions-* src/sonic-build-hooks/hooks/* | sha1sum | awk '{print substr($$1,0,11);}')
SLAVE_TAG = $(shell cat $(SLAVE_DIR)/Dockerfile.user $(SLAVE_DIR)/Dockerfile $(SLAVE_DIR)/buildinfo/versions/versions-* | sha1sum | awk '{print substr($$1,0,11);}')
|
|
|
|
# Shell snippet that fails (with a hint) unless the 'overlay' filesystem is
# available: either loaded as a module, or built into the running kernel.
OVERLAY_MODULE_CHECK := \
    lsmod | grep -q "^overlay " &>/dev/null || \
    zgrep -q 'CONFIG_OVERLAY_FS=y' /proc/config.gz &>/dev/null || \
    grep -q 'CONFIG_OVERLAY_FS=y' /boot/config-$(shell uname -r) &>/dev/null || \
    (echo "ERROR: Module 'overlay' not loaded. Try running 'sudo modprobe overlay'."; exit 1)

# Timestamp recorded once at parse time and passed into the image build.
BUILD_TIMESTAMP := $(shell date +%Y%m%d\.%H%M%S)
|
|
|
|
# Default bind mount and working directory for the build container. The
# 'override' form (rather than ?=) also replaces an explicitly empty value
# passed on the command line.
ifeq ($(DOCKER_BUILDER_MOUNT),)
override DOCKER_BUILDER_MOUNT := "$(PWD):/sonic"
endif

ifeq ($(DOCKER_BUILDER_WORKDIR),)
override DOCKER_BUILDER_WORKDIR := "/sonic"
endif
|
|
|
|
# Base 'docker run' command line for entering the slave container.
# '-i' plus a conditional 't' attaches a TTY only when stdin is a terminal,
# so non-interactive CI invocations still work.
DOCKER_RUN := docker run --rm=true --privileged --init \
    -v $(DOCKER_BUILDER_MOUNT) \
    -w $(DOCKER_BUILDER_WORKDIR) \
    -e "http_proxy=$(http_proxy)" \
    -e "https_proxy=$(https_proxy)" \
    -e "no_proxy=$(no_proxy)" \
    -i$(shell { if [ -t 0 ]; then echo t; fi }) \
    $(SONIC_BUILDER_EXTRA_CMDLINE)

# Extra user-supplied bind mounts: comma-separated "src:dst" pairs, each
# expanded into its own -v option.
ifneq ($(DOCKER_BUILDER_USER_MOUNT),)
DOCKER_RUN += $(foreach mount,$(subst $(comma), ,$(DOCKER_BUILDER_USER_MOUNT)), $(addprefix -v , $(mount)))
endif

ifdef SONIC_BUILD_QUIETER
DOCKER_RUN += -e "SONIC_BUILD_QUIETER=$(SONIC_BUILD_QUIETER)"
endif

# Share the host-side dpkg cache with the container when configured.
ifneq ($(SONIC_DPKG_CACHE_SOURCE),)
DOCKER_RUN += -v "$(SONIC_DPKG_CACHE_SOURCE):/dpkg_cache:rw"
endif

# Reuse the host docker daemon inside the slave instead of starting a
# nested dockerd.
ifeq ($(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD), y)
DOCKER_RUN += -v /var/run/docker.sock:/var/run/docker.sock
endif
|
|
|
|
ifeq ($(MULTIARCH_QEMU_ENVIRON), y)
ifeq ($(DOCKER_DATA_ROOT_FOR_MULTIARCH),)
DOCKER_DATA_ROOT_FOR_MULTIARCH := /var/lib/march/docker
endif
# Multiarch docker cannot start dockerd service due to iptables cannot run over different arch kernel
SONIC_SERVICE_DOCKERD_FOR_MULTIARCH=y
# Dedicated host-side dockerd instance for the foreign-arch build: vfs
# storage driver, and its own data/exec roots and socket under .../march/
# so it never collides with the host's main docker daemon.
SONIC_NATIVE_DOCKERD_FOR_MUTLIARCH := dockerd --experimental=true --storage-driver=vfs \
    --data-root=$(DOCKER_DATA_ROOT_FOR_MULTIARCH) --exec-root=/var/run/march/docker/ \
    -H unix:///var/run/march/docker.sock -p /var/run/march/docker.pid

# Optional daemon config override (fixes #8898): without it the march
# dockerd would read the host's default /etc/docker/daemon.json, whose
# settings may not suit containers run inside sonic-slave.
ifneq ($(DOCKER_CONFIG_FILE_FOR_MULTIARCH),)
SONIC_NATIVE_DOCKERD_FOR_MUTLIARCH += --config-file=$(DOCKER_CONFIG_FILE_FOR_MULTIARCH)
endif

# Expose the march dockerd inside the slave as the default docker socket.
DOCKER_RUN += -v /var/run/march/docker.sock:/var/run/docker.sock
DOCKER_RUN += -v /var/run/march/docker.pid:/var/run/docker.pid
DOCKER_RUN += -v /var/run/march/docker:/var/run/docker
DOCKER_RUN += -v $(DOCKER_DATA_ROOT_FOR_MULTIARCH):/var/lib/docker
# Grant the (non-root) build user access to the march docker socket.
SONIC_USERFACL_DOCKERD_FOR_MUTLIARCH := setfacl -m user:$(USER):rw /var/run/march/docker.sock

# Override native config to prevent starting a docker service in the slave
SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD=y

# Register qemu-user-static binfmt handlers once, so foreign-arch binaries
# can execute on this host.
DOCKER_MULTIARCH_CHECK := docker inspect --type image multiarch/qemu-user-static:register &> /dev/null || (echo "multiarch docker not found ..."; docker run --rm --privileged multiarch/qemu-user-static:register --reset --credential yes)

# Kill any leftover march dockerd from a previous (possibly aborted) run,
# then start a fresh instance and open its socket to $(USER).
DOCKER_SERVICE_SAFE_KILLER := (MARCH_PID=`ps -eo pid,cmd | grep "[0-9] dockerd.*march" | awk '{print $$1}'`; echo "Killing march docker $$MARCH_PID"; [ -z "$$MARCH_PID" ] || sudo kill -9 "$$MARCH_PID";)
DOCKER_SERVICE_MULTIARCH_CHECK := ($(DOCKER_SERVICE_SAFE_KILLER); sudo rm -fr /var/run/march/; (echo "Starting docker march service..."; sudo $(SONIC_NATIVE_DOCKERD_FOR_MUTLIARCH) &) &>/dev/null ; sleep 2; sudo $(SONIC_USERFACL_DOCKERD_FOR_MUTLIARCH);)

# Docker service to load the compiled dockers-*.gz
# docker 19.0 version above has path/length restriction, so replaced it with soft link in /tmp/
# Also dockerd does mkdir on the provided softlink, so used two level path "d/d"
D_ROOT=/tmp/d/d
SONIC_NATIVE_DOCKERD_FOR_DOCKERFS := rm -fr $(PWD)/dockerfs; mkdir -p $(PWD)/dockerfs; sudo rm -fr /tmp/d; mkdir -p /tmp/d; ln -s -f $(PWD)/dockerfs $(D_ROOT); \
    sudo dockerd --storage-driver=overlay2 --iptables=false \
    --data-root $(D_ROOT)/var/lib/docker/ --exec-root=$(D_ROOT)/var/run/docker/ \
    -H unix://$(D_ROOT)/var/run/docker.sock -p $(D_ROOT)/var/run/docker.pid &
SONIC_USERFACL_DOCKERD_FOR_DOCKERFS := setfacl -m user:$(USER):rw $(D_ROOT)/var/run/docker.sock
# If a dockerfs daemon is already up, kill it and retry via the || branch;
# otherwise start it, logging to /tmp/dockerfs.log.
DOCKER_SERVICE_DOCKERFS_CHECK := (sudo docker -H unix://$(D_ROOT)/var/run/docker.sock info &> /dev/null && sudo kill -9 `sudo cat $(D_ROOT)/var/run/docker.pid` && false) || (echo "Starting docker build service..."; (sudo $(SONIC_NATIVE_DOCKERD_FOR_DOCKERFS) ) &> /tmp/dockerfs.log ; sleep 1; sudo $(SONIC_USERFACL_DOCKERD_FOR_DOCKERFS);)

endif
|
|
|
|
# Build the shared slave base image. --no-cache is safe because the tag
# already encodes the content hash, so a rebuild only happens on change.
DOCKER_BASE_BUILD = docker build --no-cache \
    -t $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) \
    --build-arg http_proxy=$(http_proxy) \
    --build-arg https_proxy=$(https_proxy) \
    --build-arg no_proxy=$(no_proxy) \
    $(SLAVE_DIR)

# Pull the prebuilt base image from the configured registry instead.
DOCKER_BASE_PULL = docker pull \
    $(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG)

# Build the per-user slave image on top of the base image, baking in the
# invoking user's identity so files created in the container match the host.
DOCKER_BUILD = docker build --no-cache \
    --build-arg user=$(USER) \
    --build-arg uid=$(shell id -u) \
    --build-arg guid=$(shell id -g) \
    --build-arg hostname=$(shell echo $$HOSTNAME) \
    --build-arg slave_base_tag_ref=$(SLAVE_BASE_TAG) \
    -t $(SLAVE_IMAGE):$(SLAVE_TAG) \
    -f $(SLAVE_DIR)/Dockerfile.user \
    $(SLAVE_DIR)
|
|
|
|
# Inner make invocation executed inside the slave container: drives
# slave.mk, forwarding every user-tunable knob from this wrapper.
# SONIC_OVERRIDE_BUILD_VARS comes last so it can override anything above.
SONIC_BUILD_INSTRUCTION := make \
    -f slave.mk \
    PLATFORM=$(PLATFORM) \
    PLATFORM_ARCH=$(PLATFORM_ARCH) \
    MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) \
    BUILD_NUMBER=$(BUILD_NUMBER) \
    BUILD_TIMESTAMP=$(BUILD_TIMESTAMP) \
    SONIC_IMAGE_VERSION=$(SONIC_IMAGE_VERSION) \
    ENABLE_DHCP_GRAPH_SERVICE=$(ENABLE_DHCP_GRAPH_SERVICE) \
    ENABLE_ZTP=$(ENABLE_ZTP) \
    INCLUDE_PDE=$(INCLUDE_PDE) \
    SHUTDOWN_BGP_ON_START=$(SHUTDOWN_BGP_ON_START) \
    INCLUDE_KUBERNETES=$(INCLUDE_KUBERNETES) \
    KUBERNETES_VERSION=$(KUBERNETES_VERSION) \
    KUBERNETES_CNI_VERSION=$(KUBERNETES_CNI_VERSION) \
    K8s_GCR_IO_PAUSE_VERSION=$(K8s_GCR_IO_PAUSE_VERSION) \
    SONIC_ENABLE_PFCWD_ON_START=$(ENABLE_PFCWD_ON_START) \
    SONIC_ENABLE_SYNCD_RPC=$(ENABLE_SYNCD_RPC) \
    SONIC_INSTALL_DEBUG_TOOLS=$(INSTALL_DEBUG_TOOLS) \
    MDEBUG=$(MDEBUG) \
    PASSWORD=$(PASSWORD) \
    USERNAME=$(USERNAME) \
    SONIC_BUILD_JOBS=$(SONIC_BUILD_JOBS) \
    SONIC_USE_DOCKER_BUILDKIT=$(SONIC_USE_DOCKER_BUILDKIT) \
    VS_PREPARE_MEM=$(VS_PREPARE_MEM) \
    KERNEL_PROCURE_METHOD=$(KERNEL_PROCURE_METHOD) \
    SONIC_DPKG_CACHE_METHOD=$(SONIC_DPKG_CACHE_METHOD) \
    SONIC_DPKG_CACHE_SOURCE=$(SONIC_DPKG_CACHE_SOURCE) \
    HTTP_PROXY=$(http_proxy) \
    HTTPS_PROXY=$(https_proxy) \
    NO_PROXY=$(no_proxy) \
    SONIC_INCLUDE_SYSTEM_TELEMETRY=$(INCLUDE_SYSTEM_TELEMETRY) \
    INCLUDE_DHCP_RELAY=$(INCLUDE_DHCP_RELAY) \
    SONIC_INCLUDE_RESTAPI=$(INCLUDE_RESTAPI) \
    SONIC_INCLUDE_MUX=$(INCLUDE_MUX) \
    TELEMETRY_WRITABLE=$(TELEMETRY_WRITABLE) \
    EXTRA_DOCKER_TARGETS=$(EXTRA_DOCKER_TARGETS) \
    BUILD_LOG_TIMESTAMP=$(BUILD_LOG_TIMESTAMP) \
    SONIC_ENABLE_IMAGE_SIGNATURE=$(ENABLE_IMAGE_SIGNATURE) \
    ENABLE_HOST_SERVICE_ON_START=$(ENABLE_HOST_SERVICE_ON_START) \
    SLAVE_DIR=$(SLAVE_DIR) \
    ENABLE_AUTO_TECH_SUPPORT=$(ENABLE_AUTO_TECH_SUPPORT) \
    BUILD_MULTIASIC_KVM=$(BUILD_MULTIASIC_KVM) \
    $(SONIC_OVERRIDE_BUILD_VARS)
|
|
|
|
.PHONY: sonic-slave-build sonic-slave-bash init reset

.DEFAULT_GOAL := all

# Catch-all rule: any goal not defined below is forwarded to slave.mk
# running inside the slave container, after ensuring the required docker
# services and slave images exist.
%::
ifeq ($(MULTIARCH_QEMU_ENVIRON), y)
	@$(DOCKER_MULTIARCH_CHECK)
ifneq ($(BLDENV), )
	@$(DOCKER_SERVICE_MULTIARCH_CHECK)
	@$(DOCKER_SERVICE_DOCKERFS_CHECK)
endif
endif
	@$(OVERLAY_MODULE_CHECK)

# Build the hooks package and stage it into the slave build context.
	@pushd src/sonic-build-hooks; TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) make all; popd
	@cp src/sonic-build-hooks/buildinfo/sonic-build-hooks* $(SLAVE_DIR)/buildinfo
# Ensure the base image exists: try a registry pull when enabled, else
# build it locally; either way collect its version files afterwards.
	@docker inspect --type image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) &> /dev/null || \
	    { [ $(ENABLE_DOCKER_BASE_PULL) == y ] && { echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Pulling...; } && \
	    $(DOCKER_BASE_PULL) && \
	    { docker tag $(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) && \
	    scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; } } || \
	    { echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Building... ; \
	    $(DOCKER_BASE_BUILD) ; \
	    scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; }
# Ensure the per-user image exists on top of the base image.
	@docker inspect --type image $(SLAVE_IMAGE):$(SLAVE_TAG) &> /dev/null || \
	    { echo Image $(SLAVE_IMAGE):$(SLAVE_TAG) not found. Building... ; \
	    $(DOCKER_BUILD) ; }
# Run the inner build; with KEEP_SLAVE_ON=yes, drop into a shell afterwards.
ifeq "$(KEEP_SLAVE_ON)" "yes"
ifdef SOURCE_FOLDER
	@$(DOCKER_RUN) -v $(SOURCE_FOLDER):/var/$(USER)/src $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_BUILD_INSTRUCTION) $@; scripts/collect_build_version_files.sh \$$?; /bin/bash"
else
	@$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_BUILD_INSTRUCTION) $@; scripts/collect_build_version_files.sh \$$?; /bin/bash"
endif
else
	@$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_BUILD_INSTRUCTION) $@; scripts/collect_build_version_files.sh \$$?"
endif
|
|
|
|
# Build the sonic-build-hooks package and stage its artifacts into the
# slave image build context. Phony: it never produces a file of this name.
.PHONY: sonic-build-hooks
sonic-build-hooks:
# Use $(MAKE) so make flags and the jobserver propagate, and '&&' so a
# failed hooks build aborts the recipe (the previous
# 'pushd ...; make all; popd' form returned popd's status, silently
# swallowing a failure of the inner make).
	@cd src/sonic-build-hooks && TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) $(MAKE) all
	@cp src/sonic-build-hooks/buildinfo/sonic-build-hooks* $(SLAVE_DIR)/buildinfo
|
|
|
|
# Ensure the shared slave base image exists: pull from the registry when
# ENABLE_DOCKER_BASE_PULL=y (falling back to a local build on any pull
# failure), otherwise build locally; collect version files either way.
sonic-slave-base-build : sonic-build-hooks
	@$(OVERLAY_MODULE_CHECK)
	@echo Checking sonic-slave-base image: $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG)
	@docker inspect --type image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) &> /dev/null || \
	    { [ $(ENABLE_DOCKER_BASE_PULL) == y ] && { echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Pulling...; } && \
	    $(DOCKER_BASE_PULL) && \
	    { docker tag $(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) && \
	    scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; } } || \
	    { echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Building... ; \
	    $(DOCKER_BASE_BUILD) ; \
	    scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; }
|
|
|
|
# Ensure the per-user slave image (base image + user account layer) exists.
sonic-slave-build : sonic-slave-base-build
	@echo Checking sonic-slave image: $(SLAVE_IMAGE):$(SLAVE_TAG)
	@docker inspect --type image $(SLAVE_IMAGE):$(SLAVE_TAG) &> /dev/null || \
	    { echo Image $(SLAVE_IMAGE):$(SLAVE_TAG) not found. Building... ; \
	    $(DOCKER_BUILD) ; }
|
|
|
|
# Drop into an interactive shell inside the slave container.
sonic-slave-bash : sonic-slave-build
	@$(DOCKER_RUN) -t $(SLAVE_IMAGE):$(SLAVE_TAG) bash

# Run arbitrary command(s) (from $(SONIC_RUN_CMDS)) inside the slave container.
sonic-slave-run : sonic-slave-build
	@$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_RUN_CMDS)"
|
|
|
|
# Print the resolved slave image references: per-user image first, then
# the shared base image — one per line, same output as before.
showtag:
	@printf '%s\n%s\n' "$(SLAVE_IMAGE):$(SLAVE_TAG)" "$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG)"
|
|
|
|
# Initialize/refresh all git submodules recursively. The foreach then
# rewrites each submodule's .git file to use a relative gitdir path so
# the checkout stays valid if the tree is moved or bind-mounted.
init :
	@git submodule update --init --recursive
	@git submodule foreach --recursive '[ -f .git ] && echo "gitdir: $$(realpath --relative-to=. $$(cut -d" " -f2 .git))" > .git'
|
|
|
|
# Run the whole reset recipe in a single shell so 'read' and the 'if'
# block share state.
.ONESHELL : reset

# Hard-reset the working tree and all submodules after interactive
# confirmation; also stops a leftover march dockerd when a multiarch
# (armhf/arm64 qemu) build environment was active.
reset :
	@echo && echo -n "Warning! All local changes will be lost. Proceed? [y/N]: "
# read -r avoids backslash mangling; "$$ans" is quoted so an empty answer
# (plain Enter) no longer expands to the invalid test '[ == y ]', which
# previously printed a shell error before aborting.
	@read -r ans && (
	if [ "$$ans" == y ]; then
		echo "Resetting local repository. Please wait...";
		$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) sudo rm -rf fsroot;
		if [ "$(MULTIARCH_QEMU_ENVIRON)" == y ] && [[ "$(CONFIGURED_ARCH)" == "armhf" || "$(CONFIGURED_ARCH)" == "arm64" ]]; then
			echo "Stopping march $(CONFIGURED_ARCH) docker"
			sudo kill -9 `sudo cat /var/run/march/docker.pid` || true
			sudo rm -f /var/run/march/docker.pid || true
		fi
		git clean -xfdf;
		git reset --hard;
		git submodule foreach --recursive 'git clean -xfdf || true';
		git submodule foreach --recursive 'git reset --hard || true';
		git submodule foreach --recursive 'git remote update || true';
		git submodule update --init --recursive;
		echo "Reset complete!";
	else
		echo "Reset aborted";
	fi )
|