Merge pull request #296 from netbox-community/develop

Release 0.24.0
Authored by Tobias Genannt on 2020-05-16 18:56:06 +02:00; committed by GitHub.
commit 93a3784295
17 changed files with 247 additions and 150 deletions

.github/no-response.yml (new file)

@@ -0,0 +1,10 @@
# Configuration for probot-no-response - https://github.com/probot/no-response
# Number of days of inactivity before an Issue is closed for lack of response
daysUntilClose: 30
# Label requiring a response
responseRequiredLabel: awaiting answer
# Comment to post when closing an Issue for lack of response. Set to `false` to disable
closeComment: >
This issue has been automatically closed because there has been no response
to our request for more information from the original author.


@@ -15,7 +15,8 @@ jobs:
         build_cmd:
           - ./build-latest.sh
           - PRERELEASE=true ./build-latest.sh
-          - ./build-branches.sh
+          - ./build-next.sh
+          - ./build.sh develop
         docker_from:
           - '' # use the default of the DOCKERFILE
           - python:3.7-alpine


@@ -14,14 +14,15 @@ jobs:
         build_cmd:
           - ./build-latest.sh
           - PRERELEASE=true ./build-latest.sh
-          - ./build-branches.sh
+          - ./build-next.sh
+          - ./build.sh develop
       fail-fast: false
     runs-on: ubuntu-latest
     name: Builds new Netbox Docker Images
     steps:
       - id: git-checkout
         name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@v2
       - id: docker-build
         name: Build the image with '${{ matrix.build_cmd }}'
         run: ${{ matrix.build_cmd }}
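For reference, the four `build_cmd` entries in the matrix above are plain shell invocations, so the same combinations the workflow runs can be reproduced from a local checkout. A minimal sketch (assuming Docker and the scripts from this repository are available locally):

```bash
#!/bin/bash
# Sketch: run the same build commands the Actions matrix runs, one after the other.
set -e
for build_cmd in \
    "./build-latest.sh" \
    "PRERELEASE=true ./build-latest.sh" \
    "./build-next.sh" \
    "./build.sh develop"; do
  echo "🛠 Running '${build_cmd}'"
  eval "${build_cmd}"
done
```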


@@ -1,32 +0,0 @@
sudo: required
language: python
env:
- BUILD=release
- BUILD=prerelease
- BUILD=branches
- BUILD=special
git:
depth: 5
services:
- docker
install:
- docker-compose pull --parallel
- docker-compose build
script:
- docker-compose run netbox ./manage.py test
after_script:
- docker-compose down
after_success:
- docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD"
- ./build-all.sh --push
notifications:
slack:
secure: F3VsWcvU/XYyjGjU8ZAVGpREe7F1NjKq6LuMRzhQORbXUvanxDQtLzEe0Y5zm/6+gHkx6t8cX/v2PiCI+v46pkapYMUimd+QEOL1WxbUdnw2kQxcgw/R3wX34l2FHXbG3/a+TmH3euqbSCTIrPy9ufju948i+Q0E0u0fyInmozl8qOT23C4joQOpVAq7y+wHxTxsEg46ZzL2Ties+dmqjMsvHocv7mPI2IWzAWA8SJZxS82Amoapww++QjgEmoY+tMimLkdeXCRgeoj41UGHDg54rbEXh/PTaWiuzyzTr1WLmsGRScC57fDRivp3mSM37/MlNxsRj1z+j4zrvWFQgNfJ2yMjBHroc1jOX/uCY4dwbpSPqUCpc4idMGCGZFItgzTQ3lAPYAsom0C6n8C08Xk8EsNKWwXrDSd4ZUhIwptkNPCFK+kXbLFsMzSApnaBYW0T+wba57nZdiWjOPYmvJr49MDm5NHv2KaRBX2gpw7t7ZLhTgwGEWcZvcDebiLneXcXY5hZ7v2NHJkx/2x1yNXo85xZDy0wK1FGoOOHwPhvqOB+pcQZ/pUOSPTKqGw5l/CexoRm1shFsK+19FnSgimqTHjcuCo4lFW3JlEvlFhtfFXIte2Wjp1ALZgTrSq8zSD5rRxYCUKmM7b3EJwdaIgbvKWPdS4sCXlXU1bHx0g=


@@ -1,4 +1,4 @@
-ARG FROM=python:3.7-alpine
+ARG FROM
 FROM ${FROM} as builder
 RUN apk add --no-cache \
@@ -75,7 +75,7 @@ WORKDIR /opt/netbox/netbox
 # container startup.
 # Must set permissions for '/opt/netbox/netbox/media' directory
 # to g+w so that pictures can be uploaded to netbox.
-RUN mkdir static && chmod g+w static media
+RUN mkdir static && chmod -R g+w static media
 ENTRYPOINT [ "/opt/netbox/docker-entrypoint.sh" ]
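With the default removed from `ARG FROM`, the base image must now be supplied at build time (build.sh falls back to `python:3.7-alpine`, see further down). A sketch of a manual build with an explicit base image; the output tag is only an example:

```bash
# Build directly with docker, passing the base image via the FROM build argument.
# 'netbox-docker:local' is an arbitrary example tag.
docker build \
  --build-arg FROM=python:3.7-alpine \
  -f Dockerfile \
  -t netbox-docker:local \
  .
```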


@@ -52,7 +52,7 @@ There is a more complete [_Getting Started_ guide on our wiki][wiki-getting-star
 ```bash
 git clone -b release https://github.com/netbox-community/netbox-docker.git
 cd netbox-docker
-tee netbox-docker.override.yml <<EOF
+tee docker-compose.override.yml <<EOF
 version: '3.4'
 services:
   nginx:
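The hunk ends before the body of the override file, so the rest is not shown here. Purely as a hypothetical illustration (the port numbers are placeholders, not taken from the README), such an override typically publishes the proxy on a fixed host port:

```bash
# Hypothetical completion of the override, following the same tee-heredoc pattern.
# The port numbers are placeholders.
tee docker-compose.override.yml <<EOF
version: '3.4'
services:
  nginx:
    ports:
      - 8000:8080
EOF
```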


@@ -1 +1 @@
-0.23.0
+0.24.0


@@ -1,51 +0,0 @@
#!/bin/bash
# Builds all Docker images this project provides
# Arguments:
# BUILD: The release to build.
# Allowed: release, prerelease, branches, special
# Default: undefined
echo "▶️ $0 $*"
ALL_BUILDS=("release" "prerelease" "branches" "special")
BUILDS=("${BUILD:-"${ALL_BUILDS[@]}"}")
echo "⚙️ Configured builds: ${BUILDS[*]}"
if [ -n "${DEBUG}" ]; then
export DEBUG
fi
ERROR=0
for BUILD in "${BUILDS[@]}"; do
echo "🛠 Building '$BUILD' from '$DOCKERFILE'"
case $BUILD in
release)
# build the latest release
# shellcheck disable=SC2068
./build-latest.sh $@ || ERROR=1
;;
prerelease)
# build the latest pre-release
# shellcheck disable=SC2068
PRERELEASE=true ./build-latest.sh $@ || ERROR=1
;;
branches)
# build all branches
# shellcheck disable=SC2068
./build-branches.sh $@ || ERROR=1
;;
*)
echo "🚨 Unrecognized build '$BUILD'."
if [ -z "$DEBUG" ]; then
exit 1
else
echo "⚠️ Would exit here with code '1', but DEBUG is enabled."
fi
;;
esac
done
exit $ERROR


@@ -0,0 +1,8 @@
#!/bin/bash
push_image_to_registry() {
local target_tag=$1
echo "⏫ Pushing '${target_tag}'"
$DRY docker push "${target_tag}"
echo "✅ Finished pushing the Docker image '${target_tag}'."
}
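A short usage sketch of the new helper, mirroring how build.sh sources and calls it further down; the tag is only an example, and with `DRY` unset the call performs a real `docker push`:

```bash
# Sketch: source the helper, then push one tag at a time.
source ./build-functions/docker-functions.sh
push_image_to_registry "docker.io/netboxcommunity/netbox:snapshot"
```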


@@ -0,0 +1,82 @@
#!/bin/bash
# Retrieves image configuration from public images in DockerHub
# Functions from https://gist.github.com/cirocosta/17ea17be7ac11594cb0f290b0a3ac0d1
# Optimised for our use case
get_image_label() {
local label=$1
local image=$2
local tag=$3
local token
token=$(_get_token "$image")
local digest
digest=$(_get_digest "$image" "$tag" "$token")
local retval="null"
if [ "$digest" != "null" ]; then
retval=$(_get_image_configuration "$image" "$token" "$digest" "$label")
fi
echo "$retval"
}
get_image_layers() {
local image=$1
local tag=$2
local token
token=$(_get_token "$image")
_get_layers "$image" "$tag" "$token"
}
get_image_last_layer() {
local image=$1
local tag=$2
local token
token=$(_get_token "$image")
local layers
mapfile -t layers < <(_get_layers "$image" "$tag" "$token")
echo "${layers[-1]}"
}
_get_image_configuration() {
local image=$1
local token=$2
local digest=$3
local label=$4
curl \
--silent \
--location \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/blobs/$digest" \
| jq -r ".config.Labels.\"$label\""
}
_get_token() {
local image=$1
curl \
--silent \
"https://auth.docker.io/token?scope=repository:$image:pull&service=registry.docker.io" \
| jq -r '.token'
}
_get_digest() {
local image=$1
local tag=$2
local token=$3
curl \
--silent \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/manifests/$tag" \
| jq -r '.config.digest'
}
_get_layers() {
local image=$1
local tag=$2
local token=$3
curl \
--silent \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/manifests/$tag" \
| jq -r '.layers[].digest'
}
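A usage sketch for the helpers above (they need `curl`, `jq` and a reasonably recent Bash for `mapfile` and negative array indices); the image names and tags are examples:

```bash
# Sketch: query DockerHub for image metadata via the helpers above.
source ./build-functions/get-public-image-config.sh

# Digest of the last layer of the Python base image (official images live under "library/").
get_image_last_layer "library/python" "3.7-alpine"

# Read a label from the published netbox image, e.g. the NetBox git ref it was built from.
get_image_label "NETBOX_GIT_REF" "netboxcommunity/netbox" "latest"
```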


@@ -23,25 +23,17 @@ GITHUB_REPO="${GITHUB_REPO-$ORIGINAL_GITHUB_REPO}"
 URL_RELEASES="https://api.github.com/repos/${GITHUB_REPO}/branches?${GITHUB_OAUTH_PARAMS}"
 # Composing the JQ commans to extract the most recent version number
-JQ_BRANCHES='map(.name) | .[] | scan("^[^v].+") | match("^(master|develop).*") | .string'
+JQ_NEXT='map(.name) | .[] | scan("^[^v].+") | match("^(develop-).*") | .string'
 CURL="curl -sS"
 # Querying the Github API to fetch all branches
-BRANCHES=$($CURL "${URL_RELEASES}" | jq -r "$JQ_BRANCHES")
+NEXT=$($CURL "${URL_RELEASES}" | jq -r "$JQ_NEXT")
-###
-# Building each branch
-###
-# keeping track whether an error occured
-ERROR=0
-# calling build.sh for each branch
-for BRANCH in $BRANCHES; do
+if [ -n "$NEXT" ]; then
   # shellcheck disable=SC2068
-  ./build.sh "${BRANCH}" $@ || ERROR=1
-done
-# returning whether an error occured
-exit $ERROR
+  ./build.sh "${NEXT}" $@
+else
+  echo "No branch matching 'develop-*' found"
+  echo "::set-output name=skipped::true"
+fi
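The renamed script now only builds branches matching `develop-*` and skips everything else. A quick sketch of the `JQ_NEXT` filter against a hand-written `/branches` response:

```bash
# Sketch: feed a fabricated GitHub /branches response through the JQ_NEXT filter.
echo '[{"name":"master"},{"name":"develop"},{"name":"develop-2.8"},{"name":"v2.7.12"}]' \
  | jq -r 'map(.name) | .[] | scan("^[^v].+") | match("^(develop-).*") | .string'
# prints: develop-2.8
```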

build.sh

@@ -49,7 +49,7 @@ if [ "${1}x" == "x" ] || [ "${1}" == "--help" ] || [ "${1}" == "-h" ]; then
   echo "  DOCKERFILE      The name of Dockerfile to use."
   echo "                  Default: Dockerfile"
   echo "  DOCKER_FROM     The base image to use."
-  echo "                  Default: Whatever is defined as default in the Dockerfile."
+  echo "                  Default: 'python:3.7-alpine'"
   echo "  DOCKER_TARGET   A specific target to build."
   echo "                  It's currently not possible to pass multiple targets."
   echo "                  Default: main ldap"
@@ -153,6 +153,13 @@ if [ ! -f "${DOCKERFILE}" ]; then
   fi
 fi
+
+###
+# Determining the value for DOCKER_FROM
+###
+if [ -z "$DOCKER_FROM" ]; then
+  DOCKER_FROM="python:3.7-alpine"
+fi
 ###
 # Variables for labelling the docker image
 ###
@@ -167,9 +174,9 @@ PROJECT_VERSION="${PROJECT_VERSION-$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]
 # Get the Git information from the netbox directory
 if [ -d "${NETBOX_PATH}/.git" ]; then
-  NETBOX_GIT_REF=$(cd ${NETBOX_PATH}; git rev-parse HEAD)
-  NETBOX_GIT_BRANCH=$(cd ${NETBOX_PATH}; git rev-parse --abbrev-ref HEAD)
-  NETBOX_GIT_URL=$(cd ${NETBOX_PATH}; git remote get-url origin)
+  NETBOX_GIT_REF=$(cd "${NETBOX_PATH}"; git rev-parse HEAD)
+  NETBOX_GIT_BRANCH=$(cd "${NETBOX_PATH}"; git rev-parse --abbrev-ref HEAD)
+  NETBOX_GIT_URL=$(cd "${NETBOX_PATH}"; git remote get-url origin)
 fi
 ###
@@ -217,15 +224,18 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
   # composing the additional DOCKER_SHORT_TAG,
   # i.e. "v2.6.1" becomes "v2.6",
   # which is only relevant for version tags
+  # Also let "latest" follow the highest version
   ###
   if [[ "${TAG}" =~ ^v([0-9]+)\.([0-9]+)\.[0-9]+$ ]]; then
     MAJOR=${BASH_REMATCH[1]}
     MINOR=${BASH_REMATCH[2]}
     TARGET_DOCKER_SHORT_TAG="${DOCKER_SHORT_TAG-${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_REPO}:v${MAJOR}.${MINOR}}"
+    TARGET_DOCKER_LATEST_TAG="${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_REPO}:latest"
     if [ "${DOCKER_TARGET}" != "main" ]; then
       TARGET_DOCKER_SHORT_TAG="${TARGET_DOCKER_SHORT_TAG}-${DOCKER_TARGET}"
+      TARGET_DOCKER_LATEST_TAG="${TARGET_DOCKER_LATEST_TAG}-${DOCKER_TARGET}"
     fi
   fi
@@ -233,6 +243,48 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
   # Proceeding to buils stage, except if `--push-only` is passed
   ###
   if [ "${2}" != "--push-only" ] ; then
+    ###
+    # Checking if the build is necessary,
+    # meaning build only if one of those values changed:
+    # - Python base image digest (Label: PYTHON_BASE_DIGEST)
+    # - netbox git ref (Label: NETBOX_GIT_REF)
+    # - netbox-docker git ref (Label: org.label-schema.vcs-ref)
+    ###
+    # Load information from registry (only for docker.io)
+    SHOULD_BUILD="false"
+    BUILD_REASON=""
+    if [ -z "${GH_ACTION}" ]; then
+      # Asuming non Github builds should always proceed
+      SHOULD_BUILD="true"
+      BUILD_REASON="${BUILD_REASON} interactive"
+    elif [ "$DOCKER_REGISTRY" = "docker.io" ]; then
+      source ./build-functions/get-public-image-config.sh
+      IFS=':' read -ra DOCKER_FROM_SPLIT <<< "${DOCKER_FROM}"
+      if ! [[ ${DOCKER_FROM_SPLIT[0]} =~ .*/.* ]]; then
+        # Need to use "library/..." for images the have no two part name
+        DOCKER_FROM_SPLIT[0]="library/${DOCKER_FROM_SPLIT[0]}"
+      fi
+      PYTHON_LAST_LAYER=$(get_image_last_layer "${DOCKER_FROM_SPLIT[0]}" "${DOCKER_FROM_SPLIT[1]}")
+      mapfile -t IMAGES_LAYERS_OLD < <(get_image_layers "${DOCKER_ORG}"/"${DOCKER_REPO}" "${TAG}")
+      NETBOX_GIT_REF_OLD=$(get_image_label NETBOX_GIT_REF "${DOCKER_ORG}"/"${DOCKER_REPO}" "${TAG}")
+      GIT_REF_OLD=$(get_image_label org.label-schema.vcs-ref "${DOCKER_ORG}"/"${DOCKER_REPO}" "${TAG}")
+      if ! printf '%s\n' "${IMAGES_LAYERS_OLD[@]}" | grep -q -P "^${PYTHON_LAST_LAYER}\$"; then
+        SHOULD_BUILD="true"
+        BUILD_REASON="${BUILD_REASON} python"
+      fi
+      if [ "${NETBOX_GIT_REF}" != "${NETBOX_GIT_REF_OLD}" ]; then
+        SHOULD_BUILD="true"
+        BUILD_REASON="${BUILD_REASON} netbox"
+      fi
+      if [ "${GIT_REF}" != "${GIT_REF_OLD}" ]; then
+        SHOULD_BUILD="true"
+        BUILD_REASON="${BUILD_REASON} netbox-docker"
+      fi
+    else
+      SHOULD_BUILD="true"
+      BUILD_REASON="${BUILD_REASON} no-check"
+    fi
     ###
     # Composing all arguments for `docker build`
     ###
@@ -244,32 +296,35 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
     )
     if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
       DOCKER_BUILD_ARGS+=( -t "${TARGET_DOCKER_SHORT_TAG}" )
+      DOCKER_BUILD_ARGS+=( -t "${TARGET_DOCKER_LATEST_TAG}" )
     fi
     # --label
-    if [ "${DOCKER_TARGET}" == "main" ]; then
-      DOCKER_BUILD_ARGS+=(
-        --label "ORIGINAL_TAG=${TARGET_DOCKER_TAG}"
-        --label "org.label-schema.build-date=${BUILD_DATE}"
-        --label "org.opencontainers.image.created=${BUILD_DATE}"
-        --label "org.label-schema.version=${PROJECT_VERSION}"
-        --label "org.opencontainers.image.version=${PROJECT_VERSION}"
-      )
-      if [ -d ".git" ]; then
-        DOCKER_BUILD_ARGS+=(
-          --label "org.label-schema.vcs-ref=${GIT_REF}"
-          --label "org.opencontainers.image.revision=${GIT_REF}"
-        )
-      fi
-      if [ -d "${NETBOX_PATH}/.git" ]; then
-        DOCKER_BUILD_ARGS+=(
-          --label "NETBOX_GIT_BRANCH=${NETBOX_GIT_BRANCH}"
-          --label "NETBOX_GIT_REF=${NETBOX_GIT_REF}"
-          --label "NETBOX_GIT_URL=${NETBOX_GIT_URL}"
-        )
-      fi
-    fi
+    DOCKER_BUILD_ARGS+=(
+      --label "ORIGINAL_TAG=${TARGET_DOCKER_TAG}"
+      --label "org.label-schema.build-date=${BUILD_DATE}"
+      --label "org.opencontainers.image.created=${BUILD_DATE}"
+      --label "org.label-schema.version=${PROJECT_VERSION}"
+      --label "org.opencontainers.image.version=${PROJECT_VERSION}"
+    )
+    if [ -d ".git" ]; then
+      DOCKER_BUILD_ARGS+=(
+        --label "org.label-schema.vcs-ref=${GIT_REF}"
+        --label "org.opencontainers.image.revision=${GIT_REF}"
+      )
+    fi
+    if [ -d "${NETBOX_PATH}/.git" ]; then
+      DOCKER_BUILD_ARGS+=(
+        --label "NETBOX_GIT_BRANCH=${NETBOX_GIT_BRANCH}"
+        --label "NETBOX_GIT_REF=${NETBOX_GIT_REF}"
+        --label "NETBOX_GIT_URL=${NETBOX_GIT_URL}"
+      )
+    fi
+    if [ -n "${BUILD_REASON}" ]; then
+      BUILD_REASON=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<< "$BUILD_REASON")
+      DOCKER_BUILD_ARGS+=( --label "BUILD_REASON=${BUILD_REASON}" )
+    fi
     # --build-arg
@@ -289,23 +344,29 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
     ###
     # Building the docker image
     ###
-    echo "🐳 Building the Docker image '${TARGET_DOCKER_TAG}'."
-    $DRY docker build "${DOCKER_BUILD_ARGS[@]}" .
-    echo "✅ Finished building the Docker images '${TARGET_DOCKER_TAG}'"
+    if [ "${SHOULD_BUILD}" == "true" ]; then
+      echo "🐳 Building the Docker image '${TARGET_DOCKER_TAG}'."
+      echo "  Build reason set to: ${BUILD_REASON}"
+      $DRY docker build "${DOCKER_BUILD_ARGS[@]}" .
+      echo "✅ Finished building the Docker images '${TARGET_DOCKER_TAG}'"
+      echo "🔎 Inspecting labels on '${TARGET_DOCKER_TAG}'"
+      $DRY docker inspect "${TARGET_DOCKER_TAG}" --format "{{json .Config.Labels}}"
+    else
+      echo "Build skipped because sources didn't change"
+      echo "::set-output name=skipped::true"
+    fi
   fi
   ###
   # Pushing the docker images if either `--push` or `--push-only` are passed
   ###
   if [ "${2}" == "--push" ] || [ "${2}" == "--push-only" ] ; then
-    echo "⏫ Pushing '${TARGET_DOCKER_TAG}"
-    $DRY docker push "${TARGET_DOCKER_TAG}"
-    echo "✅ Finished pushing the Docker image '${TARGET_DOCKER_TAG}'."
+    source ./build-functions/docker-functions.sh
+    push_image_to_registry "${TARGET_DOCKER_TAG}"
     if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
-      echo "⏫ Pushing '${TARGET_DOCKER_SHORT_TAG}'"
-      $DRY docker push "${TARGET_DOCKER_SHORT_TAG}"
-      echo "✅ Finished pushing the Docker image '${TARGET_DOCKER_SHORT_TAG}'."
+      push_image_to_registry "${TARGET_DOCKER_SHORT_TAG}"
+      push_image_to_registry "${TARGET_DOCKER_LATEST_TAG}"
     fi
   fi
 done
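Typical invocations are unchanged; the new skip logic only applies to GitHub Actions builds that target docker.io, while interactive builds always proceed (reason `interactive`). Two example calls:

```bash
# Build the develop branch locally (always builds; BUILD_REASON is "interactive").
./build.sh develop

# Build and push master; for version tags this now also pushes the
# v<major>.<minor> short tag and the new "latest" tag.
./build.sh master --push
```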


@@ -51,7 +51,15 @@ SECRET_KEY = os.environ.get('SECRET_KEY', read_secret('secret_key'))
 # Redis database settings. The Redis database is used for caching and background processing such as webhooks
 REDIS = {
-    'webhooks': {
+    'tasks': {
+        'HOST': os.environ.get('REDIS_HOST', 'localhost'),
+        'PORT': int(os.environ.get('REDIS_PORT', 6379)),
+        'PASSWORD': os.environ.get('REDIS_PASSWORD', read_secret('redis_password')),
+        'DATABASE': int(os.environ.get('REDIS_DATABASE', 0)),
+        'DEFAULT_TIMEOUT': int(os.environ.get('REDIS_TIMEOUT', 300)),
+        'SSL': os.environ.get('REDIS_SSL', 'False').lower() == 'true',
+    },
+    'webhooks': { # legacy setting, can be removed after Netbox seizes support for it
         'HOST': os.environ.get('REDIS_HOST', 'localhost'),
         'PORT': int(os.environ.get('REDIS_PORT', 6379)),
         'PASSWORD': os.environ.get('REDIS_PASSWORD', read_secret('redis_password')),
@@ -119,6 +127,10 @@ EMAIL = {
     'PASSWORD': os.environ.get('EMAIL_PASSWORD', read_secret('email_password')),
     'TIMEOUT': int(os.environ.get('EMAIL_TIMEOUT', 10)),  # seconds
     'FROM_EMAIL': os.environ.get('EMAIL_FROM', ''),
+    'USE_SSL': os.environ.get('EMAIL_USE_SSL', 'False').lower() == 'true',
+    'USE_TLS': os.environ.get('EMAIL_USE_TLS', 'False').lower() == 'true',
+    'SSL_CERTFILE': os.environ.get('EMAIL_SSL_CERTFILE', ''),
+    'SSL_KEYFILE': os.environ.get('EMAIL_SSL_KEYFILE', ''),
 }
 # Enforcement of unique IP space can be toggled on a per-VRF basis.
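All of the new keys are read from environment variables, so they can be configured through `env/netbox.env` (or any other mechanism that ends up in the container environment). A sketch of the Redis-related variables consumed by the `tasks` block above; the values are examples:

```bash
# Example values for the environment variables read by the 'tasks' Redis block.
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_DATABASE=0
REDIS_SSL=false
REDIS_TIMEOUT=300
```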


@@ -37,6 +37,9 @@ AUTH_LDAP_BIND_PASSWORD = os.environ.get('AUTH_LDAP_BIND_PASSWORD', read_secret(
 # Set a string template that describes any users distinguished name based on the username.
 AUTH_LDAP_USER_DN_TEMPLATE = os.environ.get('AUTH_LDAP_USER_DN_TEMPLATE', None)
+
+# Enable STARTTLS for ldap authentication.
+AUTH_LDAP_START_TLS = os.environ.get('AUTH_LDAP_START_TLS', 'False').lower() == 'true'
 # Include this setting if you want to ignore certificate errors. This might be needed to accept a self-signed cert.
 # Note that this is a NetBox-specific setting which sets:
 # ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
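The new flag follows the same pattern as the other LDAP settings and is driven by an environment variable; where exactly it is set (for example an LDAP env file) depends on your setup and is an assumption here:

```bash
# Example: enable STARTTLS for LDAP authentication (anything other than "true" keeps it off).
AUTH_LDAP_START_TLS=true
```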

env/netbox.env

@@ -9,6 +9,11 @@ EMAIL_USERNAME=netbox
 EMAIL_PASSWORD=
 EMAIL_TIMEOUT=5
 EMAIL_FROM=netbox@bar.com
+# EMAIL_USE_SSL and EMAIL_USE_TLS are mutually exclusive, i.e. they can't both be `true`!
+EMAIL_USE_SSL=false
+EMAIL_USE_TLS=false
+EMAIL_SSL_CERTFILE=
+EMAIL_SSL_KEYFILE=
 MEDIA_ROOT=/opt/netbox/netbox/media
 NAPALM_USERNAME=
 NAPALM_PASSWORD=


@@ -1,4 +1,4 @@
-from dcim.constants import CONNECTION_STATUS_PLANNED, DEVICE_STATUS_ACTIVE
+from dcim.choices import DeviceStatusChoices
 from dcim.models import ConsolePort, Device, PowerPort
 from extras.reports import Report
@@ -9,13 +9,14 @@ class DeviceConnectionsReport(Report):
     def test_console_connection(self):
         # Check that every console port for every active device has a connection defined.
-        for console_port in ConsolePort.objects.select_related('device').filter(device__status=DEVICE_STATUS_ACTIVE):
+        active = DeviceStatusChoices.STATUS_ACTIVE
+        for console_port in ConsolePort.objects.prefetch_related('device').filter(device__status=active):
             if console_port.connected_endpoint is None:
                 self.log_failure(
                     console_port.device,
                     "No console connection defined for {}".format(console_port.name)
                 )
-            elif console_port.connection_status == CONNECTION_STATUS_PLANNED:
+            elif not console_port.connection_status:
                 self.log_warning(
                     console_port.device,
                     "Console connection for {} marked as planned".format(console_port.name)
@@ -26,12 +27,12 @@ class DeviceConnectionsReport(Report):
     def test_power_connections(self):
         # Check that every active device has at least two connected power supplies.
-        for device in Device.objects.filter(status=DEVICE_STATUS_ACTIVE):
+        for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE):
             connected_ports = 0
             for power_port in PowerPort.objects.filter(device=device):
                 if power_port.connected_endpoint is not None:
                     connected_ports += 1
-                if power_port.connection_status == CONNECTION_STATUS_PLANNED:
+                if not power_port.connection_status:
                     self.log_warning(
                         device,
                         "Power connection for {} marked as planned".format(power_port.name)
@@ -43,4 +44,3 @@ class DeviceConnectionsReport(Report):
                 )
             else:
                 self.log_success(device)


@@ -14,5 +14,10 @@ with scandir(dirname(abspath(__file__))) as it:
         if f.name.startswith('__') or not f.is_file():
             continue
-        print(f"Running {f.path}")
-        runpy.run_path(f.path)
+        print(f"▶️ Running the startup script {f.path}")
+        try:
+            runpy.run_path(f.path)
+        except SystemExit as e:
+            if e.code is not None and e.code != 0:
+                print(f"‼️ The startup script {f.path} returned with code {e.code}, exiting.")
+                raise