Vapor API Scaffold (#2)
* Updated README
* Start of Vapor Netbox Module
* Add api/vapor route
* Ignore virtualenv
* Query devices assigned to users
* Add vapor/interfaces route
* Adds docker-compose file to manage postgres/redis
  - Initial test suite for the vapor api module (Django tests are kind of hard and slow)
* Init pipeline
  - Adds a tox harness to run the test suite
  - Test running in tox
  - Clone example config to config.py
  - Add the kubernetes agent + dependent services to complete tests in the podspec
  - Some really hacky sed configuration on the fly
* Init Docker build
  - Adds the dockerfile assets from vapor-ware/netbox-docker
  - Slight changes to keep the root directory clean (nesting dirs in docker path)
  - Adds a rudimentary job label to build on the micro-k8s builder
  - Adds docker build/publish stages to the pipeline for branch builds
  - Dockerignore the project dir as it's fetching packages from GHAPI
* Cleanups
* More unittests
This commit is contained in:
parent a4d8b92cb1
commit cca2c6b680
4 .dockerignore Normal file
@@ -0,0 +1,4 @@
netbox
scripts
.tox
.venv
4 .gitignore vendored
@@ -14,3 +14,7 @@ fabfile.py
gunicorn_config.py
.DS_Store
.vscode
.venv
.tox
!docker/configuration/gunicorn_config.py
.coverage
117 .jenkins Normal file
@@ -0,0 +1,117 @@
|
||||
def getSafeBranchName() {
|
||||
return "${env.BRANCH_NAME}".replace('/', '-')
|
||||
}
|
||||
|
||||
def getTagName() {
|
||||
def branchName = getSafeBranchName()
|
||||
return "${branchName}.${env.BUILD_NUMBER}"
|
||||
}
|
||||
|
||||
|
||||
pipeline {
|
||||
// disallow unconfigured stages
|
||||
// each stage will have to declare an agent block and direct the pipeline
|
||||
// how to execute. This skips the implicit agent scheduling.
|
||||
agent none
|
||||
environment {
|
||||
TAG = getTagName()
|
||||
IMAGE = 'vaporio/netbox'
|
||||
}
|
||||
stages {
|
||||
stage('Test') {
|
||||
|
||||
/*
|
||||
# Setup an agent dynamically using the following podspec. Netbox requires
|
||||
# redis and postgres by default (they've disabled all the other backend drivers),
# so we'll tack those on to the pods with some sane defaults.
|
||||
# Note: this targets units on the vapor-build cluster (implicit). This may not be
# desirable in the case of building docker images.
|
||||
*/
|
||||
agent {
|
||||
kubernetes {
|
||||
defaultContainer 'jnlp'
|
||||
yaml """
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
jenkins/job: netbox
|
||||
spec:
|
||||
containers:
|
||||
- name: python
|
||||
image: vaporio/jenkins-agent-python36:latest
|
||||
command:
|
||||
- cat
|
||||
tty: true
|
||||
- name: postgres
|
||||
image: postgres:10
|
||||
env:
|
||||
- name: POSTGRES_USER
|
||||
value: netbox
|
||||
- name: POSTGRES_PASSWORD
|
||||
value: netbox
|
||||
- name: redis
|
||||
image: redis:latest
|
||||
nodeSelector:
|
||||
cloud.google.com/gke-nodepool: jenkins
|
||||
tolerations:
|
||||
- key: role
|
||||
operator: Equal
|
||||
value: jenkins
|
||||
effect: NoSchedule
|
||||
"""
|
||||
}
|
||||
}
|
||||
steps {
|
||||
container('python') {
|
||||
/*
|
||||
# in the netbox/netbox path there is an example configuration file
|
||||
# clone this file and set up a permissive configuration for CI
|
||||
# using the values we declared in the podspec
|
||||
*/
|
||||
dir('netbox/netbox') {
|
||||
sh """
|
||||
cp configuration.example.py configuration.py
|
||||
sed -i -e "s/ALLOWED_HOSTS = .*/ALLOWED_HOSTS = ['*']/g" configuration.py
|
||||
sed -i -e "s/SECRET_KEY = .*/SECRET_KEY = 'netboxci'/g" configuration.py
|
||||
sed -i -e "s/USER': .*/USER': 'netbox',/g" configuration.py
|
||||
sed -i -e "s/PASSWORD': .*/PASSWORD': 'netbox',/g" configuration.py
|
||||
"""
|
||||
}
|
||||
// finally, kick off tox to run the entire test suite
|
||||
sh 'tox'
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('The Great British Baking Show') {
|
||||
/* The docker-build agent is statically enlisted in Jenkins. It runs
on the micro-k8s unit in vaporio/foundation:latest and has Docker
accessible to uid 1000. */
|
||||
when {
|
||||
not {
|
||||
changeRequest()
|
||||
}
|
||||
}
|
||||
agent {
|
||||
label 'docker-build'
|
||||
}
|
||||
steps {
|
||||
container('docker') {
|
||||
// embed tags from build env to do image tracing later
|
||||
sh '''
|
||||
docker build . \
|
||||
-f Dockerfile \
|
||||
--build-arg BUILD_DATE=$(date -u +%Y-%m-%dT%T 2> /dev/null) \
|
||||
--build-arg VCS_REF=${GIT_COMMIT} \
|
||||
--build-arg BUILD_VERSION=${BUILD_TAG} \
|
||||
--build-arg BRANCH=${BRANCH_NAME} \
|
||||
-t ${IMAGE}:${TAG}
|
||||
'''
|
||||
withDockerRegistry(registry: [credentialsId: 'vio-docker-hub']) {
|
||||
sh "docker push ${env.IMAGE}:${env.TAG}"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
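The Test stage above builds a permissive CI configuration by copying configuration.example.py and rewriting a handful of settings with sed (the commit message calls this "really hacky sed configuration on the fly"). A rough Python sketch of the same rewrite, reusing the exact patterns and values from the stage; the helper name and standalone-script form are illustrative only:

```
import re
from pathlib import Path

# The same four substitutions the pipeline applies with sed. Like the sed
# version, the USER/PASSWORD patterns are loose and will touch any matching
# line in the file.
CI_SUBSTITUTIONS = [
    (r"ALLOWED_HOSTS = .*", "ALLOWED_HOSTS = ['*']"),
    (r"SECRET_KEY = .*", "SECRET_KEY = 'netboxci'"),
    (r"USER': .*", "USER': 'netbox',"),
    (r"PASSWORD': .*", "PASSWORD': 'netbox',"),
]

def write_ci_config(src='netbox/netbox/configuration.example.py',
                    dst='netbox/netbox/configuration.py'):
    text = Path(src).read_text()
    for pattern, replacement in CI_SUBSTITUTIONS:
        text = re.sub(pattern, replacement, text)
    Path(dst).write_text(text)

if __name__ == '__main__':
    write_ci_config()
```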
84 Dockerfile Normal file
@@ -0,0 +1,84 @@
|
||||
FROM vaporio/python:3.6
|
||||
|
||||
RUN apt-get update -qy \
|
||||
&& apt-get install -y \
|
||||
libsasl2-dev \
|
||||
graphviz \
|
||||
libjpeg-dev \
|
||||
libffi-dev \
|
||||
libxml2-dev \
|
||||
libxslt1-dev \
|
||||
libldap2-dev \
|
||||
libpq-dev \
|
||||
ttf-ubuntu-font-family \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN pip install \
|
||||
# gunicorn is used for launching netbox
|
||||
gunicorn \
|
||||
# napalm is used for gathering information from network devices
|
||||
napalm \
|
||||
# ruamel is used in startup_scripts
|
||||
ruamel.yaml \
|
||||
# pinning django to the version required by netbox
|
||||
# adding it here, to install the correct version of
|
||||
# django-rq
|
||||
'Django>=2.2,<2.3' \
|
||||
# django-rq is used for webhooks
|
||||
django-rq
|
||||
|
||||
ARG BRANCH
|
||||
ARG ORG=vapor-ware
|
||||
|
||||
|
||||
# Set image metadata (see: http://label-schema.org/rc1/)
|
||||
ARG BUILD_VERSION
|
||||
ARG BUILD_DATE
|
||||
ARG VCS_REF
|
||||
|
||||
LABEL maintainer="Vapor IO"\
|
||||
org.label-schema.schema-version="1.0" \
|
||||
org.label-schema.build-date=$BUILD_DATE \
|
||||
org.label-schema.name="vaporio/netbox" \
|
||||
org.label-schema.vcs-url="https://github.com/vapor-ware/netbox" \
|
||||
org.label-schema.vcs-ref=$VCS_REF \
|
||||
org.label-schema.vendor="Vapor IO" \
|
||||
org.label-schema.version=$BUILD_VERSION
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
# As the requirements don't change very often,
|
||||
# and as they take some time to compile,
|
||||
# we try to cache them very aggressively.
|
||||
ARG REQUIREMENTS_URL=https://raw.githubusercontent.com/$ORG/netbox/$BRANCH/requirements.txt
|
||||
ADD ${REQUIREMENTS_URL} requirements.txt
|
||||
RUN pip install -r requirements.txt
|
||||
|
||||
# Cache bust when the upstream branch changes:
|
||||
# ADD will fetch the file and check if it has changed
|
||||
# If not, Docker will use the existing build cache.
|
||||
# If yes, Docker will bust the cache and run every build step from here on.
|
||||
ARG REF_URL=https://api.github.com/repos/$ORG/netbox/contents?ref=$BRANCH
|
||||
ADD ${REF_URL} version.json
|
||||
|
||||
WORKDIR /opt
|
||||
|
||||
ARG URL=https://github.com/$ORG/netbox/archive/$BRANCH.tar.gz
|
||||
RUN wget -q -O - "${URL}" | tar xz \
|
||||
&& mv netbox* netbox
|
||||
|
||||
COPY docker/configuration.docker.py /opt/netbox/netbox/netbox/configuration.py
|
||||
COPY docker/configuration/gunicorn_config.py /etc/netbox/config/
|
||||
COPY docker/nginx.conf /etc/netbox-nginx/nginx.conf
|
||||
COPY docker/docker-entrypoint.sh /opt/netbox/docker-entrypoint.sh
|
||||
COPY docker/startup_scripts/ /opt/netbox/startup_scripts/
|
||||
COPY docker/initializers/ /opt/netbox/initializers/
|
||||
COPY docker/configuration/configuration.py /etc/netbox/config/configuration.py
|
||||
|
||||
WORKDIR /opt/netbox/netbox
|
||||
|
||||
ENTRYPOINT [ "/opt/netbox/docker-entrypoint.sh" ]
|
||||
|
||||
CMD ["gunicorn", "-c /etc/netbox/config/gunicorn_config.py", "netbox.wsgi"]
|
||||
|
||||
LABEL SRC_URL="$URL"
|
18 README Normal file
@@ -0,0 +1,18 @@
# Vapor's HARD FORK of Netbox



## Branch layout

| Branch | Purpose |
| ------ | ------- |
| *master* | Last point of sync with upstream. Should remain untouched |
| *develop* | Base branch used for all new feature branches. PR target |
| *stable* | Code currently running in production |

## Developing

```
docker-compose?
```
12 docker-compose.yml Normal file
@@ -0,0 +1,12 @@
version: '2'
services:
  postgres:
    image: "postgres:9.6"
    environment:
      POSTGRES_PASSWORD: "12345"
    ports:
      - 5432:5432
  redis:
    image: "redis"
    ports:
      - 6379:6379
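The compose file only provides the two backing services the test suite needs, with throwaway credentials. A quick way to confirm they are reachable before running tox — a sketch assuming the psycopg2-binary and redis client packages are installed, and using the postgres image's default user/database names:

```
# Sanity check that the compose services are up before running the test suite.
import psycopg2
import redis

def check_services():
    conn = psycopg2.connect(host='localhost', port=5432,
                            user='postgres', password='12345',
                            dbname='postgres')
    conn.close()
    assert redis.Redis(host='localhost', port=6379).ping()
    print('postgres and redis are up')

if __name__ == '__main__':
    check_services()
```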
10 docker/configuration.docker.py Normal file
@@ -0,0 +1,10 @@
import importlib.util
import sys

try:
    spec = importlib.util.spec_from_file_location('configuration', '/etc/netbox/config/configuration.py')
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    sys.modules['netbox.configuration'] = module
except:
    raise ImportError('')
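configuration.docker.py is a thin shim: it loads whatever file is mounted at /etc/netbox/config/configuration.py and registers it under the name netbox.configuration, so NetBox's normal settings import picks up the container config. The same importlib pattern in isolation, with a generic helper name chosen for illustration:

```
import importlib.util
import sys

def load_as_module(path, name):
    # Load an arbitrary .py file from disk and expose it under a chosen module
    # name so later `import` statements return this object.
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)   # run the file's top-level code
    sys.modules[name] = module
    return module

# e.g. load_as_module('/etc/netbox/config/configuration.py', 'netbox.configuration')
```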
178 docker/configuration/configuration.py Normal file
@@ -0,0 +1,178 @@
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
|
||||
# For reference see http://netbox.readthedocs.io/en/latest/configuration/mandatory-settings/
|
||||
# Based on https://github.com/netbox-community/netbox/blob/develop/netbox/netbox/configuration.example.py
|
||||
|
||||
# Read secret from file
|
||||
def read_secret(secret_name):
|
||||
try:
|
||||
f = open('/run/secrets/' + secret_name, 'r', encoding='utf-8')
|
||||
except EnvironmentError:
|
||||
return ''
|
||||
else:
|
||||
with f:
|
||||
return f.readline().strip()
|
||||
|
||||
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
#########################
|
||||
# #
|
||||
# Required settings #
|
||||
# #
|
||||
#########################
|
||||
|
||||
# This is a list of valid fully-qualified domain names (FQDNs) for the NetBox server. NetBox will not permit write
|
||||
# access to the server via any other hostnames. The first FQDN in the list will be treated as the preferred name.
|
||||
#
|
||||
# Example: ALLOWED_HOSTS = ['netbox.example.com', 'netbox.internal.local']
|
||||
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '*').split(' ')
|
||||
|
||||
# PostgreSQL database configuration.
|
||||
DATABASE = {
|
||||
'NAME': os.environ.get('DB_NAME', 'netbox'), # Database name
|
||||
'USER': os.environ.get('DB_USER', ''), # PostgreSQL username
|
||||
'PASSWORD': os.environ.get('DB_PASSWORD', read_secret('db_password')),
|
||||
# PostgreSQL password
|
||||
'HOST': os.environ.get('DB_HOST', 'localhost'), # Database server
|
||||
'PORT': os.environ.get('DB_PORT', ''), # Database port (leave blank for default)
|
||||
}
|
||||
|
||||
# This key is used for secure generation of random numbers and strings. It must never be exposed outside of this file.
|
||||
# For optimal security, SECRET_KEY should be at least 50 characters in length and contain a mix of letters, numbers, and
|
||||
# symbols. NetBox will not run without this defined. For more information, see
|
||||
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY
|
||||
SECRET_KEY = os.environ.get('SECRET_KEY', read_secret('secret_key'))
|
||||
|
||||
# Redis database settings. The Redis database is used for caching and background processing such as webhooks
|
||||
REDIS = {
|
||||
'HOST': os.environ.get('REDIS_HOST', 'localhost'),
|
||||
'PORT': int(os.environ.get('REDIS_PORT', 6379)),
|
||||
'PASSWORD': os.environ.get('REDIS_PASSWORD', read_secret('redis_password')),
|
||||
'DATABASE': os.environ.get('REDIS_DATABASE', '0'),
|
||||
'CACHE_DATABASE': os.environ.get('REDIS_CACHE_DATABASE', '1'),
|
||||
'DEFAULT_TIMEOUT': os.environ.get('REDIS_TIMEOUT', '300'),
|
||||
'SSL': os.environ.get('REDIS_SSL', 'False').lower() == 'true',
|
||||
}
|
||||
|
||||
#########################
|
||||
# #
|
||||
# Optional settings #
|
||||
# #
|
||||
#########################
|
||||
|
||||
# Specify one or more name and email address tuples representing NetBox administrators. These people will be notified of
|
||||
# application errors (assuming correct email settings are provided).
|
||||
ADMINS = [
|
||||
# ['John Doe', 'jdoe@example.com'],
|
||||
]
|
||||
|
||||
# Optionally display a persistent banner at the top and/or bottom of every page. HTML is allowed. To display the same
|
||||
# content in both banners, define BANNER_TOP and set BANNER_BOTTOM = BANNER_TOP.
|
||||
BANNER_TOP = os.environ.get('BANNER_TOP', '')
|
||||
BANNER_BOTTOM = os.environ.get('BANNER_BOTTOM', '')
|
||||
|
||||
# Text to include on the login page above the login form. HTML is allowed.
|
||||
BANNER_LOGIN = os.environ.get('BANNER_LOGIN', '')
|
||||
|
||||
# Base URL path if accessing NetBox within a directory. For example, if installed at http://example.com/netbox/, set:
|
||||
# BASE_PATH = 'netbox/'
|
||||
BASE_PATH = os.environ.get('BASE_PATH', '')
|
||||
|
||||
# Cache timeout in seconds. Set to 0 to disable caching. Defaults to 900 (15 minutes)
|
||||
CACHE_TIMEOUT = int(os.environ.get('CACHE_TIMEOUT', 900))
|
||||
|
||||
# Maximum number of days to retain logged changes. Set to 0 to retain changes indefinitely. (Default: 90)
|
||||
CHANGELOG_RETENTION = int(os.environ.get('CHANGELOG_RETENTION', 90))
|
||||
|
||||
# API Cross-Origin Resource Sharing (CORS) settings. If CORS_ORIGIN_ALLOW_ALL is set to True, all origins will be
|
||||
# allowed. Otherwise, define a list of allowed origins using either CORS_ORIGIN_WHITELIST or
|
||||
# CORS_ORIGIN_REGEX_WHITELIST. For more information, see https://github.com/ottoyiu/django-cors-headers
|
||||
CORS_ORIGIN_ALLOW_ALL = os.environ.get('CORS_ORIGIN_ALLOW_ALL', 'False').lower() == 'true'
|
||||
CORS_ORIGIN_WHITELIST = list(filter(None, os.environ.get('CORS_ORIGIN_WHITELIST', 'https://localhost').split(' ')))
|
||||
CORS_ORIGIN_REGEX_WHITELIST = [re.compile(r) for r in list(filter(None, os.environ.get('CORS_ORIGIN_REGEX_WHITELIST', '').split(' ')))]
|
||||
|
||||
# Set to True to enable server debugging. WARNING: Debugging introduces a substantial performance penalty and may reveal
|
||||
# sensitive information about your installation. Only enable debugging while performing testing. Never enable debugging
|
||||
# on a production system.
|
||||
DEBUG = os.environ.get('DEBUG', 'False').lower() == 'true'
|
||||
|
||||
# Email settings
|
||||
EMAIL = {
|
||||
'SERVER': os.environ.get('EMAIL_SERVER', 'localhost'),
|
||||
'PORT': int(os.environ.get('EMAIL_PORT', 25)),
|
||||
'USERNAME': os.environ.get('EMAIL_USERNAME', ''),
|
||||
'PASSWORD': os.environ.get('EMAIL_PASSWORD', read_secret('email_password')),
|
||||
'TIMEOUT': int(os.environ.get('EMAIL_TIMEOUT', 10)), # seconds
|
||||
'FROM_EMAIL': os.environ.get('EMAIL_FROM', ''),
|
||||
}
|
||||
|
||||
# Enforcement of unique IP space can be toggled on a per-VRF basis.
|
||||
# To enforce unique IP space within the global table (all prefixes and IP addresses not assigned to a VRF),
|
||||
# set ENFORCE_GLOBAL_UNIQUE to True.
|
||||
ENFORCE_GLOBAL_UNIQUE = os.environ.get('ENFORCE_GLOBAL_UNIQUE', 'False').lower() == 'true'
|
||||
|
||||
# Exempt certain models from the enforcement of view permissions. Models listed here will be viewable by all users and
|
||||
# by anonymous users. List models in the form `<app>.<model>`. Add '*' to this list to exempt all models.
|
||||
EXEMPT_VIEW_PERMISSIONS = list(filter(None, os.environ.get('EXEMPT_VIEW_PERMISSIONS', '').split(' ')))
|
||||
|
||||
# Enable custom logging. Please see the Django documentation for detailed guidance on configuring custom logs:
|
||||
# https://docs.djangoproject.com/en/1.11/topics/logging/
|
||||
LOGGING = {}
|
||||
|
||||
# Setting this to True will permit only authenticated users to access any part of NetBox. By default, anonymous users
|
||||
# are permitted to access most data in NetBox (excluding secrets) but not make any changes.
|
||||
LOGIN_REQUIRED = os.environ.get('LOGIN_REQUIRED', 'False').lower() == 'true'
|
||||
|
||||
# Setting this to True will display a "maintenance mode" banner at the top of every page.
|
||||
MAINTENANCE_MODE = os.environ.get('MAINTENANCE_MODE', 'False').lower() == 'true'
|
||||
|
||||
# An API consumer can request an arbitrary number of objects by appending the "limit" parameter to the URL (e.g.
|
||||
# "?limit=1000"). This setting defines the maximum limit. Setting it to 0 or None will allow an API consumer to request
|
||||
# all objects by specifying "?limit=0".
|
||||
MAX_PAGE_SIZE = int(os.environ.get('MAX_PAGE_SIZE', 1000))
|
||||
|
||||
# The file path where uploaded media such as image attachments are stored. A trailing slash is not needed. Note that
|
||||
# the default value of this setting is derived from the installed location.
|
||||
MEDIA_ROOT = os.environ.get('MEDIA_ROOT', os.path.join(BASE_DIR, 'media'))
|
||||
|
||||
# Expose Prometheus monitoring metrics at the HTTP endpoint '/metrics'
|
||||
METRICS_ENABLED = os.environ.get('METRICS_ENABLED', 'False').lower() == 'true'
|
||||
|
||||
# Credentials that NetBox will use to access live devices.
|
||||
NAPALM_USERNAME = os.environ.get('NAPALM_USERNAME', '')
|
||||
NAPALM_PASSWORD = os.environ.get('NAPALM_PASSWORD', read_secret('napalm_password'))
|
||||
|
||||
# NAPALM timeout (in seconds). (Default: 30)
|
||||
NAPALM_TIMEOUT = int(os.environ.get('NAPALM_TIMEOUT', 30))
|
||||
|
||||
# NAPALM optional arguments (see http://napalm.readthedocs.io/en/latest/support/#optional-arguments). Arguments must
|
||||
# be provided as a dictionary.
|
||||
NAPALM_ARGS = {}
|
||||
|
||||
# Determine how many objects to display per page within a list. (Default: 50)
|
||||
PAGINATE_COUNT = int(os.environ.get('PAGINATE_COUNT', 50))
|
||||
|
||||
# When determining the primary IP address for a device, IPv6 is preferred over IPv4 by default. Set this to True to
|
||||
# prefer IPv4 instead.
|
||||
PREFER_IPV4 = os.environ.get('PREFER_IPV4', 'False').lower() == 'true'
|
||||
# The file path where custom reports will be stored. A trailing slash is not needed. Note that the default value of
|
||||
# this setting is derived from the installed location.
|
||||
REPORTS_ROOT = os.environ.get('REPORTS_ROOT', '/etc/netbox/reports')
|
||||
|
||||
# Time zone (default: UTC)
|
||||
TIME_ZONE = os.environ.get('TIME_ZONE', 'UTC')
|
||||
|
||||
# The Webhook event backend is disabled by default. Set this to True to enable it. Note that this requires a Redis
|
||||
# database be configured and accessible by NetBox (see `REDIS` below).
|
||||
WEBHOOKS_ENABLED = os.environ.get('WEBHOOKS_ENABLED', 'False').lower() == 'true'
|
||||
|
||||
# Date/time formatting. See the following link for supported formats:
|
||||
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
|
||||
DATE_FORMAT = os.environ.get('DATE_FORMAT', 'N j, Y')
|
||||
SHORT_DATE_FORMAT = os.environ.get('SHORT_DATE_FORMAT', 'Y-m-d')
|
||||
TIME_FORMAT = os.environ.get('TIME_FORMAT', 'g:i a')
|
||||
SHORT_TIME_FORMAT = os.environ.get('SHORT_TIME_FORMAT', 'H:i:s')
|
||||
DATETIME_FORMAT = os.environ.get('DATETIME_FORMAT', 'N j, Y g:i a')
|
||||
SHORT_DATETIME_FORMAT = os.environ.get('SHORT_DATETIME_FORMAT', 'Y-m-d H:i')
|
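Nearly every optional setting above follows one of two environment-variable conventions: booleans are true only when the variable is the string 'true' (case-insensitive), and lists are space-separated values with empties filtered out. The same conventions expressed as small helpers (the helper names are illustrative, not part of this file):

```
import os

def env_bool(name, default='False'):
    # 'true' / 'True' -> True, anything else -> False
    return os.environ.get(name, default).lower() == 'true'

def env_list(name, default=''):
    # 'a.example b.example' -> ['a.example', 'b.example']
    return list(filter(None, os.environ.get(name, default).split(' ')))

# DEBUG = env_bool('DEBUG')
# ALLOWED_HOSTS = env_list('ALLOWED_HOSTS', '*')
```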
8 docker/configuration/gunicorn_config.py Normal file
@@ -0,0 +1,8 @@
command = '/usr/bin/gunicorn'
pythonpath = '/opt/netbox/netbox'
bind = '0.0.0.0:8001'
workers = 3
errorlog = '-'
accesslog = '-'
capture_output = False
loglevel = 'debug'
81 docker/configuration/ldap_config.py Normal file
@@ -0,0 +1,81 @@
|
||||
import ldap
|
||||
import os
|
||||
|
||||
from django_auth_ldap.config import LDAPSearch
|
||||
from importlib import import_module
|
||||
|
||||
# Read secret from file
|
||||
def read_secret(secret_name):
|
||||
try:
|
||||
f = open('/run/secrets/' + secret_name, 'r', encoding='utf-8')
|
||||
except EnvironmentError:
|
||||
return ''
|
||||
else:
|
||||
with f:
|
||||
return f.readline().strip()
|
||||
|
||||
# Import and return the group type based on string name
|
||||
def import_group_type(group_type_name):
|
||||
mod = import_module('django_auth_ldap.config')
|
||||
try:
|
||||
return getattr(mod, group_type_name)()
|
||||
except:
|
||||
return None
|
||||
|
||||
# Server URI
|
||||
AUTH_LDAP_SERVER_URI = os.environ.get('AUTH_LDAP_SERVER_URI', '')
|
||||
|
||||
# The following may be needed if you are binding to Active Directory.
|
||||
AUTH_LDAP_CONNECTION_OPTIONS = {
|
||||
ldap.OPT_REFERRALS: 0
|
||||
}
|
||||
|
||||
# Set the DN and password for the NetBox service account.
|
||||
AUTH_LDAP_BIND_DN = os.environ.get('AUTH_LDAP_BIND_DN', '')
|
||||
AUTH_LDAP_BIND_PASSWORD = os.environ.get('AUTH_LDAP_BIND_PASSWORD', read_secret('auth_ldap_bind_password'))
|
||||
|
||||
# Set a string template that describes any user’s distinguished name based on the username.
|
||||
AUTH_LDAP_USER_DN_TEMPLATE = os.environ.get('AUTH_LDAP_USER_DN_TEMPLATE', None)
|
||||
|
||||
# Include this setting if you want to ignore certificate errors. This might be needed to accept a self-signed cert.
|
||||
# Note that this is a NetBox-specific setting which sets:
|
||||
# ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
|
||||
LDAP_IGNORE_CERT_ERRORS = os.environ.get('LDAP_IGNORE_CERT_ERRORS', 'False').lower() == 'true'
|
||||
|
||||
AUTH_LDAP_USER_SEARCH_BASEDN = os.environ.get('AUTH_LDAP_USER_SEARCH_BASEDN', '')
|
||||
AUTH_LDAP_USER_SEARCH_ATTR = os.environ.get('AUTH_LDAP_USER_SEARCH_ATTR', 'sAMAccountName')
|
||||
AUTH_LDAP_USER_SEARCH = LDAPSearch(AUTH_LDAP_USER_SEARCH_BASEDN,
|
||||
ldap.SCOPE_SUBTREE,
|
||||
"(" + AUTH_LDAP_USER_SEARCH_ATTR + "=%(user)s)")
|
||||
|
||||
# This search ought to return all groups to which the user belongs. django_auth_ldap uses this to determine group
|
||||
# hierarchy.
|
||||
AUTH_LDAP_GROUP_SEARCH_BASEDN = os.environ.get('AUTH_LDAP_GROUP_SEARCH_BASEDN', '')
|
||||
AUTH_LDAP_GROUP_SEARCH_CLASS = os.environ.get('AUTH_LDAP_GROUP_SEARCH_CLASS', 'group')
|
||||
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(AUTH_LDAP_GROUP_SEARCH_BASEDN, ldap.SCOPE_SUBTREE,
|
||||
"(objectClass=" + AUTH_LDAP_GROUP_SEARCH_CLASS + ")")
|
||||
AUTH_LDAP_GROUP_TYPE = import_group_type(os.environ.get('AUTH_LDAP_GROUP_TYPE', 'GroupOfNamesType'))
|
||||
|
||||
# Define a group required to login.
|
||||
AUTH_LDAP_REQUIRE_GROUP = os.environ.get('AUTH_LDAP_REQUIRE_GROUP_DN', '')
|
||||
|
||||
# Define special user types using groups. Exercise great caution when assigning superuser status.
|
||||
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
|
||||
"is_active": os.environ.get('AUTH_LDAP_REQUIRE_GROUP_DN', ''),
|
||||
"is_staff": os.environ.get('AUTH_LDAP_IS_ADMIN_DN', ''),
|
||||
"is_superuser": os.environ.get('AUTH_LDAP_IS_SUPERUSER_DN', '')
|
||||
}
|
||||
|
||||
# For more granular permissions, we can map LDAP groups to Django groups.
|
||||
AUTH_LDAP_FIND_GROUP_PERMS = os.environ.get('AUTH_LDAP_FIND_GROUP_PERMS', 'True').lower() == 'true'
|
||||
|
||||
# Cache groups for one hour to reduce LDAP traffic
|
||||
AUTH_LDAP_CACHE_GROUPS = os.environ.get('AUTH_LDAP_CACHE_GROUPS', 'True').lower() == 'true'
|
||||
AUTH_LDAP_GROUP_CACHE_TIMEOUT = int(os.environ.get('AUTH_LDAP_GROUP_CACHE_TIMEOUT', 3600))
|
||||
|
||||
# Populate the Django user from the LDAP directory.
|
||||
AUTH_LDAP_USER_ATTR_MAP = {
|
||||
"first_name": os.environ.get('AUTH_LDAP_ATTR_FIRSTNAME', 'givenName'),
|
||||
"last_name": os.environ.get('AUTH_LDAP_ATTR_LASTNAME', 'sn'),
|
||||
"email": os.environ.get('AUTH_LDAP_ATTR_MAIL', 'mail')
|
||||
}
|
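With the defaults above (sAMAccountName for the user search attribute and group for the group class), the two LDAPSearch filters assembled by string concatenation reduce to plain LDAP filter strings; spelled out:

```
user_filter = "(" + "sAMAccountName" + "=%(user)s)"   # -> "(sAMAccountName=%(user)s)"
group_filter = "(objectClass=" + "group" + ")"        # -> "(objectClass=group)"
print(user_filter)   # django_auth_ldap substitutes %(user)s with the login name at bind time
print(group_filter)
```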
58 docker/docker-entrypoint.sh Executable file
@@ -0,0 +1,58 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# wait shortly and then run db migrations (retry on error)
|
||||
while ! ./manage.py migrate 2>&1; do
|
||||
echo "⏳ Waiting on DB..."
|
||||
sleep 3
|
||||
done
|
||||
|
||||
# create superuser silently
|
||||
if [ -z ${SUPERUSER_NAME+x} ]; then
|
||||
SUPERUSER_NAME='admin'
|
||||
fi
|
||||
if [ -z ${SUPERUSER_EMAIL+x} ]; then
|
||||
SUPERUSER_EMAIL='admin@example.com'
|
||||
fi
|
||||
if [ -z ${SUPERUSER_PASSWORD+x} ]; then
|
||||
if [ -f "/run/secrets/superuser_password" ]; then
|
||||
SUPERUSER_PASSWORD="$(< /run/secrets/superuser_password)"
|
||||
else
|
||||
SUPERUSER_PASSWORD='admin'
|
||||
fi
|
||||
fi
|
||||
if [ -z ${SUPERUSER_API_TOKEN+x} ]; then
|
||||
if [ -f "/run/secrets/superuser_api_token" ]; then
|
||||
SUPERUSER_API_TOKEN="$(< /run/secrets/superuser_api_token)"
|
||||
else
|
||||
SUPERUSER_API_TOKEN='0123456789abcdef0123456789abcdef01234567'
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "💡 Username: ${SUPERUSER_NAME}, E-Mail: ${SUPERUSER_EMAIL}"
|
||||
|
||||
./manage.py shell --interface python << END
|
||||
from django.contrib.auth.models import User
|
||||
from users.models import Token
|
||||
if not User.objects.filter(username='${SUPERUSER_NAME}'):
|
||||
u=User.objects.create_superuser('${SUPERUSER_NAME}', '${SUPERUSER_EMAIL}', '${SUPERUSER_PASSWORD}')
|
||||
Token.objects.create(user=u, key='${SUPERUSER_API_TOKEN}')
|
||||
END
|
||||
|
||||
if [ "$SKIP_STARTUP_SCRIPTS" == "true" ]; then
|
||||
echo "☇ Skipping startup scripts"
|
||||
else
|
||||
for script in /opt/netbox/startup_scripts/*.py; do
|
||||
echo "⚙️ Executing '$script'"
|
||||
./manage.py shell --interface python < "${script}"
|
||||
done
|
||||
fi
|
||||
|
||||
# copy static files
|
||||
./manage.py collectstatic --no-input
|
||||
|
||||
echo "✅ Initialisation is done."
|
||||
|
||||
# launch whatever is passed by docker
|
||||
# (i.e. the RUN instruction in the Dockerfile)
|
||||
exec ${@}
|
82 docker/initializers/custom_fields.yml Normal file
@@ -0,0 +1,82 @@
|
||||
# text_field:
|
||||
# type: text
|
||||
# label: Custom Text
|
||||
# description: Enter text in a text field.
|
||||
# required: false
|
||||
# weight: 0
|
||||
# on_objects:
|
||||
# - dcim.models.Device
|
||||
# - dcim.models.Rack
|
||||
# - dcim.models.Site
|
||||
# - dcim.models.DeviceType
|
||||
# - ipam.models.IPAddress
|
||||
# - ipam.models.Prefix
|
||||
# - tenancy.models.Tenant
|
||||
# - virtualization.models.VirtualMachine
|
||||
# integer_field:
|
||||
# type: integer
|
||||
# label: Custom Number
|
||||
# description: Enter numbers into an integer field.
|
||||
# required: true
|
||||
# filter_logic: loose
|
||||
# weight: 10
|
||||
# on_objects:
|
||||
# - tenancy.models.Tenant
|
||||
# selection_field:
|
||||
# type: selection
|
||||
# label: Choose between items
|
||||
# required: false
|
||||
# filter_logic: exact
|
||||
# weight: 30
|
||||
# on_objects:
|
||||
# - dcim.models.Device
|
||||
# choices:
|
||||
# - value: First Item
|
||||
# weight: 10
|
||||
# - value: Second Item
|
||||
# weight: 20
|
||||
# - value: Third Item
|
||||
# weight: 30
|
||||
# - value: Fifth Item
|
||||
# weight: 50
|
||||
# - value: Fourth Item
|
||||
# weight: 40
|
||||
# selection_field_auto_weight:
|
||||
# type: selection
|
||||
# label: Choose between items
|
||||
# required: false
|
||||
# filter_logic: loose
|
||||
# weight: 30
|
||||
# on_objects:
|
||||
# - dcim.models.Device
|
||||
# choices:
|
||||
# - value: A
|
||||
# - value: B
|
||||
# - value: C
|
||||
# - value: "D like deprecated"
|
||||
# weight: 999
|
||||
# - value: E
|
||||
# boolean_field:
|
||||
# type: boolean
|
||||
# label: Yes Or No?
|
||||
# required: true
|
||||
# filter_logic: loose
|
||||
# default: "false" # important: put "false" in quotes!
|
||||
# weight: 90
|
||||
# on_objects:
|
||||
# - dcim.models.Device
|
||||
# url_field:
|
||||
# type: url
|
||||
# label: Hyperlink
|
||||
# description: Link to something nice.
|
||||
# required: true
|
||||
# filter_logic: disabled
|
||||
# on_objects:
|
||||
# - tenancy.models.Tenant
|
||||
# date_field:
|
||||
# type: date
|
||||
# label: Important Date
|
||||
# required: false
|
||||
# filter_logic: disabled
|
||||
# on_objects:
|
||||
# - dcim.models.Device
|
15 docker/initializers/device_roles.yml Normal file
@@ -0,0 +1,15 @@
|
||||
# - name: switch
|
||||
# slug: switch
|
||||
# color: Grey
|
||||
# - name: router
|
||||
# slug: router
|
||||
# color: Cyan
|
||||
# - name: load-balancer
|
||||
# slug: load-balancer
|
||||
# color: Red
|
||||
# - name: server
|
||||
# slug: server
|
||||
# color: Blue
|
||||
# - name: patchpanel
|
||||
# slug: patchpanel
|
||||
# color: Black
|
23 docker/initializers/device_types.yml Normal file
@@ -0,0 +1,23 @@
|
||||
# - model: Model 1
|
||||
# manufacturer: Manufacturer 1
|
||||
# slug: model-1
|
||||
# u_height: 2
|
||||
# custom_fields:
|
||||
# text_field: Description
|
||||
# - model: Model 2
|
||||
# manufacturer: Manufacturer 1
|
||||
# slug: model-2
|
||||
# custom_fields:
|
||||
# text_field: Description
|
||||
# - model: Model 3
|
||||
# manufacturer: Manufacturer 1
|
||||
# slug: model-3
|
||||
# is_full_depth: false
|
||||
# u_height: 0
|
||||
# custom_fields:
|
||||
# text_field: Description
|
||||
# - model: Other
|
||||
# manufacturer: NoName
|
||||
# slug: other
|
||||
# custom_fields:
|
||||
# text_field: Description
|
27 docker/initializers/devices.yml Normal file
@@ -0,0 +1,27 @@
|
||||
# - name: server01
|
||||
# device_role: server
|
||||
# device_type: Other
|
||||
# site: AMS 1
|
||||
# rack: rack-01
|
||||
# face: Front
|
||||
# position: 1
|
||||
# custom_fields:
|
||||
# text_field: Description
|
||||
# - name: server02
|
||||
# device_role: server
|
||||
# device_type: Other
|
||||
# site: AMS 2
|
||||
# rack: rack-02
|
||||
# face: Front
|
||||
# position: 2
|
||||
# custom_fields:
|
||||
# text_field: Description
|
||||
# - name: server03
|
||||
# device_role: server
|
||||
# device_type: Other
|
||||
# site: SING 1
|
||||
# rack: rack-03
|
||||
# face: Front
|
||||
# position: 3
|
||||
# custom_fields:
|
||||
# text_field: Description
|
16 docker/initializers/groups.yml Normal file
@@ -0,0 +1,16 @@
|
||||
# applications:
|
||||
# users:
|
||||
# - technical_user
|
||||
# readers:
|
||||
# users:
|
||||
# - reader
|
||||
# writers:
|
||||
# users:
|
||||
# - writer
|
||||
# permissions:
|
||||
# - add_device
|
||||
# - change_device
|
||||
# - delete_device
|
||||
# - add_virtualmachine
|
||||
# - change_virtualmachine
|
||||
# - delete_virtualmachine
|
6 docker/initializers/manufacturers.yml Normal file
@@ -0,0 +1,6 @@
# - name: Manufacturer 1
#   slug: manufacturer-1
# - name: Manufacturer 2
#   slug: manufacturer-2
# - name: NoName
#   slug: noname
19 docker/initializers/platforms.yml Normal file
@@ -0,0 +1,19 @@
|
||||
# # Allowed rpc clients are: juniper-junos, cisco-ios, opengear
|
||||
# - name: Platform 1
|
||||
# slug: platform-1
|
||||
# manufacturer: Manufacturer 1
|
||||
# napalm_driver: driver1
|
||||
# napalm_args: "{'arg1': 'value1', 'arg2': 'value2'}"
|
||||
# rpc_client: juniper-junos
|
||||
# - name: Platform 2
|
||||
# slug: platform-2
|
||||
# manufacturer: Manufacturer 2
|
||||
# napalm_driver: driver2
|
||||
# napalm_args: "{'arg1': 'value1', 'arg2': 'value2'}"
|
||||
# rpc_client: opengear
|
||||
# - name: Platform 3
|
||||
# slug: platform-3
|
||||
# manufacturer: NoName
|
||||
# napalm_driver: driver3
|
||||
# napalm_args: "{'arg1': 'value1', 'arg2': 'value2'}"
|
||||
# rpc_client: juniper-junos
|
12 docker/initializers/rack_roles.yml Normal file
@@ -0,0 +1,12 @@
# - name: Role 1
#   slug: role-1
#   color: Pink
# - name: Role 2
#   slug: role-2
#   color: Cyan
# - name: Role 3
#   slug: role-3
#   color: Grey
# - name: Role 4
#   slug: role-4
#   color: Teal
24 docker/initializers/racks.yml Normal file
@@ -0,0 +1,24 @@
|
||||
# - site: AMS 1
|
||||
# name: rack-01
|
||||
# role: Role 1
|
||||
# type: 4-post cabinet
|
||||
# width: 19 inches
|
||||
# u_height: 47
|
||||
# custom_fields:
|
||||
# text_field: Description
|
||||
# - site: AMS 2
|
||||
# name: rack-02
|
||||
# role: Role 2
|
||||
# type: 4-post cabinet
|
||||
# width: 19 inches
|
||||
# u_height: 47
|
||||
# custom_fields:
|
||||
# text_field: Description
|
||||
# - site: SING 1
|
||||
# name: rack-03
|
||||
# role: Role 3
|
||||
# type: 4-post cabinet
|
||||
# width: 19 inches
|
||||
# u_height: 47
|
||||
# custom_fields:
|
||||
# text_field: Description
|
10 docker/initializers/regions.yml Normal file
@@ -0,0 +1,10 @@
# - name: Singapore
#   slug: singapore
# - name: Amsterdam
#   slug: amsterdam
# - name: Downtown
#   slug: downtown
#   parent: Amsterdam
# - name: Suburbs
#   slug: suburbs
#   parent: Amsterdam
32 docker/initializers/sites.yml Normal file
@@ -0,0 +1,32 @@
|
||||
# - name: AMS 1
|
||||
# slug: ams1
|
||||
# region: Downtown
|
||||
# status: 1
|
||||
# facility: Amsterdam 1
|
||||
# asn: 12345
|
||||
# custom_fields:
|
||||
# text_field: Description
|
||||
# - name: AMS 2
|
||||
# slug: ams2
|
||||
# region: Downtown
|
||||
# status: 1
|
||||
# facility: Amsterdam 2
|
||||
# asn: 54321
|
||||
# custom_fields:
|
||||
# text_field: Description
|
||||
# - name: AMS 3
|
||||
# slug: ams3
|
||||
# region: Suburbs
|
||||
# status: 1
|
||||
# facility: Amsterdam 3
|
||||
# asn: 67890
|
||||
# custom_fields:
|
||||
# text_field: Description
|
||||
# - name: SING 1
|
||||
# slug: sing1
|
||||
# region: Singapore
|
||||
# status: 1
|
||||
# facility: Singapore 1
|
||||
# asn: 09876
|
||||
# custom_fields:
|
||||
# text_field: Description
|
13 docker/initializers/users.yml Normal file
@@ -0,0 +1,13 @@
# technical_user:
#   api_token: 0123456789technicaluser789abcdef01234567 # must be looooong!
# reader:
#   password: reader
# writer:
#   password: writer
#   permissions:
#     - add_device
#     - change_device
#     - delete_device
#     - add_virtualmachine
#     - change_virtualmachine
#     - delete_virtualmachine
36 docker/nginx.conf Normal file
@@ -0,0 +1,36 @@
|
||||
daemon off;
|
||||
worker_processes 1;
|
||||
|
||||
error_log /dev/stderr info;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
keepalive_timeout 65;
|
||||
gzip on;
|
||||
server_tokens off;
|
||||
client_max_body_size 10M;
|
||||
|
||||
server {
|
||||
listen 8080;
|
||||
access_log off;
|
||||
|
||||
location /static/ {
|
||||
alias /opt/netbox/netbox/static/;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://netbox:8001;
|
||||
proxy_set_header X-Forwarded-Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
add_header P3P 'CP="ALL DSP COR PSAa PSDa OUR NOR ONL UNI COM NAV"';
|
||||
}
|
||||
}
|
||||
}
|
34 docker/startup_scripts/000_users.py Normal file
@@ -0,0 +1,34 @@
|
||||
from django.contrib.auth.models import Permission, Group, User
|
||||
from users.models import Token
|
||||
|
||||
from ruamel.yaml import YAML
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/users.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml=YAML(typ='safe')
|
||||
users = yaml.load(stream)
|
||||
|
||||
if users is not None:
|
||||
for username, user_details in users.items():
|
||||
if not User.objects.filter(username=username):
|
||||
user = User.objects.create_user(
|
||||
username = username,
|
||||
password = user_details.get('password', 0) or User.objects.make_random_password())
|
||||
|
||||
print("👤 Created user ",username)
|
||||
|
||||
if user_details.get('api_token', 0):
|
||||
Token.objects.create(user=user, key=user_details['api_token'])
|
||||
|
||||
user_permissions = user_details.get('permissions', [])
|
||||
if user_permissions:
|
||||
user.user_permissions.clear()
|
||||
for permission_codename in user_details.get('permissions', []):
|
||||
for permission in Permission.objects.filter(codename=permission_codename):
|
||||
user.user_permissions.add(permission)
|
||||
user.save()
|
33 docker/startup_scripts/010_groups.py Normal file
@@ -0,0 +1,33 @@
|
||||
from django.contrib.auth.models import Permission, Group, User
|
||||
from ruamel.yaml import YAML
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/groups.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml=YAML(typ='safe')
|
||||
groups = yaml.load(stream)
|
||||
|
||||
if groups is not None:
|
||||
for groupname, group_details in groups.items():
|
||||
group, created = Group.objects.get_or_create(name=groupname)
|
||||
|
||||
if created:
|
||||
print("👥 Created group", groupname)
|
||||
|
||||
for username in group_details.get('users', []):
|
||||
user = User.objects.get(username=username)
|
||||
|
||||
if user:
|
||||
user.groups.add(group)
|
||||
|
||||
group_permissions = group_details.get('permissions', [])
|
||||
if group_permissions:
|
||||
group.permissions.clear()
|
||||
print("Permissions:", group.permissions.all())
|
||||
for permission_codename in group_details.get('permissions', []):
|
||||
for permission in Permission.objects.filter(codename=permission_codename):
|
||||
group.permissions.add(permission)
|
75 docker/startup_scripts/020_custom_fields.py Normal file
@@ -0,0 +1,75 @@
|
||||
from extras.constants import CF_TYPE_TEXT, CF_TYPE_INTEGER, CF_TYPE_BOOLEAN, CF_TYPE_DATE, CF_TYPE_URL, CF_TYPE_SELECT, CF_FILTER_CHOICES
|
||||
from extras.models import CustomField, CustomFieldChoice
|
||||
|
||||
from ruamel.yaml import YAML
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
text_to_fields = {
|
||||
'boolean': CF_TYPE_BOOLEAN,
|
||||
'date': CF_TYPE_DATE,
|
||||
'integer': CF_TYPE_INTEGER,
|
||||
'selection': CF_TYPE_SELECT,
|
||||
'text': CF_TYPE_TEXT,
|
||||
'url': CF_TYPE_URL,
|
||||
}
|
||||
|
||||
def get_class_for_class_path(class_path):
|
||||
import importlib
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
|
||||
module_name, class_name = class_path.rsplit(".", 1)
|
||||
module = importlib.import_module(module_name)
|
||||
clazz = getattr(module, class_name)
|
||||
return ContentType.objects.get_for_model(clazz)
|
||||
|
||||
file = Path('/opt/netbox/initializers/custom_fields.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml = YAML(typ='safe')
|
||||
customfields = yaml.load(stream)
|
||||
|
||||
if customfields is not None:
|
||||
for cf_name, cf_details in customfields.items():
|
||||
custom_field, created = CustomField.objects.get_or_create(name = cf_name)
|
||||
|
||||
if created:
|
||||
if cf_details.get('default', 0):
|
||||
custom_field.default = cf_details['default']
|
||||
|
||||
if cf_details.get('description', 0):
|
||||
custom_field.description = cf_details['description']
|
||||
|
||||
# If no filter_logic is specified then it will default to 'Loose'
|
||||
if cf_details.get('filter_logic', 0):
|
||||
for choice_id, choice_text in CF_FILTER_CHOICES:
|
||||
if choice_text.lower() == cf_details['filter_logic']:
|
||||
custom_field.filter_logic = choice_id
|
||||
|
||||
if cf_details.get('label', 0):
|
||||
custom_field.label = cf_details['label']
|
||||
|
||||
for object_type in cf_details.get('on_objects', []):
|
||||
custom_field.obj_type.add(get_class_for_class_path(object_type))
|
||||
|
||||
if cf_details.get('required', 0):
|
||||
custom_field.required = cf_details['required']
|
||||
|
||||
if cf_details.get('type', 0):
|
||||
custom_field.type = text_to_fields[cf_details['type']]
|
||||
|
||||
if cf_details.get('weight', 0):
|
||||
custom_field.weight = cf_details['weight']
|
||||
|
||||
custom_field.save()
|
||||
|
||||
for idx, choice_details in enumerate(cf_details.get('choices', [])):
|
||||
choice, _ = CustomFieldChoice.objects.get_or_create(
|
||||
field=custom_field,
|
||||
value=choice_details['value'],
|
||||
defaults={'weight': idx * 10}
|
||||
)
|
||||
|
||||
print("🔧 Created custom field", cf_name)
|
31 docker/startup_scripts/030_regions.py Normal file
@@ -0,0 +1,31 @@
|
||||
from dcim.models import Region
|
||||
from ruamel.yaml import YAML
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/regions.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml=YAML(typ='safe')
|
||||
regions = yaml.load(stream)
|
||||
|
||||
optional_assocs = {
|
||||
'parent': (Region, 'name')
|
||||
}
|
||||
|
||||
if regions is not None:
|
||||
for params in regions:
|
||||
|
||||
for assoc, details in optional_assocs.items():
|
||||
if assoc in params:
|
||||
model, field = details
|
||||
query = { field: params.pop(assoc) }
|
||||
|
||||
params[assoc] = model.objects.get(**query)
|
||||
|
||||
region, created = Region.objects.get_or_create(**params)
|
||||
|
||||
if created:
|
||||
print("🌐 Created region", region.name)
|
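030_regions.py is the smallest instance of a pattern every startup script repeats: foreign-key fields arrive in the YAML as names, get popped out of params, resolved to model instances, and written back before get_or_create. A generic sketch of that step (the resolve_assocs helper is illustrative and not part of the scripts):

```
def resolve_assocs(params, assocs, required=False):
    # `assocs` maps a YAML key to (model, lookup_field); required entries are
    # resolved unconditionally, optional ones only when present in params.
    for assoc, (model, field) in assocs.items():
        if required or assoc in params:
            params[assoc] = model.objects.get(**{field: params.pop(assoc)})
    return params

# e.g. the loop in this script is equivalent to:
#   resolve_assocs(params, {'parent': (Region, 'name')})
# and the racks script to:
#   resolve_assocs(params, {'site': (Site, 'name')}, required=True)
#   resolve_assocs(params, {'role': (RackRole, 'name'), 'tenant': (Tenant, 'name'), 'group': (RackGroup, 'name')})
```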
46 docker/startup_scripts/040_sites.py Normal file
@@ -0,0 +1,46 @@
|
||||
from dcim.models import Region, Site
|
||||
from extras.models import CustomField, CustomFieldValue
|
||||
from tenancy.models import Tenant
|
||||
from ruamel.yaml import YAML
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/sites.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml = YAML(typ='safe')
|
||||
sites = yaml.load(stream)
|
||||
|
||||
optional_assocs = {
|
||||
'region': (Region, 'name'),
|
||||
'tenant': (Tenant, 'name')
|
||||
}
|
||||
|
||||
if sites is not None:
|
||||
for params in sites:
|
||||
custom_fields = params.pop('custom_fields', None)
|
||||
|
||||
for assoc, details in optional_assocs.items():
|
||||
if assoc in params:
|
||||
model, field = details
|
||||
query = { field: params.pop(assoc) }
|
||||
|
||||
params[assoc] = model.objects.get(**query)
|
||||
|
||||
site, created = Site.objects.get_or_create(**params)
|
||||
|
||||
if created:
|
||||
if custom_fields is not None:
|
||||
for cf_name, cf_value in custom_fields.items():
|
||||
custom_field = CustomField.objects.get(name=cf_name)
|
||||
custom_field_value = CustomFieldValue.objects.create(
|
||||
field=custom_field,
|
||||
obj=site,
|
||||
value=cf_value
|
||||
)
|
||||
|
||||
site.custom_field_values.add(custom_field_value)
|
||||
|
||||
print("📍 Created site", site.name)
|
19 docker/startup_scripts/050_manufacturers.py Normal file
@@ -0,0 +1,19 @@
|
||||
from dcim.models import Manufacturer
|
||||
from ruamel.yaml import YAML
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/manufacturers.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml = YAML(typ='safe')
|
||||
manufacturers = yaml.load(stream)
|
||||
|
||||
if manufacturers is not None:
|
||||
for params in manufacturers:
|
||||
manufacturer, created = Manufacturer.objects.get_or_create(**params)
|
||||
|
||||
if created:
|
||||
print("🏭 Created Manufacturer", manufacturer.name)
|
56 docker/startup_scripts/060_device_types.py Normal file
@@ -0,0 +1,56 @@
|
||||
from dcim.models import DeviceType, Manufacturer, Region
|
||||
from tenancy.models import Tenant
|
||||
from extras.models import CustomField, CustomFieldValue
|
||||
from ruamel.yaml import YAML
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/device_types.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml = YAML(typ='safe')
|
||||
device_types = yaml.load(stream)
|
||||
|
||||
required_assocs = {
|
||||
'manufacturer': (Manufacturer, 'name')
|
||||
}
|
||||
|
||||
optional_assocs = {
|
||||
'region': (Region, 'name'),
|
||||
'tenant': (Tenant, 'name')
|
||||
}
|
||||
|
||||
if device_types is not None:
|
||||
for params in device_types:
|
||||
custom_fields = params.pop('custom_fields', None)
|
||||
|
||||
for assoc, details in required_assocs.items():
|
||||
model, field = details
|
||||
query = { field: params.pop(assoc) }
|
||||
|
||||
params[assoc] = model.objects.get(**query)
|
||||
|
||||
for assoc, details in optional_assocs.items():
|
||||
if assoc in params:
|
||||
model, field = details
|
||||
query = { field: params.pop(assoc) }
|
||||
|
||||
params[assoc] = model.objects.get(**query)
|
||||
|
||||
device_type, created = DeviceType.objects.get_or_create(**params)
|
||||
|
||||
if created:
|
||||
if custom_fields is not None:
|
||||
for cf_name, cf_value in custom_fields.items():
|
||||
custom_field = CustomField.objects.get(name=cf_name)
|
||||
custom_field_value = CustomFieldValue.objects.create(
|
||||
field=custom_field,
|
||||
obj=device_type,
|
||||
value=cf_value
|
||||
)
|
||||
|
||||
device_type.custom_field_values.add(custom_field_value)
|
||||
|
||||
print("🔡 Created device type", device_type.manufacturer, device_type.model)
|
28 docker/startup_scripts/070_rack_roles.py Normal file
@@ -0,0 +1,28 @@
|
||||
from dcim.models import RackRole
|
||||
from ruamel.yaml import YAML
|
||||
from utilities.forms import COLOR_CHOICES
|
||||
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/rack_roles.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml=YAML(typ='safe')
|
||||
rack_roles = yaml.load(stream)
|
||||
|
||||
if rack_roles is not None:
|
||||
for params in rack_roles:
|
||||
if 'color' in params:
|
||||
color = params.pop('color')
|
||||
|
||||
for color_tpl in COLOR_CHOICES:
|
||||
if color in color_tpl:
|
||||
params['color'] = color_tpl[0]
|
||||
|
||||
rack_role, created = RackRole.objects.get_or_create(**params)
|
||||
|
||||
if created:
|
||||
print("🎨 Created rack role", rack_role.name)
|
66 docker/startup_scripts/080_racks.py Normal file
@@ -0,0 +1,66 @@
|
||||
from dcim.models import Site, RackRole, Rack, RackGroup
|
||||
from tenancy.models import Tenant
|
||||
from extras.models import CustomField, CustomFieldValue
|
||||
from dcim.constants import RACK_TYPE_CHOICES, RACK_WIDTH_CHOICES
|
||||
from ruamel.yaml import YAML
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/racks.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml = YAML(typ='safe')
|
||||
racks = yaml.load(stream)
|
||||
|
||||
required_assocs = {
|
||||
'site': (Site, 'name')
|
||||
}
|
||||
|
||||
optional_assocs = {
|
||||
'role': (RackRole, 'name'),
|
||||
'tenant': (Tenant, 'name'),
|
||||
'group': (RackGroup, 'name')
|
||||
}
|
||||
|
||||
if racks is not None:
|
||||
for params in racks:
|
||||
custom_fields = params.pop('custom_fields', None)
|
||||
|
||||
for assoc, details in required_assocs.items():
|
||||
model, field = details
|
||||
query = { field: params.pop(assoc) }
|
||||
|
||||
params[assoc] = model.objects.get(**query)
|
||||
|
||||
for assoc, details in optional_assocs.items():
|
||||
if assoc in params:
|
||||
model, field = details
|
||||
query = { field: params.pop(assoc) }
|
||||
|
||||
params[assoc] = model.objects.get(**query)
|
||||
|
||||
for rack_type in RACK_TYPE_CHOICES:
|
||||
if params['type'] in rack_type:
|
||||
params['type'] = rack_type[0]
|
||||
|
||||
for rack_width in RACK_WIDTH_CHOICES:
|
||||
if params['width'] in rack_width:
|
||||
params['width'] = rack_width[0]
|
||||
|
||||
rack, created = Rack.objects.get_or_create(**params)
|
||||
|
||||
if created:
|
||||
if custom_fields is not None:
|
||||
for cf_name, cf_value in custom_fields.items():
|
||||
custom_field = CustomField.objects.get(name=cf_name)
|
||||
custom_field_value = CustomFieldValue.objects.create(
|
||||
field=custom_field,
|
||||
obj=rack,
|
||||
value=cf_value
|
||||
)
|
||||
|
||||
rack.custom_field_values.add(custom_field_value)
|
||||
|
||||
print("🔳 Created rack", rack.site, rack.name)
|
29 docker/startup_scripts/090_device_roles.py Normal file
@@ -0,0 +1,29 @@
|
||||
from dcim.models import DeviceRole
|
||||
from ruamel.yaml import YAML
|
||||
from utilities.forms import COLOR_CHOICES
|
||||
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/device_roles.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml=YAML(typ='safe')
|
||||
device_roles = yaml.load(stream)
|
||||
|
||||
if device_roles is not None:
|
||||
for params in device_roles:
|
||||
|
||||
if 'color' in params:
|
||||
color = params.pop('color')
|
||||
|
||||
for color_tpl in COLOR_CHOICES:
|
||||
if color in color_tpl:
|
||||
params['color'] = color_tpl[0]
|
||||
|
||||
device_role, created = DeviceRole.objects.get_or_create(**params)
|
||||
|
||||
if created:
|
||||
print("🎨 Created device role", device_role.name)
|
32 docker/startup_scripts/100_platforms.py Normal file
@@ -0,0 +1,32 @@
|
||||
from dcim.models import Manufacturer, Platform
|
||||
from ruamel.yaml import YAML
|
||||
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/platforms.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml = YAML(typ='safe')
|
||||
platforms = yaml.load(stream)
|
||||
|
||||
optional_assocs = {
|
||||
'manufacturer': (Manufacturer, 'name'),
|
||||
}
|
||||
|
||||
if platforms is not None:
|
||||
for params in platforms:
|
||||
|
||||
for assoc, details in optional_assocs.items():
|
||||
if assoc in params:
|
||||
model, field = details
|
||||
query = { field: params.pop(assoc) }
|
||||
|
||||
params[assoc] = model.objects.get(**query)
|
||||
|
||||
platform, created = Platform.objects.get_or_create(**params)
|
||||
|
||||
if created:
|
||||
print("💾 Created platform", platform.name)
|
71 docker/startup_scripts/110_devices.py Normal file
@@ -0,0 +1,71 @@
|
||||
from dcim.models import Site, Rack, DeviceRole, DeviceType, Device, Platform
|
||||
from dcim.constants import RACK_FACE_CHOICES
|
||||
from ipam.models import IPAddress
|
||||
from virtualization.models import Cluster
|
||||
from tenancy.models import Tenant
|
||||
from extras.models import CustomField, CustomFieldValue
|
||||
from ruamel.yaml import YAML
|
||||
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
file = Path('/opt/netbox/initializers/devices.yml')
|
||||
if not file.is_file():
|
||||
sys.exit()
|
||||
|
||||
with file.open('r') as stream:
|
||||
yaml = YAML(typ='safe')
|
||||
devices = yaml.load(stream)
|
||||
|
||||
required_assocs = {
|
||||
'device_role': (DeviceRole, 'name'),
|
||||
'device_type': (DeviceType, 'model'),
|
||||
'site': (Site, 'name')
|
||||
}
|
||||
|
||||
optional_assocs = {
|
||||
'tenant': (Tenant, 'name'),
|
||||
'platform': (Platform, 'name'),
|
||||
'rack': (Rack, 'name'),
|
||||
'cluster': (Cluster, 'name'),
|
||||
'primary_ip4': (IPAddress, 'address'),
|
||||
'primary_ip6': (IPAddress, 'address')
|
||||
}
|
||||
|
||||
if devices is not None:
|
||||
for params in devices:
|
||||
custom_fields = params.pop('custom_fields', None)
|
||||
|
||||
for assoc, details in required_assocs.items():
|
||||
model, field = details
|
||||
query = { field: params.pop(assoc) }
|
||||
|
||||
params[assoc] = model.objects.get(**query)
|
||||
|
||||
for assoc, details in optional_assocs.items():
|
||||
if assoc in params:
|
||||
model, field = details
|
||||
query = { field: params.pop(assoc) }
|
||||
|
||||
params[assoc] = model.objects.get(**query)
|
||||
|
||||
if 'face' in params:
|
||||
for rack_face in RACK_FACE_CHOICES:
|
||||
if params['face'] in rack_face:
|
||||
params['face'] = rack_face[0]
|
||||
|
||||
device, created = Device.objects.get_or_create(**params)
|
||||
|
||||
if created:
|
||||
if custom_fields is not None:
|
||||
for cf_name, cf_value in custom_fields.items():
|
||||
custom_field = CustomField.objects.get(name=cf_name)
|
||||
custom_field_value = CustomFieldValue.objects.create(
|
||||
field=custom_field,
|
||||
obj=device,
|
||||
value=cf_value
|
||||
)
|
||||
|
||||
device.custom_field_values.add(custom_field_value)
|
||||
|
||||
print("🖥️ Created device", device.name)
|
netbox/netbox/settings.py
@@ -184,6 +184,7 @@ INSTALLED_APPS = [
    'utilities',
    'virtualization',
    'drf_yasg',
    'vapor',
]

# Only load django-rq if the webhook backend is enabled
netbox/netbox/urls.py
@@ -50,6 +50,7 @@ _patterns = [
    path(r'api/secrets/', include('secrets.api.urls')),
    path(r'api/tenancy/', include('tenancy.api.urls')),
    path(r'api/virtualization/', include('virtualization.api.urls')),
    path(r'api/vapor/', include('vapor.api.urls')),
    path(r'api/docs/', schema_view.with_ui('swagger'), name='api_docs'),
    path(r'api/redoc/', schema_view.with_ui('redoc'), name='api_redocs'),
    re_path(r'^api/swagger(?P<format>.json|.yaml)$', schema_view.without_ui(), name='schema_swagger'),
1 netbox/vapor/__init__.py Normal file
@@ -0,0 +1 @@
default_app_config = 'vapor.apps.VaporConfig'
0 netbox/vapor/api/__init__.py Normal file
79 netbox/vapor/api/serializers.py Normal file
@@ -0,0 +1,79 @@
|
||||
from rest_framework import serializers
|
||||
from taggit_serializer.serializers import TaggitSerializer, TagListSerializerField
|
||||
|
||||
from extras.api.customfields import CustomFieldModelSerializer
|
||||
from dcim.api.nested_serializers import (
|
||||
NestedDeviceSerializer,
|
||||
NestedInterfaceSerializer,
|
||||
NestedCableSerializer,
|
||||
)
|
||||
from dcim.api.serializers import (
|
||||
InterfaceConnectionSerializer,
|
||||
ConnectedEndpointSerializer,
|
||||
IFACE_TYPE_CHOICES,
|
||||
IFACE_MODE_CHOICES,
|
||||
)
|
||||
from dcim.models import Interface
|
||||
from ipam.api.nested_serializers import NestedVLANSerializer
|
||||
from ipam.models import VLAN
|
||||
from tenancy.api.nested_serializers import NestedTenantGroupSerializer
|
||||
from utilities.api import ChoiceField, ValidatedModelSerializer, SerializedPKRelatedField
|
||||
from tenancy.models import Tenant as Customer
|
||||
|
||||
|
||||
class CustomerSerializer(TaggitSerializer, CustomFieldModelSerializer):
|
||||
group = NestedTenantGroupSerializer(required=False)
|
||||
tags = TagListSerializerField(required=False)
|
||||
devices = NestedDeviceSerializer(required=False, many=True)
|
||||
|
||||
class Meta:
|
||||
model = Customer
|
||||
fields = [
|
||||
'id', 'name', 'slug', 'group', 'description', 'comments', 'tags', 'custom_fields', 'created',
|
||||
'last_updated', 'devices',
|
||||
]
|
||||
|
||||
|
||||
class InterfaceSerializer(TaggitSerializer, ConnectedEndpointSerializer):
|
||||
device = NestedDeviceSerializer()
|
||||
type = ChoiceField(choices=IFACE_TYPE_CHOICES, required=False)
|
||||
# TODO: Remove in v2.7 (backward-compatibility for form_factor)
|
||||
form_factor = ChoiceField(choices=IFACE_TYPE_CHOICES, required=False)
|
||||
lag = NestedInterfaceSerializer(required=False, allow_null=True)
|
||||
mode = ChoiceField(choices=IFACE_MODE_CHOICES, required=False, allow_null=True)
|
||||
untagged_vlan = NestedVLANSerializer(required=False, allow_null=True)
|
||||
tagged_vlans = SerializedPKRelatedField(
|
||||
queryset=VLAN.objects.all(),
|
||||
serializer=NestedVLANSerializer,
|
||||
required=False,
|
||||
many=True
|
||||
)
|
||||
cable = NestedCableSerializer(read_only=True)
|
||||
tags = TagListSerializerField(required=False)
|
||||
|
||||
class Meta:
|
||||
model = Interface
|
||||
fields = [
|
||||
'id', 'device', 'name', 'type', 'form_factor', 'enabled', 'lag', 'mtu', 'mac_address', 'mgmt_only',
|
||||
'description', 'connected_endpoint_type', 'connected_endpoint', 'connection_status', 'cable', 'mode',
|
||||
'untagged_vlan', 'tagged_vlans', 'tags', 'count_ipaddresses',
|
||||
]
|
||||
|
||||
def validate(self, data):
|
||||
|
||||
# All associated VLANs be global or assigned to the parent device's site.
|
||||
device = self.instance.device if self.instance else data.get('device')
|
||||
untagged_vlan = data.get('untagged_vlan')
|
||||
if untagged_vlan and untagged_vlan.site not in [device.site, None]:
|
||||
raise serializers.ValidationError({
|
||||
'untagged_vlan': "VLAN {} must belong to the same site as the interface's parent device, or it must be "
|
||||
"global.".format(untagged_vlan)
|
||||
})
|
||||
for vlan in data.get('tagged_vlans', []):
|
||||
if vlan.site not in [device.site, None]:
|
||||
raise serializers.ValidationError({
|
||||
'tagged_vlans': "VLAN {} must belong to the same site as the interface's parent device, or it must "
|
||||
"be global.".format(vlan)
|
||||
})
|
||||
|
||||
return super().validate(data)
|
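The validate() override above rejects VLANs scoped to a site other than the parent device's. A hedged sketch of a payload it turns away; the device, site, and VLAN objects are hypothetical stand-ins (the test fixtures near the end of this change build similar data):

# Illustrative only: an untagged VLAN from a different site fails validation.
from dcim.models import Site
from ipam.models import VLAN
from vapor.api.serializers import InterfaceSerializer

other_site = Site.objects.create(name='dc-2', slug='dc-2')
vlan_elsewhere = VLAN.objects.create(vid=100, name='vlan100', site=other_site)

# `device` is assumed to be an existing Device at a different site (e.g. 'dc-1').
serializer = InterfaceSerializer(data={
    'device': device.pk,
    'name': 'eth0',
    'untagged_vlan': vlan_elsewhere.pk,
})
assert not serializer.is_valid()
# serializer.errors['untagged_vlan'] explains the site mismatch.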
21
netbox/vapor/api/urls.py
Normal file
@ -0,0 +1,21 @@
from rest_framework import routers

from . import views


class VaporRootView(routers.APIRootView):
    """
    Vapor API root view
    """
    def get_view_name(self):
        return 'Vapor'


router = routers.DefaultRouter()
router.APIRootView = VaporRootView

router.register(r'customers', views.CustomerViewSet)
router.register(r'interfaces', views.InterfaceViewSet)

app_name = 'vapor-api'
urlpatterns = router.urls
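One subtlety: DefaultRouter derives URL names from each viewset's queryset model rather than from the registered prefix. Since CustomerViewSet is backed by the Tenant model, the reversible names are tenant-list and tenant-detail even though the path is customers/, which is why the tests at the end of this change call reverse('vapor-api:tenant-list'). A small sketch (paths assume an empty BASE_PATH):

# Sketch: the route names produced by the registrations above.
from django.urls import reverse

reverse('vapor-api:tenant-list')                       # -> '/api/vapor/customers/'
reverse('vapor-api:tenant-detail', kwargs={'pk': 1})   # -> '/api/vapor/customers/1/'
reverse('vapor-api:interface-list')                    # -> '/api/vapor/interfaces/'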
40
netbox/vapor/api/views.py
Normal file
@ -0,0 +1,40 @@

from rest_framework.response import Response
from rest_framework.decorators import action

from django.shortcuts import get_object_or_404  # needed by the graphs() action below

from dcim.api.views import CableTraceMixin
from dcim.models import Interface
from extras.api.views import CustomFieldModelViewSet
# Graph rendering pieces used by InterfaceViewSet.graphs() (import locations assume NetBox 2.6):
from extras.api.serializers import RenderedGraphSerializer
from extras.constants import GRAPH_TYPE_INTERFACE
from extras.models import Graph
from tenancy.models import Tenant as Customer
from utilities.api import ModelViewSet
from vapor import filters

from . import serializers


class CustomerViewSet(CustomFieldModelViewSet):
    queryset = Customer.objects.prefetch_related(
        'group', 'tags', 'devices'
    )
    serializer_class = serializers.CustomerSerializer
    filterset_class = filters.CustomerFilter


class InterfaceViewSet(CableTraceMixin, ModelViewSet):
    queryset = Interface.objects.prefetch_related(
        'device', '_connected_interface', '_connected_circuittermination', 'cable', 'ip_addresses', 'tags'
    ).filter(
        device__isnull=False
    )
    serializer_class = serializers.InterfaceSerializer
    filterset_class = filters.InterfaceFilter

    @action(detail=True)
    def graphs(self, request, pk=None):
        """
        A convenience method for rendering graphs for a particular interface.
        """
        interface = get_object_or_404(Interface, pk=pk)
        queryset = Graph.objects.filter(type=GRAPH_TYPE_INTERFACE)
        serializer = RenderedGraphSerializer(queryset, many=True, context={'graphed_object': interface})
        return Response(serializer.data)
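Once routed, the two viewsets give a customer-centric window into the standard NetBox data. A rough sketch of the query surface (parameter names taken from the filter sets in this change; token authentication omitted for brevity):

# Sketch only: exercising the vapor endpoints with Django's test client.
from django.test import Client

client = Client()

# Customers, optionally narrowed by the role of the devices they own.
client.get('/api/vapor/customers/?device_role=locker', HTTP_ACCEPT='application/json')

# Interfaces whose far-end device belongs to a given customer (tenant slug).
client.get('/api/vapor/interfaces/?customer=test-customer-1', HTTP_ACCEPT='application/json')

# Graphs for a single interface via the extra @action (the pk is illustrative).
client.get('/api/vapor/interfaces/1/graphs/', HTTP_ACCEPT='application/json')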
6
netbox/vapor/apps.py
Normal file
@ -0,0 +1,6 @@
from django.apps import AppConfig


class VaporConfig(AppConfig):
    name = "vapor"
    verbose_name = "Vapor"
167
netbox/vapor/filters.py
Normal file
@ -0,0 +1,167 @@
import django_filters
from django.db.models import Q

from extras.filters import CustomFieldFilterSet
from utilities.filters import NameSlugSearchFilterSet, NumericInFilter, TagFilter, MultiValueNumberFilter
from tenancy.models import Tenant, TenantGroup
from dcim.models import Device, DeviceRole, Interface
from dcim.api.serializers import (
    IFACE_TYPE_CHOICES,
    IFACE_MODE_CHOICES,
)
# Interface-kind groupings used by InterfaceFilter.filter_kind() (import location assumes NetBox 2.6):
from dcim.constants import NONCONNECTABLE_IFACE_TYPES, VIRTUAL_IFACE_TYPES, WIRELESS_IFACE_TYPES
from dcim.filters import MultiValueMACAddressFilter


class CustomerFilter(CustomFieldFilterSet):
    id__in = NumericInFilter(
        field_name='id',
        lookup_expr='in'
    )
    q = django_filters.CharFilter(
        method='search',
        label='Search',
    )
    group_id = django_filters.ModelMultipleChoiceFilter(
        queryset=TenantGroup.objects.all(),
        label='Group (ID)',
    )
    group = django_filters.ModelMultipleChoiceFilter(
        field_name='group__slug',
        queryset=TenantGroup.objects.all(),
        to_field_name='slug',
        label='Group (slug)',
    )
    tag = TagFilter()

    device_role = django_filters.ModelMultipleChoiceFilter(
        field_name='devices__device_role__slug',
        queryset=DeviceRole.objects.all(),
        to_field_name='slug',
        label='Device Role (slug)',
    )

    class Meta:
        model = Tenant
        fields = ['name', 'slug']

    def search(self, queryset, name, value):
        if not value.strip():
            return queryset
        return queryset.filter(
            Q(name__icontains=value) |
            Q(slug__icontains=value) |
            Q(description__icontains=value) |
            Q(comments__icontains=value)
        )


class InterfaceFilter(django_filters.FilterSet):
    """
    Not using DeviceComponentFilterSet for Interfaces because we need to check for VirtualChassis membership.
    """
    q = django_filters.CharFilter(
        method='search',
        label='Search',
    )
    device = django_filters.CharFilter(
        method='filter_device',
        field_name='name',
        label='Device',
    )
    device_id = MultiValueNumberFilter(
        method='filter_device_id',
        field_name='pk',
        label='Device (ID)',
    )
    cabled = django_filters.BooleanFilter(
        field_name='cable',
        lookup_expr='isnull',
        exclude=True
    )
    kind = django_filters.CharFilter(
        method='filter_kind',
        label='Kind of interface',
    )
    lag_id = django_filters.ModelMultipleChoiceFilter(
        field_name='lag',
        queryset=Interface.objects.all(),
        label='LAG interface (ID)',
    )
    mac_address = MultiValueMACAddressFilter()
    tag = TagFilter()
    vlan_id = django_filters.CharFilter(
        method='filter_vlan_id',
        label='Assigned VLAN'
    )
    vlan = django_filters.CharFilter(
        method='filter_vlan',
        label='Assigned VID'
    )
    type = django_filters.MultipleChoiceFilter(
        choices=IFACE_TYPE_CHOICES,
        null_value=None
    )

    customer = django_filters.ModelMultipleChoiceFilter(
        field_name='_connected_interface__device__tenant__slug',
        queryset=Tenant.objects.all(),
        to_field_name='slug',
        label='Customer (slug)',
    )

    class Meta:
        model = Interface
        fields = ['id', 'name', 'connection_status', 'type', 'enabled', 'mtu', 'mgmt_only', 'mode', 'description']

    def search(self, queryset, name, value):
        if not value.strip():
            return queryset
        return queryset.filter(
            Q(name__icontains=value) |
            Q(description__icontains=value)
        ).distinct()

    def filter_device(self, queryset, name, value):
        try:
            device = Device.objects.get(**{name: value})
            vc_interface_ids = device.vc_interfaces.values_list('id', flat=True)
            return queryset.filter(pk__in=vc_interface_ids)
        except Device.DoesNotExist:
            return queryset.none()

    def filter_device_id(self, queryset, name, id_list):
        # Include interfaces belonging to peer virtual chassis members
        vc_interface_ids = []
        try:
            devices = Device.objects.filter(pk__in=id_list)
            for device in devices:
                vc_interface_ids += device.vc_interfaces.values_list('id', flat=True)
            return queryset.filter(pk__in=vc_interface_ids)
        except Device.DoesNotExist:
            return queryset.none()

    def filter_vlan_id(self, queryset, name, value):
        value = value.strip()
        if not value:
            return queryset
        return queryset.filter(
            Q(untagged_vlan_id=value) |
            Q(tagged_vlans=value)
        )

    def filter_vlan(self, queryset, name, value):
        value = value.strip()
        if not value:
            return queryset
        return queryset.filter(
            Q(untagged_vlan_id__vid=value) |
            Q(tagged_vlans__vid=value)
        )

    def filter_kind(self, queryset, name, value):
        value = value.strip().lower()
        return {
            'physical': queryset.exclude(type__in=NONCONNECTABLE_IFACE_TYPES),
            'virtual': queryset.filter(type__in=VIRTUAL_IFACE_TYPES),
            'wireless': queryset.filter(type__in=WIRELESS_IFACE_TYPES),
        }.get(value, queryset.none())
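The filter sets can also be exercised directly against querysets, which is handy in a shell or in unit tests. A brief sketch (the data values are hypothetical):

# Sketch: applying the filter sets outside of a request/response cycle.
from tenancy.models import Tenant
from dcim.models import Interface
from vapor.filters import CustomerFilter, InterfaceFilter

# Free-text search across name/slug/description/comments.
customers = CustomerFilter({'q': 'customer'}, queryset=Tenant.objects.all()).qs

# Interfaces on a device (including peer virtual-chassis members) that are cabled.
interfaces = InterfaceFilter(
    {'device': 'network-locker', 'cabled': 'true'},
    queryset=Interface.objects.all(),
).qs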
0
netbox/vapor/tests/__init__.py
Normal file
97
netbox/vapor/tests/test_api.py
Normal file
@ -0,0 +1,97 @@
from dcim.models import (
    Cable,
    Device,
    DeviceRole,
    DeviceType,
    Interface,
    Manufacturer,
    Site,
)
from django.urls import reverse
from rest_framework import status
from tenancy.models import Tenant as Customer
from utilities.testing import APITestCase, create_test_user


class VaporTestCustomers(APITestCase):

    @classmethod
    def setUpTestData(cls):
        cls.customer1 = Customer.objects.create(name='Test Customer 1', slug='test-customer-1')

    def test_get_a_customer(self):
        """ Inspect a single customer """
        url = reverse('vapor-api:tenant-detail', kwargs={'pk': self.customer1.pk})
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['name'], self.customer1.name)

    def test_get_customers(self):
        """ List all customers """
        url = reverse('vapor-api:tenant-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['results'][0]['name'], self.customer1.name)

    def test_create_customer(self):
        """ Post and create a customer """
        data = {
            'name': 'Test Customer 2',
            'slug': 'test-customer-2'
        }

        url = reverse('vapor-api:tenant-list')

        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(Customer.objects.count(), 2)

        custo = Customer.objects.get(pk=response.data['id'])
        self.assertEqual(custo.name, data['name'])
        self.assertEqual(custo.slug, data['slug'])


class VaporTestInterfaces(APITestCase):

    @classmethod
    def setUpTestData(cls):

        cls.site1 = Site.objects.create(name='test', slug='test')
        cls.manufacturer1 = Manufacturer.objects.create(name='Vapor', slug='vapor')
        cls.devicetype1 = DeviceType.objects.create(
            model='chamber-locker',
            slug='chamber-locker',
            manufacturer=cls.manufacturer1
        )
        cls.devicerole1 = DeviceRole.objects.create(name='locker', slug='locker')
        cls.customer1 = Customer.objects.create(name='Test Customer 1', slug='test-customer-1')
        cls.device1 = Device.objects.create(
            name='network-locker',
            device_role=cls.devicerole1,
            device_type=cls.devicetype1,
            site=cls.site1,
            tenant=cls.customer1,
        )
        cls.device2 = Device.objects.create(
            name='network-locker2',
            device_role=cls.devicerole1,
            device_type=cls.devicetype1,
            site=cls.site1,
        )
        cls.interface1 = Interface.objects.create(name='e1', device=cls.device1)
        cls.interface2 = Interface.objects.create(name='xe-0/0/0', device=cls.device2)

        cls.cable = Cable(termination_a=cls.interface1, termination_b=cls.interface2)
        cls.cable.save()

    def test_get_interfaces(self):
        url = reverse('vapor-api:interface-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 2)

    def test_get_customers(self):
        """ Inspect a single customer's interfaces """
        base_url = reverse('vapor-api:interface-list')
        query = {'customer': self.customer1.slug}
        url = '{}?{}'.format(base_url, '&'.join(['{}={}'.format(k, v) for k, v in query.items()]))
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(response.data['results'][0]['name'], self.interface2.name)
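These tests follow NetBox's own API test conventions; APITestCase supplies a token-authenticated self.header. To run just this module, the usual route is the manage.py test command, mirrored below through Django's management API as a hedged sketch:

# Sketch: run only the vapor test suite programmatically (equivalent to `manage.py test vapor`).
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'netbox.settings')  # assumes the netbox/ source root is the working directory

import django
from django.core.management import call_command

django.setup()
call_command('test', 'vapor', verbosity=2)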