Mirror of https://github.com/TheNetworkGuy/netbox-zabbix-sync.git
Synced 2025-07-13 15:24:48 -06:00
Compare commits
186 Commits
22 .devcontainer/devcontainer.json (Normal file)
@@ -0,0 +1,22 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
{
	"name": "Python 3",
	// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
	"image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye",

	// Features to add to the dev container. More info: https://containers.dev/features.
	// "features": {},

	// Use 'forwardPorts' to make a list of ports inside the container available locally.
	// "forwardPorts": [],

	// Use 'postCreateCommand' to run commands after the container is created.
	"postCreateCommand": "pip3 install --user -r requirements.txt && pip3 install --user pylint pytest coverage pytest-cov"

	// Configure tool-specific properties.
	// "customizations": {},

	// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
	// "remoteUser": "root"
}
79 .github/workflows/publish-image.yml (vendored)
@@ -1,46 +1,55 @@
|
||||
name: Publish Docker image to GHCR on a new version
|
||||
---
|
||||
name: Build and Push Docker Image
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- dockertest
|
||||
# tags:
|
||||
# - [0-9]+.*
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
release:
|
||||
types: [published]
|
||||
pull_request:
|
||||
types: [opened, synchronize]
|
||||
|
||||
jobs:
|
||||
test_quality:
|
||||
uses: ./.github/workflows/quality.yml
|
||||
build_and_publish:
|
||||
uses: ./.github/workflows/quality.yml
|
||||
test_code:
|
||||
uses: ./.github/workflows/run_tests.yml
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
- name: Log in to the container registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GHCR_PAT }}
|
||||
- name: Extract metadata (tags, labels)
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
type=semver,pattern={{ version }}
|
||||
type=ref,event=branch
|
||||
type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
|
||||
type=sha
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96
|
||||
with:
|
||||
images: ghcr.io/${{ github.repository }}
|
||||
tags: |
|
||||
type=ref,event=branch
|
||||
type=ref,event=pr
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991
|
||||
with:
|
||||
context: .
|
||||
file: ./Dockerfile
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
annotations: |
|
||||
index:org.opencontainers.image.description=Python script to synchronise NetBox devices to Zabbix.
|
||||
|
12 .github/workflows/quality.yml (vendored)
@@ -1,15 +1,17 @@
---
name: Pylint Quality control

on:
workflow_call
on:
push:
pull_request:
workflow_call:

jobs:
build:
python_quality_testing:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.11","3.12"]
python-version: ["3.12","3.13"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
@@ -23,4 +25,4 @@ jobs:
pip install -r requirements.txt
- name: Analysing the code with pylint
run: |
pylint --module-naming-style=any $(git ls-files '*.py')
pylint --module-naming-style=any modules/* netbox_zabbix_sync.py
33 .github/workflows/run_tests.yml (vendored, Normal file)
@@ -0,0 +1,33 @@
---
name: Pytest code testing

on:
  push:
  pull_request:
  workflow_call:

jobs:
  test_code:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.12
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest pytest-mock coverage pytest-cov
          pip install -r requirements.txt
      - name: Testing the code with PyTest
        run: |
          cp config.py.example config.py
          pytest tests
      - name: Run tests with coverage
        run: |
          cp config.py.example config.py
          coverage run -m pytest tests
      - name: Check coverage percentage
        run: |
          coverage report --fail-under=70
6 .gitignore (vendored)
@@ -1,5 +1,11 @@
*.log
.venv
config.py
Pipfile
Pipfile.lock
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
.vscode
.flake
.coverage
13 Dockerfile
@@ -1,9 +1,20 @@
# syntax=docker/dockerfile:1
FROM python:3.12-alpine
RUN mkdir -p /opt/netbox-zabbix && chown -R 1000:1000 /opt/netbox-zabbix

RUN mkdir -p /opt/netbox-zabbix
COPY . /opt/netbox-zabbix
RUN addgroup -g 1000 -S netbox-zabbix && adduser -u 1000 -S netbox-zabbix -G netbox-zabbix
RUN chown -R 1000:1000 /opt/netbox-zabbix

WORKDIR /opt/netbox-zabbix

COPY --chown=1000:1000 . /opt/netbox-zabbix

USER 1000:1000

RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi
USER root
RUN pip install -r ./requirements.txt
USER 1000:1000
ENTRYPOINT ["python"]
CMD ["/opt/netbox-zabbix/netbox_zabbix_sync.py", "-v"]
634 README.md
@@ -1,58 +1,78 @@
|
||||
# NetBox to Zabbix synchronization
|
||||
|
||||
# Netbox to Zabbix synchronization
|
||||
|
||||
A script to create, update and delete Zabbix hosts using Netbox device objects.
|
||||
|
||||
A script to create, update and delete Zabbix hosts using NetBox device objects. Tested and compatible with all [currently supported Zabbix releases](https://www.zabbix.com/life_cycle_and_release_policy).
|
||||
|
||||
## Installation via Docker
|
||||
|
||||
To pull the latest stable version to your local cache, use the following docker pull command:
|
||||
```
|
||||
To pull the latest stable version to your local cache, use the following docker
|
||||
pull command:
|
||||
|
||||
```bash
|
||||
docker pull ghcr.io/thenetworkguy/netbox-zabbix-sync:main
|
||||
```
|
||||
|
||||
Make sure to specify the needed environment variables for the script to work (see [here](#set-environment-variables))
|
||||
on the command line or use an [env file](https://docs.docker.com/reference/cli/docker/container/run/#env).
|
||||
Make sure to specify the needed environment variables for the script to work
|
||||
(see [here](#set-environment-variables)) on the command line or use an
|
||||
[env file](https://docs.docker.com/reference/cli/docker/container/run/#env).
|
||||
|
||||
```
|
||||
```bash
|
||||
docker run -d -t -i -e ZABBIX_HOST='https://zabbix.local' \
|
||||
-e ZABBIX_TOKEN='othersecrettoken' \
|
||||
-e NETBOX_HOST='https://netbox.local' \
|
||||
-e NETBOX_TOKEN='secrettoken' \
|
||||
--name netbox-zabbix-sync ghcr.io/TheNetworkGuy/netbox-zabbix-sync:latest
|
||||
--name netbox-zabbix-sync ghcr.io/thenetworkguy/netbox-zabbix-sync:main
|
||||
```
|
||||
|
||||
This should run a one-time sync, you can check the sync with `docker logs netbox-zabbix-sync`.
|
||||
This should run a one-time sync. You can check the sync with
|
||||
`docker logs netbox-zabbix-sync`.
|
||||
|
||||
The image uses the default `config.py` for it's configuration, you can use a volume mount in the docker run command
|
||||
to override with your own config file if needed (see [config file](#config-file)):
|
||||
The image uses the default `config.py` for its configuration, you can use a
|
||||
volume mount in the docker run command to override with your own config file if
|
||||
needed (see [config file](#config-file)):
|
||||
|
||||
```bash
|
||||
docker run -d -t -i -v $(pwd)/config.py:/opt/netbox-zabbix/config.py ...
|
||||
```
|
||||
docker run -d -t -i -v $(pwd)/config.py:/opt/netbox-zabbix/config.py ...
|
||||
```
|
||||
|
||||
## Installation from Source
|
||||
|
||||
### Cloning the repository
|
||||
```
|
||||
|
||||
```bash
|
||||
git clone https://github.com/TheNetworkGuy/netbox-zabbix-sync.git
|
||||
```
|
||||
|
||||
### Packages
|
||||
Make sure that you have a python environment with the following packages installed. You can also use the `requirements.txt` file for installation with pip.
|
||||
```
|
||||
|
||||
Make sure that you have a python environment with the following packages
|
||||
installed. You can also use the `requirements.txt` file for installation with
|
||||
pip.
|
||||
|
||||
```sh
|
||||
# Packages:
|
||||
pynetbox
|
||||
pyzabbix
|
||||
|
||||
# Install them through requirements.txt from a venv:
|
||||
virtualenv .venv
|
||||
source .venv/bin/activate
|
||||
.venv/bin/pip --require-virtualenv install -r requirements.txt
|
||||
```
|
||||
|
||||
### Config file
|
||||
First time user? Copy the `config.py.example` file to `config.py`. This file is used for modifying filters and setting variables such as custom field names.
|
||||
```
|
||||
|
||||
First time user? Copy the `config.py.example` file to `config.py`. This file is
|
||||
used for modifying filters and setting variables such as custom field names.
|
||||
|
||||
```sh
|
||||
cp config.py.example config.py
|
||||
```
|
||||
|
||||
### Set environment variables
|
||||
|
||||
Set the following environment variables:
|
||||
```
|
||||
|
||||
```bash
|
||||
ZABBIX_HOST="https://zabbix.local"
|
||||
ZABBIX_USER="username"
|
||||
ZABBIX_PASS="Password"
|
||||
@ -60,15 +80,25 @@ NETBOX_HOST="https://netbox.local"
|
||||
NETBOX_TOKEN="secrettoken"
|
||||
```
|
||||
|
||||
Or, you can use a Zabbix API token to login instead of using a username and password.
|
||||
In that case `ZABBIX_USER` and `ZABBIX_PASS` will be ignored.
|
||||
Or, you can use a Zabbix API token to login instead of using a username and
|
||||
password. In that case `ZABBIX_USER` and `ZABBIX_PASS` will be ignored.
|
||||
|
||||
```
|
||||
```bash
|
||||
ZABBIX_TOKEN=othersecrettoken
|
||||
```
|
||||
|
||||
### Netbox custom fields
|
||||
Use the following custom fields in Netbox (if you are using config context for the template information then the zabbix_template field is not required):
|
||||
If you are using custom SSL certificates for NetBox and/or Zabbix, you can set
|
||||
the following environment variable to the path of your CA bundle file:
|
||||
|
||||
```sh
|
||||
export REQUESTS_CA_BUNDLE=/path/to/your/ca-bundle.crt
|
||||
```
|
||||
|
||||
### NetBox custom fields
|
||||
|
||||
Use the following custom fields in NetBox (if you are using config context for
|
||||
the template information then the zabbix_template field is not required):
|
||||
|
||||
```
|
||||
* Type: Integer
|
||||
* Name: zabbix_hostid
|
||||
@ -76,6 +106,7 @@ Use the following custom fields in Netbox (if you are using config context for t
|
||||
* Default: null
|
||||
* Object: dcim > device
|
||||
```
|
||||
|
||||
```
|
||||
* Type: Text
|
||||
* Name: zabbix_template
|
||||
@ -83,124 +114,239 @@ Use the following custom fields in Netbox (if you are using config context for t
|
||||
* Default: null
|
||||
* Object: dcim > device_type
|
||||
```
|
||||
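If you prefer to create these custom fields through the API instead of the NetBox UI, a rough pynetbox sketch could look like the following (untested assumption: the `content_types` key applies to NetBox 3.x, newer releases use `object_types`):

```python
import pynetbox

# Connect with the same NetBox URL and token the sync script uses
nb = pynetbox.api("https://netbox.local", token="secrettoken")

# Integer field on devices that stores the Zabbix host ID
nb.extras.custom_fields.create(
    name="zabbix_hostid",
    type="integer",
    content_types=["dcim.device"],  # assumption: object_types on newer NetBox releases
)

# Text field on device types that holds the Zabbix template name
nb.extras.custom_fields.create(
    name="zabbix_template",
    type="text",
    content_types=["dcim.devicetype"],
)
```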
You can make the `zabbix_hostid` field hidden or read-only to prevent human intervention.
|
||||
|
||||
This is optional and there is a use case for leaving it read-write in the UI to manually change the ID. For example to re-run a sync.
|
||||
You can make the `zabbix_hostid` field hidden or read-only to prevent human
|
||||
intervention.
|
||||
|
||||
This is optional, but there may be cases where you want to leave it
|
||||
read-write in the UI. For example to manually change or clear the ID and re-run a sync.
|
||||
|
||||
## Virtual Machine (VM) Syncing
|
||||
|
||||
In order to use VM syncing, make sure that the `zabbix_hostid` custom field is also
present on virtual machine objects in NetBox.
|
||||
|
||||
Use the `config.py` file and set the `sync_vms` variable to `True`.
|
||||
|
||||
You can set the `vm_hostgroup_format` variable to a customizable value for VM
|
||||
hostgroups. The default is `cluster_type/cluster/role`.
|
||||
|
||||
To enable filtering for VMs, see the `nb_vm_filter` variable. It works
the same as the device filter (see the documentation under "Hostgroup layout").
Note that not all filtering capabilities and properties of devices are
applicable to VMs and vice versa. Check the NetBox API documentation to see
which filtering options are available for each object type.
|
||||
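For reference, a minimal `config.py` sketch that enables VM syncing might look like this (the extra tag filter is purely illustrative):

```python
# Enable syncing of NetBox virtual machines to Zabbix
sync_vms = True

# Hostgroup layout used for VMs (this is the default)
vm_hostgroup_format = "cluster_type/cluster/role"

# Only sync VMs that have a name and carry the "zabbix" tag (illustrative filter)
nb_vm_filter = {"name__n": "null", "tag": "zabbix"}
```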
|
||||
## Config file
|
||||
|
||||
### Hostgroup
|
||||
Setting the `create_hostgroups` variable to `False` requires manual hostgroup creation for devices in a new category.
|
||||
|
||||
The format can be set with the `hostgroup_format` variable.
|
||||
Setting the `create_hostgroups` variable to `False` requires manual hostgroup
|
||||
creation for devices in a new category. I would recommend setting this variable
|
||||
to `True` since leaving it on `False` results in a lot of manual work.
|
||||
|
||||
Make sure that the Zabbix user has proper permissions to create hosts.
|
||||
The hostgroups are in a nested format. This means that proper permissions only need to be applied to the site name hostgroup and cascaded to any child hostgroups.
|
||||
The format can be set with the `hostgroup_format` variable for devices and
|
||||
`vm_hostgroup_format` for virtual machines.
|
||||
|
||||
Any nested parent hostgroups will also be created automatically. For instance
|
||||
the region `Berlin` with parent region `Germany` will create the hostgroup
|
||||
`Germany/Berlin`.
|
||||
|
||||
Make sure that the Zabbix user has proper permissions to create hosts. The
|
||||
hostgroups are in a nested format. This means that proper permissions only need
|
||||
to be applied to the site name hostgroup and cascaded to any child hostgroups.
|
||||
|
||||
#### Layout
|
||||
The default hostgroup layout is "site/manufacturer/device_role".
|
||||
|
||||
**Variables**
|
||||
The default hostgroup layout is "site/manufacturer/device_role". You can change
|
||||
this behaviour with the hostgroup_format variable. The following values can be
|
||||
used:
|
||||
|
||||
You can change this behaviour with the hostgroup_format variable. The following values can be used:
|
||||
| name | description |
|
||||
| ------------ | ------------ |
|
||||
|dev_location|The device location name|
|
||||
|dev_role|The device role name|
|
||||
|manufacturer|Manufacturer name|
|
||||
|region|The region name of the device|
|
||||
|site|Site name|
|
||||
|site_group|Site group name|
|
||||
|tenant|Tenant name|
|
||||
|tenant_group|Tenant group name|
|
||||
**Both devices and virtual machines**
|
||||
|
||||
| name | description |
|
||||
| ------------- | ------------------------------------------------------------------------------------ |
|
||||
| role | Role name of a device or VM |
|
||||
| region | The region name |
|
||||
| site | Site name |
|
||||
| site_group | Site group name |
|
||||
| tenant | Tenant name |
|
||||
| tenant_group | Tenant group name |
|
||||
| platform | Software platform of a device or VM |
|
||||
| custom fields | See the section "Layout -> Custom Fields" to use custom fields as hostgroup variable |
|
||||
|
||||
You can specify the value like so, separated by a "/":
|
||||
```
|
||||
hostgroup_format = "tenant/site/dev_location/dev_role"
|
||||
**Only for devices**
|
||||
|
||||
| name | description |
|
||||
| ------------ | ------------------------ |
|
||||
| location | The device location name |
|
||||
| manufacturer | Device manufacturer name |
|
||||
| rack | Rack |
|
||||
|
||||
**Only for VMs**
|
||||
|
||||
| name | description |
|
||||
| ------------ | --------------- |
|
||||
| cluster | VM cluster name |
|
||||
| cluster_type | VM cluster type |
|
||||
| device | parent device |
|
||||
|
||||
You can specify the value separated by a "/" like so:
|
||||
|
||||
```python
|
||||
hostgroup_format = "tenant/site/location/role"
|
||||
```
|
||||
|
||||
You can also provide a list of groups like so:
|
||||
|
||||
```python
|
||||
hostgroup_format = ["region/site_group/site", "role", "tenant_group/tenant"]
|
||||
```
|
||||
|
||||
|
||||
**Group traversal**
|
||||
|
||||
The default behaviour for `region` is to only use the directly assigned region in the rendered hostgroup name.
|
||||
However, by setting `traverse_region` to `True` in `config.py` the script will render a full region path of all parent regions for the hostgroup name.
|
||||
`traverse_site_groups` controls the same behaviour for site_groups.
|
||||
The default behaviour for `region` is to only use the directly assigned region
|
||||
in the rendered hostgroup name. However, by setting `traverse_region` to `True`
|
||||
in `config.py` the script will render a full region path of all parent regions
|
||||
for the hostgroup name. `traverse_site_groups` controls the same behaviour for
|
||||
site_groups.
|
||||
|
||||
**Custom fields**
|
||||
|
||||
You can also use the value of custom fields under the device object.
|
||||
You can use the value of custom fields for hostgroup generation. This allows
|
||||
more freedom and even allows a full static mapping instead of a dynamic rendered
|
||||
hostgroup name.
|
||||
|
||||
For instance a custom field with the name `mycustomfieldname` and type string
|
||||
has the following values for 2 devices:
|
||||
|
||||
This allows more freedom and even allows a full static mapping instead of a dynamic rendered hostgroup name.
|
||||
```
|
||||
hostgroup_format = "site/mycustomfieldname"
|
||||
Device A has the value Train for custom field mycustomfieldname.
|
||||
Device B has the value Bus for custom field mycustomfieldname.
|
||||
Both devices are located in the site Paris.
|
||||
```
|
||||
|
||||
With the hostgroup format `site/mycustomfieldname` the following hostgroups will
|
||||
be generated:
|
||||
|
||||
```
|
||||
Device A: Paris/Train
|
||||
Device B: Paris/Bus
|
||||
```
|
||||
|
||||
**Empty variables or hostgroups**
|
||||
|
||||
Should the content of a variable be empty, then the hostgroup position is skipped.
|
||||
Should the content of a variable be empty, then the hostgroup position is
|
||||
skipped.
|
||||
|
||||
For example, consider the following scenario with 2 devices, both the same
|
||||
device type and site. One of them is linked to a tenant, the other one does not
|
||||
have a relationship with a tenant.
|
||||
|
||||
For example, consider the following scenario with 2 devices, both the same device type and site. One of them is linked to a tenant, the other one does not have a relationship with a tenant.
|
||||
- Device_role: PDU
|
||||
- Site: HQ-AMS
|
||||
|
||||
```python
|
||||
hostgroup_format = "site/tenant/role"
|
||||
```
|
||||
hostgroup_format = "site/tenant/device_role"
|
||||
```
|
||||
When running the script like above, the following hostgroup (HG) will be generated for both hosts:
|
||||
- Device A with no relationship with a tenant: HQ-AMS/PDU
|
||||
- Device B with a relationship to tenant "Fork Industries": HQ-AMS/Fork Industries/PDU
|
||||
|
||||
When running the script like above, the following hostgroup (HG) will be
|
||||
generated for both hosts:
|
||||
|
||||
- Device A with no relationship with a tenant: HQ-AMS/PDU
|
||||
- Device B with a relationship to tenant "Fork Industries": HQ-AMS/Fork
|
||||
Industries/PDU
|
||||
|
||||
The same logic applies to custom fields being used in the HG format:
|
||||
```
|
||||
|
||||
```python
|
||||
hostgroup_format = "site/mycustomfieldname"
|
||||
```
|
||||
For device A with the value "ABC123" in the custom field "mycustomfieldname" -> HQ-AMS/ABC123
|
||||
For a device which does not have a value in the custom field "mycustomfieldname" -> HQ-AMS
|
||||
|
||||
Should there be a scenario where a custom field does not have a value under a device, and the HG format only uses this single variable, then this will result in an error:
|
||||
For device A with the value "ABC123" in the custom field "mycustomfieldname" ->
|
||||
HQ-AMS/ABC123 For a device which does not have a value in the custom field
|
||||
"mycustomfieldname" -> HQ-AMS
|
||||
|
||||
Should there be a scenario where a custom field does not have a value under a
|
||||
device, and the HG format only uses this single variable, then this will result
|
||||
in an error:
|
||||
|
||||
```
|
||||
hostgroup_format = "mycustomfieldname"
|
||||
|
||||
Netbox-Zabbix-sync - ERROR - ESXI1 has no reliable hostgroup. This is most likely due to the use of custom fields that are empty.
|
||||
NetBox-Zabbix-sync - ERROR - ESXI1 has no reliable hostgroup. This is most likely due to the use of custom fields that are empty.
|
||||
```
|
||||
### Device status
|
||||
By setting a status on a Netbox device you determine how the host is added (or updated) in Zabbix. There are, by default, 3 options:
|
||||
* Delete the host from Zabbix (triggered by Netbox status "Decommissioning" and "Inventory")
|
||||
* Create the host in Zabbix but with a disabled status (Trigger by "Offline", "Planned", "Staged" and "Failed")
|
||||
* Create the host in Zabbix with an enabled status (For now only enabled with the "Active" status)
|
||||
|
||||
You can modify this behaviour by changing the following list variables in the script:
|
||||
- `zabbix_device_removal`
|
||||
- `zabbix_device_disable`
|
||||
### Device status
|
||||
|
||||
By setting a status on a NetBox device you determine how the host is added (or
|
||||
updated) in Zabbix. There are, by default, 3 options:
|
||||
|
||||
- Delete the host from Zabbix (triggered by NetBox status "Decommissioning" and
|
||||
"Inventory")
|
||||
- Create the host in Zabbix but with a disabled status (Triggered by "Offline",
|
||||
"Planned", "Staged" and "Failed")
|
||||
- Create the host in Zabbix with an enabled status (For now only enabled with
|
||||
the "Active" status)
|
||||
|
||||
You can modify this behaviour by changing the following list variables in the
|
||||
script:
|
||||
|
||||
- `zabbix_device_removal`
|
||||
- `zabbix_device_disable`
|
||||
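The defaults for these two lists, as shipped in `config.py.example`, are:

```python
# NetBox statuses that remove the host from Zabbix
zabbix_device_removal = ["Decommissioning", "Inventory"]

# NetBox statuses that keep the host in Zabbix but set it to disabled
zabbix_device_disable = ["Offline", "Planned", "Staged", "Failed"]
```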
|
||||
### Zabbix Inventory
|
||||
This script allows you to enable the inventory on managed Zabbix hosts and sync NetBox device properties to the specified inventory fields.
|
||||
To enable, set `inventory_sync` to `True`.
|
||||
Set `inventory_automatic` to `False` to use manual inventory, or `True` for automatic.
|
||||
See [Zabbix Manual](https://www.zabbix.com/documentation/current/en/manual/config/hosts/inventory#building-inventory) for more information about the modes.
|
||||
|
||||
Use the `inventory_map` variable to map which NetBox properties are used in which Zabbix Inventory fields.
|
||||
For nested properties, you can use the '/' separator.
|
||||
For example, the following map will assign the custom field 'mycustomfield' to the 'alias' Zabbix inventory field:
|
||||
```
|
||||
This script allows you to enable the inventory on managed Zabbix hosts and sync
|
||||
NetBox device properties to the specified inventory fields. To map NetBox
|
||||
information to Zabbix inventory fields, set `inventory_sync` to `True`.
|
||||
|
||||
You can set the inventory mode to "disabled", "manual" or "automatic" with the
|
||||
`inventory_mode` variable. See
|
||||
[Zabbix Manual](https://www.zabbix.com/documentation/current/en/manual/config/hosts/inventory#building-inventory)
|
||||
for more information about the modes.
|
||||
|
||||
Use the `device_inventory_map` variable to map which NetBox properties are used in
|
||||
which Zabbix Inventory fields. For nested properties, you can use the '/'
|
||||
separator. For example, the following map will assign the custom field
|
||||
'mycustomfield' to the 'alias' Zabbix inventory field:
|
||||
|
||||
For Virtual Machines, use `vm_inventory_map`.
|
||||
|
||||
```python
|
||||
inventory_sync = True
|
||||
inventory_automatic = True
|
||||
inventory_map = { "custom_fields/mycustomfield/name": "alias"}
|
||||
inventory_mode = "manual"
|
||||
device_inventory_map = {"custom_fields/mycustomfield/name": "alias"}
|
||||
vm_inventory_map = {"custom_fields/mycustomfield/name": "alias"}
|
||||
```
|
||||
See `config.py.example` for an extensive example map.
|
||||
Any Zabbix Inventory fields that are not included in the map will not be touched by the script,
|
||||
so you can safely add manual values or use items to automatically add values to other fields.
|
||||
|
||||
See `config.py.example` for an extensive example map. Any Zabbix Inventory fields
|
||||
that are not included in the map will not be touched by the script, so you can
|
||||
safely add manual values or use items to automatically add values to other
|
||||
fields.
|
||||
|
||||
### Template source
|
||||
You can either use a Netbox device type custom field or Netbox config context for the Zabbix template information.
|
||||
|
||||
Using a custom field allows for only one template. You can assign multiple templates to one host using the config context source.
|
||||
Should you make use of an advanced templating structure with lots of nesting then I would recommend sticking to the custom field.
|
||||
You can either use a NetBox device type custom field or NetBox config context
|
||||
for the Zabbix template information.
|
||||
|
||||
You can change the behaviour in the config file. By default this setting is false but you can set it to true to use config context:
|
||||
```
|
||||
Using a custom field allows for only one template. You can assign multiple
|
||||
templates to one host using the config context source. Should you make use of an
|
||||
advanced templating structure with lots of nesting then I would recommend
|
||||
sticking to the custom field.
|
||||
|
||||
You can change the behaviour in the config file. By default this setting is
|
||||
false but you can set it to true to use config context:
|
||||
|
||||
```python
|
||||
templates_config_context = True
|
||||
```
|
||||
|
||||
After that make sure that for each host there is at least one template defined in the config context in this format:
|
||||
```
|
||||
After that make sure that for each host there is at least one template defined
|
||||
in the config context in this format:
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
"templates": [
|
||||
@ -213,41 +359,249 @@ After that make sure that for each host there is at least one template defined i
|
||||
}
|
||||
```
|
||||
|
||||
You can also opt for the default device type custom field behaviour but with the added benefit of overwriting the template should a device in Netbox have a device specific context defined. In this case the device specific context template(s) will take priority over the device type custom field template.
|
||||
```
|
||||
You can also opt for the default device type custom field behaviour but with the
|
||||
added benefit of overwriting the template should a device in NetBox have a
|
||||
device specific context defined. In this case the device specific context
|
||||
template(s) will take priority over the device type custom field template.
|
||||
|
||||
```python
|
||||
templates_config_context_overrule = True
|
||||
```
|
||||
|
||||
### Tags
|
||||
|
||||
This script can sync host tags to your Zabbix hosts for use in filtering,
|
||||
SLA calculations and event correlation.
|
||||
|
||||
Tags can be synced from the following sources:
|
||||
|
||||
1. NetBox device/vm tags
|
||||
2. NetBox config context
|
||||
3. NetBox fields
|
||||
|
||||
Syncing tags will override any tags that were set manually on the host,
|
||||
making NetBox the single source-of-truth for managing tags.
|
||||
|
||||
To enable syncing, turn on tag_sync in the config file.
|
||||
By default, this script will modify tag names and tag values to lowercase.
|
||||
You can change this behaviour by setting tag_lower to False.
|
||||
|
||||
```python
|
||||
tag_sync = True
|
||||
tag_lower = True
|
||||
```
|
||||
|
||||
#### Device tags
|
||||
|
||||
As NetBox doesn't follow the tag/value pattern for tags, we will need a tag
|
||||
name set to register the NetBox tags.
|
||||
|
||||
By default the tag name is "NetBox", but you can change this to whatever you want.
|
||||
The value for the tag can be set to 'name', 'display', or 'slug', which refers to the property of the NetBox tag object that will be used as the value in Zabbix.
|
||||
|
||||
```python
|
||||
tag_name = 'NetBox'
|
||||
tag_value = 'name'
|
||||
```
|
||||
|
||||
#### Config context
|
||||
|
||||
You can supply custom tags via config context by adding the following:
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
"tags": [
|
||||
{
|
||||
"MyTagName": "MyTagValue"
|
||||
},
|
||||
{
|
||||
"environment": "production"
|
||||
}
|
||||
],
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This will allow you to assign tags based on the config context rules.
|
||||
|
||||
#### NetBox Field
|
||||
|
||||
NetBox field can also be used as input for tags, just like inventory and usermacros.
|
||||
To enable syncing from fields, make sure to configure a `device_tag_map` and/or a `vm_tag_map`.
|
||||
|
||||
```python
|
||||
device_tag_map = {"site/name": "site",
|
||||
"rack/name": "rack",
|
||||
"platform/name": "target"}
|
||||
|
||||
vm_tag_map = {"site/name": "site",
|
||||
"cluster/name": "cluster",
|
||||
"platform/name": "target"}
|
||||
```
|
||||
|
||||
To turn off field syncing, set the maps to empty dictionaries:
|
||||
|
||||
```python
|
||||
device_tag_map = {}
|
||||
vm_tag_map = {}
|
||||
```
|
||||
|
||||
|
||||
### Usermacros
|
||||
|
||||
You can choose to use NetBox as a source for Host usermacros by
|
||||
enabling the following option in the configuration file:
|
||||
|
||||
```python
|
||||
usermacro_sync = True
|
||||
```
|
||||
|
||||
Please be advised that enabling this option will _clear_ any usermacros
|
||||
manually set on the managed hosts and override them with the usermacros
|
||||
from NetBox.
|
||||
|
||||
There are two NetBox sources that can be used to populate usermacros:
|
||||
|
||||
1. NetBox config context
|
||||
2. NetBox fields
|
||||
|
||||
#### Config context
|
||||
|
||||
By defining a dictionary `usermacros` within the `zabbix` key in
|
||||
config context, you can dynamically assign usermacro values based on
|
||||
anything that you can target based on
|
||||
[config contexts](https://netboxlabs.com/docs/netbox/en/stable/features/context-data/)
|
||||
within NetBox.
|
||||
|
||||
Through this method, it is possible to define the following types of usermacros:
|
||||
|
||||
1. Text
|
||||
2. Secret
|
||||
3. Vault
|
||||
|
||||
The default macro type is text if no `type` and `value` have been set.
|
||||
It is also possible to create usermacros with
|
||||
[context](https://www.zabbix.com/documentation/7.0/en/manual/config/macros/user_macros_context).
|
||||
|
||||
Examples:
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
"usermacros": {
|
||||
"{$USER_MACRO}": "test value",
|
||||
"{$CONTEXT_MACRO:\"test\"}": "test value",
|
||||
"{$CONTEXT_REGEX_MACRO:regex:\".*\"}": "test value",
|
||||
"{$SECRET_MACRO}": {
|
||||
"type": "secret",
|
||||
"value": "PaSsPhRaSe"
|
||||
},
|
||||
"{$VAULT_MACRO}": {
|
||||
"type": "vault",
|
||||
"value": "secret/vmware:password"
|
||||
},
|
||||
"{$USER_MACRO2}": {
|
||||
"type": "text",
|
||||
"value": "another test value"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Please be aware that secret usermacros are only synced _once_ by default.
|
||||
This is the default behavior because the Zabbix API won't return the value of
|
||||
secrets so the script cannot compare the values with those set in NetBox.
|
||||
|
||||
If you update a secret usermacro value, just remove the value from the host
|
||||
in Zabbix and the new value will be synced during the next run.
|
||||
|
||||
Alternatively, you can set the following option in the config file:
|
||||
|
||||
```python
|
||||
usermacro_sync = "full"
|
||||
```
|
||||
|
||||
This will force a full usermacro sync on every run on hosts that have secret usermacros set.
|
||||
That way, you will know for sure the secret values are always up to date.
|
||||
|
||||
Keep in mind that NetBox will show your secrets in plain text.
|
||||
If true secrecy is required, consider switching to
|
||||
[vault](https://www.zabbix.com/documentation/current/en/manual/config/macros/secret_macros#vault-secret)
|
||||
usermacros.
|
||||
|
||||
#### NetBox Fields
|
||||
|
||||
To use NetBox fields as a source for usermacros, you will need to set up usermacro maps
|
||||
for devices and/or virtual machines in the configuration file.
|
||||
This method only supports `text` type usermacros.
|
||||
|
||||
For example:
|
||||
|
||||
```python
|
||||
usermacro_sync = True
|
||||
device_usermacro_map = {"serial": "{$HW_SERIAL}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"}
|
||||
vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"}
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Permissions
|
||||
|
||||
### Netbox
|
||||
Make sure that the Netbox user has proper permissions for device read and modify (modify to set the Zabbix HostID custom field) operations. The user should also have read-only access to the device types.
|
||||
### NetBox
|
||||
|
||||
Make sure that the NetBox user has proper permissions for device read and modify
|
||||
(modify to set the Zabbix HostID custom field) operations. The user should also
|
||||
have read-only access to the device types.
|
||||
|
||||
### Zabbix
|
||||
Make sure that the Zabbix user has permissions to read hostgroups and proxy servers. The user should have full rights on creating, modifying and deleting hosts.
|
||||
|
||||
If you want to automatically create hostgroups then the create permission on host-groups should also be applied.
|
||||
Make sure that the Zabbix user has permissions to read hostgroups and proxy
|
||||
servers. The user should have full rights on creating, modifying and deleting
|
||||
hosts.
|
||||
|
||||
If you want to automatically create hostgroups then the create permission on
|
||||
host-groups should also be applied.
|
||||
|
||||
### Custom links
|
||||
To make the user experience easier you could add a custom link that redirects users to the Zabbix latest data.
|
||||
|
||||
To make the user experience easier you could add a custom link that redirects
|
||||
users to the Zabbix latest data.
|
||||
|
||||
```
|
||||
* Name: zabbix_latestData
|
||||
* Text: {% if object.cf["zabbix_hostid"] %}Show host in Zabbix{% endif %}
|
||||
* URL: http://myzabbixserver.local/zabbix.php?action=latest.view&hostids[]={{ object.cf["zabbix_hostid"] }}
|
||||
```
|
||||
|
||||
## Running the script
|
||||
|
||||
```
|
||||
python3 netbox_zabbix_sync.py
|
||||
```
|
||||
|
||||
### Flags
|
||||
| Flag | Option | Description |
|
||||
| ------------ | ------------ | ------------ |
|
||||
| -v | verbose | Log with debugging on. |
|
||||
|
||||
| Flag | Option | Description |
|
||||
| ---- | --------- | ------------------------------------- |
|
||||
| -v | verbose | Log with info on. |
|
||||
| -vv | debug | Log with debugging on. |
|
||||
| -vvv | debug-all | Log with debugging on for all modules |
|
||||
|
||||
## Config context
|
||||
|
||||
### Zabbix proxy
|
||||
|
||||
You can set the proxy for a device using the 'proxy' key in config context.
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
@ -255,7 +609,10 @@ You can set the proxy for a device using the 'proxy' key in config context.
|
||||
}
|
||||
}
|
||||
```
|
||||
It is now posible to specify proxy groups with the introduction of Proxy groups in Zabbix 7. Specifying a group in the config context on older Zabbix releases will have no impact and the script will ignore the statement.
|
||||
|
||||
It is now possible to specify proxy groups with the introduction of Proxy groups
|
||||
in Zabbix 7. Specifying a group in the config context on older Zabbix releases
|
||||
will have no impact and the script will ignore the statement.
|
||||
|
||||
```json
|
||||
{
|
||||
@ -265,7 +622,11 @@ It is now posible to specify proxy groups with the introduction of Proxy groups
|
||||
}
|
||||
```
|
||||
|
||||
The script will prefer groups when specifying both a proxy and group. This is done with the assumption that groups are more resiliant and HA ready, making it a more logical choice to use for proxy linkage. This also makes migrating from a proxy to proxy group easier since the group take priority over the invidivual proxy.
|
||||
The script will prefer groups when specifying both a proxy and group. This is
|
||||
done with the assumption that groups are more resilient and HA ready, making it
|
||||
a more logical choice to use for proxy linkage. This also makes migrating from a
|
||||
proxy to proxy group easier since the group takes priority over the individual
|
||||
proxy.
|
||||
|
||||
```json
|
||||
{
|
||||
@ -275,33 +636,45 @@ The script will prefer groups when specifying both a proxy and group. This is do
|
||||
}
|
||||
}
|
||||
```
|
||||
In the example above the host will use the group on Zabbix 7. On Zabbix 6 and below the host will use the proxy. Zabbix 7 will use the proxy value when ommiting the proxy_group value.
|
||||
|
||||
Because of the possible amount of destruction when setting up NetBox but forgetting the proxy command, the sync works a bit differently. By default everything is synced except in a situation where the Zabbix host has a proxy configured but nothing is configured in NetBox. To force deletion and a full sync, set the `full_proxy_sync` variable in the config file.
|
||||
In the example above the host will use the group on Zabbix 7. On Zabbix 6 and
|
||||
below the host will use the proxy. Zabbix 7 will use the proxy value when
|
||||
omitting the proxy_group value.
|
||||
|
||||
### Set interface parameters within Netbox
|
||||
When adding a new device, you can set the interface type with custom context. By default, the following configuration is applied when no config context is provided:
|
||||
### Set interface parameters within NetBox
|
||||
|
||||
* SNMPv2
|
||||
* UDP 161
|
||||
* Bulk requests enabled
|
||||
* SNMP community: {$SNMP_COMMUNITY}
|
||||
When adding a new device, you can set the interface type with custom context. By
|
||||
default, the following configuration is applied when no config context is
|
||||
provided:
|
||||
|
||||
Due to Zabbix limitations of changing interface type with a linked template, changing the interface type from within Netbox is not supported and the script will generate an error.
|
||||
- SNMPv2
|
||||
- UDP 161
|
||||
- Bulk requests enabled
|
||||
- SNMP community: {$SNMP_COMMUNITY}
|
||||
|
||||
Due to Zabbix limitations of changing interface type with a linked template,
|
||||
changing the interface type from within NetBox is not supported and the script
|
||||
will generate an error.
|
||||
|
||||
For example, when changing a SNMP interface to an Agent interface:
|
||||
|
||||
For example when changing a SNMP interface to an Agent interface:
|
||||
```
|
||||
Netbox-Zabbix-sync - WARNING - Device: Interface OUT of sync.
|
||||
Netbox-Zabbix-sync - ERROR - Device: changing interface type to 1 is not supported.
|
||||
NetBox-Zabbix-sync - WARNING - Device: Interface OUT of sync.
|
||||
NetBox-Zabbix-sync - ERROR - Device: changing interface type to 1 is not supported.
|
||||
```
|
||||
|
||||
To configure the interface parameters you'll need to use custom context. Custom context was used to make this script as customizable as posible for each environment. For example, you could:
|
||||
* Set the custom context directly on a device
|
||||
* Set the custom context on a label, which you would add to a device (for instance, SNMPv3)
|
||||
* Set the custom context on a device role
|
||||
* Set the custom context on a site or region
|
||||
To configure the interface parameters you'll need to use custom context. Custom
|
||||
context was used to make this script as customizable as possible for each
|
||||
environment. For example, you could:
|
||||
|
||||
- Set the custom context directly on a device
|
||||
- Set the custom context on a tag, which you would add to a device (for
|
||||
instance, SNMPv3)
|
||||
- Set the custom context on a device role
|
||||
- Set the custom context on a site or region
|
||||
|
||||
##### Agent interface configuration example
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
@ -310,7 +683,9 @@ To configure the interface parameters you'll need to use custom context. Custom
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### SNMPv2 interface configuration example
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
@ -324,7 +699,9 @@ To configure the interface parameters you'll need to use custom context. Custom
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### SNMPv3 interface configuration example
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
@ -341,6 +718,13 @@ To configure the interface parameters you'll need to use custom context. Custom
|
||||
}
|
||||
```
|
||||
|
||||
I would recommend using macros for sensitive data such as community strings since the data in Netbox is plain-text.
|
||||
I would recommend using usermacros for sensitive data such as community strings
|
||||
since the data in NetBox is plain-text.
|
||||
|
||||
> **_NOTE:_** Not all SNMP data is required for a working configuration.
|
||||
> [The following parameters are allowed](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed") but
|
||||
> are not all required, depending on your environment.
|
||||
|
||||
|
||||
|
||||
|
||||
> **_NOTE:_** Not all SNMP data is required for a working configuration. [The following parameters are allowed ](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed ")but are not all required, depending on your environment.
|
||||
|
@ -3,11 +3,11 @@
|
||||
# coming from config context instead of a custom field.
|
||||
templates_config_context = False
|
||||
|
||||
# Set to true to give config context templates a
|
||||
# Set to true to give config context templates a
|
||||
# higher priority then custom field templates
|
||||
templates_config_context_overrule = False
|
||||
|
||||
# Set template and device Netbox "custom field" names
|
||||
# Set template and device NetBox "custom field" names
|
||||
# Template_cf is not used when templates_config_context is enabled
|
||||
template_cf = "zabbix_template"
|
||||
device_cf = "zabbix_hostid"
|
||||
@ -21,18 +21,26 @@ create_hostgroups = True
|
||||
## Create journal entries
|
||||
create_journal = False
|
||||
|
||||
## Virtual machine sync
|
||||
# Set sync_vms to True in order to use this new feature
|
||||
# Use the hostgroup vm_hostgroup_format mapper for specific
# hostgroup attributes of VMs such as cluster_type and cluster
|
||||
sync_vms = False
|
||||
# Check the README documentation for values to use in the VM hostgroup format.
|
||||
vm_hostgroup_format = "cluster_type/cluster/role"
|
||||
|
||||
## Proxy Sync
|
||||
# Set to true to enable removal of proxies under hosts. Use with caution and make sure that you specified
# all the required proxies in the device config context before enabling this option.
# With this option disabled, proxies will only be added and modified for Zabbix hosts.
|
||||
full_proxy_sync = False
|
||||
|
||||
## Netbox to Zabbix device state convertion
|
||||
## NetBox to Zabbix device state conversion
|
||||
zabbix_device_removal = ["Decommissioning", "Inventory"]
|
||||
zabbix_device_disable = ["Offline", "Planned", "Staged", "Failed"]
|
||||
|
||||
## Hostgroup mapping
|
||||
# Available choices: dev_location, dev_role, manufacturer, region, site, site_group, tenant, tenant_group
|
||||
# See the README documentation for available options
|
||||
# You can also use CF (custom field) names under the device. The CF content will be used for the hostgroup generation.
|
||||
#
|
||||
# When using region in the group name, the default behaviour is to use name of the directly assigned region.
|
||||
@ -40,8 +48,8 @@ zabbix_device_disable = ["Offline", "Planned", "Staged", "Failed"]
|
||||
#
|
||||
# 'Global/Europe/Netherlands/Amsterdam' instead of just 'Amsterdam'.
|
||||
#
|
||||
# traverse_site_groups controls the same behaviour for any assigned site_groups.
|
||||
hostgroup_format = "site/manufacturer/dev_role"
|
||||
# traverse_site_groups controls the same behaviour for any assigned site_groups.
|
||||
hostgroup_format = "site/manufacturer/role"
|
||||
traverse_regions = False
|
||||
traverse_site_groups = False
|
||||
|
||||
@ -54,34 +62,92 @@ traverse_site_groups = False
|
||||
# nb_device_filter = {"site": ["HQ-AMS", "HQ-FRA"]} #Device must be in either one of these sites
|
||||
# nb_device_filter = {"site": "HQ-AMS", "tag": "zabbix", "role__n": ["PDU", "console-server"]} #Device must be in site HQ-AMS, have the tag zabbix and must not be part of the PDU or console-server role
|
||||
|
||||
# Default device filter, only get devices which have a name in Netbox:
|
||||
# Default device filter, only get devices which have a name in NetBox:
|
||||
nb_device_filter = {"name__n": "null"}
|
||||
# Default filter for VMs
|
||||
nb_vm_filter = {"name__n": "null"}
|
||||
|
||||
## Inventory
|
||||
# See https://www.zabbix.com/documentation/current/en/manual/config/hosts/inventory#building-inventory
|
||||
# Choice between disabled, manual or automatic.
|
||||
# Make sure to select at least manual or automatic in use with the inventory_sync function.
|
||||
inventory_mode = "disabled"
|
||||
|
||||
# To allow syncing of NetBox device properties, set inventory_sync to True
|
||||
inventory_sync = False
|
||||
|
||||
# Set inventory_automatic to False to use manual inventory, True for automatic
|
||||
# See https://www.zabbix.com/documentation/current/en/manual/config/hosts/inventory#building-inventory
|
||||
inventory_automatic = True
|
||||
|
||||
# inventory_map is used to map NetBox properties to Zabbix Inventory fields.
|
||||
# For nested properties, you can use the '/' separator.
|
||||
# For example, the following map will assign the custom field 'mycustomfield' to the 'alias' Zabbix inventory field:
|
||||
#
|
||||
# inventory_map = { "custom_fields/mycustomfield/name": "alias"}
|
||||
# device_inventory_map = { "custom_fields/mycustomfield/name": "alias"}
|
||||
#
|
||||
# The following map should provide some nice defaults:
|
||||
inventory_map = { "asset_tag": "asset_tag",
|
||||
"virtual_chassis/name": "chassis",
|
||||
"status/label": "deployment_status",
|
||||
"location/name": "location",
|
||||
"latitude": "location_lat",
|
||||
"longitude": "location_lon",
|
||||
"comments": "notes",
|
||||
"name": "name",
|
||||
"rack/name": "site_rack",
|
||||
"serial": "serialno_a",
|
||||
"device_type/model": "type",
|
||||
"device_type/manufacturer/name": "vendor",
|
||||
"oob_ip/address": "oob_ip" }
|
||||
# The following maps should provide some nice defaults:
|
||||
device_inventory_map = { "asset_tag": "asset_tag",
|
||||
"virtual_chassis/name": "chassis",
|
||||
"status/label": "deployment_status",
|
||||
"location/name": "location",
|
||||
"latitude": "location_lat",
|
||||
"longitude": "location_lon",
|
||||
"comments": "notes",
|
||||
"name": "name",
|
||||
"rack/name": "site_rack",
|
||||
"serial": "serialno_a",
|
||||
"device_type/model": "type",
|
||||
"device_type/manufacturer/name": "vendor",
|
||||
"oob_ip/address": "oob_ip" }
|
||||
|
||||
# We also support inventory mapping on Virtual Machines.
|
||||
vm_inventory_map = { "status/label": "deployment_status",
|
||||
"comments": "notes",
|
||||
"name": "name" }
|
||||
|
||||
# To allow syncing of usermacros from NetBox, set to True.
|
||||
# this will enable both field mapping and config context usermacros.
|
||||
#
|
||||
# If set to "full", it will force the update of secret usermacros every run.
|
||||
# Please see the README.md for more information.
|
||||
usermacro_sync = False
|
||||
|
||||
# device usermacro_map to map NetBox fields to usermacros.
|
||||
device_usermacro_map = {"serial": "{$HW_SERIAL}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"}
|
||||
|
||||
# virtual machine usermacro_map to map NetBox fields to usermacros.
|
||||
vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"}
|
||||
|
||||
# To sync host tags to Zabbix, set to True.
|
||||
tag_sync = False
|
||||
|
||||
# Setting tag_lower to True will lowercase capital letters in tag names and values
# This is more in line with the Zabbix way of working with tags.
|
||||
#
|
||||
# You can however set this to False to ensure capital letters are synced to Zabbix tags.
|
||||
tag_lower = True
|
||||
|
||||
# We can sync NetBox device/VM tags to Zabbix, but as NetBox tags don't follow the key/value
|
||||
# pattern, we need to specify a tag name to register the NetBox tags in Zabbix.
|
||||
#
|
||||
#
|
||||
#
|
||||
# If tag_name is set to False, we won't sync NetBox device/VM tags to Zabbix.
|
||||
tag_name = 'NetBox'
|
||||
|
||||
# We can choose to use 'name', 'slug' or 'display' NetBox tag properties as a value in Zabbix.
|
||||
# 'name'is used by default.
|
||||
tag_value = "name"
|
||||
|
||||
# device tag_map to map NetBox fields to host tags.
|
||||
device_tag_map = {"site/name": "site",
|
||||
"rack/name": "rack",
|
||||
"platform/name": "target"}
|
||||
|
||||
# Virtual machine tag_map to map NetBox fields to host tags.
|
||||
vm_tag_map = {"site/name": "site",
|
||||
"cluster/name": "cluster",
|
||||
"platform/name": "target"}
|
||||
|
0 modules/__init__.py (Normal file)
128 modules/config.py (Normal file)
@ -0,0 +1,128 @@
|
||||
"""
|
||||
Module for parsing configuration from the top level config.py file
|
||||
"""
|
||||
from pathlib import Path
|
||||
from importlib import util
|
||||
from os import environ, path
|
||||
from logging import getLogger
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
# PLEASE NOTE: This is a sample config file. Please do NOT make any edits in this file!
|
||||
# You should create your own config.py and it will overwrite the default config.
|
||||
|
||||
DEFAULT_CONFIG = {
|
||||
"templates_config_context": False,
|
||||
"templates_config_context_overrule": False,
|
||||
"template_cf": "zabbix_template",
|
||||
"device_cf": "zabbix_hostid",
|
||||
"clustering": False,
|
||||
"create_hostgroups": True,
|
||||
"create_journal": False,
|
||||
"sync_vms": False,
|
||||
"vm_hostgroup_format": "cluster_type/cluster/role",
|
||||
"full_proxy_sync": False,
|
||||
"zabbix_device_removal": ["Decommissioning", "Inventory"],
|
||||
"zabbix_device_disable": ["Offline", "Planned", "Staged", "Failed"],
|
||||
"hostgroup_format": "site/manufacturer/role",
|
||||
"traverse_regions": False,
|
||||
"traverse_site_groups": False,
|
||||
"nb_device_filter": {"name__n": "null"},
|
||||
"nb_vm_filter": {"name__n": "null"},
|
||||
"inventory_mode": "disabled",
|
||||
"inventory_sync": False,
|
||||
"device_inventory_map": {
|
||||
"asset_tag": "asset_tag",
|
||||
"virtual_chassis/name": "chassis",
|
||||
"status/label": "deployment_status",
|
||||
"location/name": "location",
|
||||
"latitude": "location_lat",
|
||||
"longitude": "location_lon",
|
||||
"comments": "notes",
|
||||
"name": "name",
|
||||
"rack/name": "site_rack",
|
||||
"serial": "serialno_a",
|
||||
"device_type/model": "type",
|
||||
"device_type/manufacturer/name": "vendor",
|
||||
"oob_ip/address": "oob_ip"
|
||||
},
|
||||
"vm_inventory_map": {
|
||||
"status/label": "deployment_status",
|
||||
"comments": "notes",
|
||||
"name": "name"
|
||||
},
|
||||
"usermacro_sync": False,
|
||||
"device_usermacro_map": {
|
||||
"serial": "{$HW_SERIAL}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"
|
||||
},
|
||||
"vm_usermacro_map": {
|
||||
"memory": "{$TOTAL_MEMORY}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"
|
||||
},
|
||||
"tag_sync": False,
|
||||
"tag_lower": True,
|
||||
"tag_name": 'NetBox',
|
||||
"tag_value": "name",
|
||||
"device_tag_map": {
|
||||
"site/name": "site",
|
||||
"rack/name": "rack",
|
||||
"platform/name": "target"
|
||||
},
|
||||
"vm_tag_map": {
|
||||
"site/name": "site",
|
||||
"cluster/name": "cluster",
|
||||
"platform/name": "target"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def load_config():
|
||||
"""Returns combined config from all sources"""
|
||||
# Overwrite default config with config.py
|
||||
conf = load_config_file(config_default=DEFAULT_CONFIG)
|
||||
# Overwrite default config and config.py with environment variables
|
||||
for key in conf:
|
||||
value_setting = load_env_variable(key)
|
||||
if value_setting is not None:
|
||||
conf[key] = value_setting
|
||||
return conf
|
||||
|
||||
|
||||
def load_env_variable(config_environvar):
|
||||
"""Returns config from environment variable"""
|
||||
prefix = "NBZX_"
|
||||
config_environvar = prefix + config_environvar.upper()
|
||||
if config_environvar in environ:
|
||||
return environ[config_environvar]
|
||||
return None
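# Example (illustrative): the config key is upper-cased and prefixed with
# NBZX_ before the environment lookup. With NBZX_SYNC_VMS=true exported in
# the shell:
#
#   load_env_variable("sync_vms")      # -> "true"
#   load_env_variable("missing_key")   # -> None
#
# Note that values are returned as strings, not cast to booleans.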
|
||||
|
||||
|
||||
def load_config_file(config_default, config_file="config.py"):
|
||||
"""Returns config from config.py file"""
|
||||
# Find the script path and config file next to it.
|
||||
script_dir = path.dirname(path.dirname(path.abspath(__file__)))
|
||||
config_path = Path(path.join(script_dir, config_file))
|
||||
|
||||
# If the script directory is not found, try the current working directory
|
||||
if not config_path.exists():
|
||||
config_path = Path(config_file)
|
||||
|
||||
# If both checks fail then fallback to the default config
|
||||
if not config_path.exists():
|
||||
return config_default
|
||||
|
||||
dconf = config_default.copy()
|
||||
# Dynamically import the config module
|
||||
spec = util.spec_from_file_location("config", config_path)
|
||||
config_module = util.module_from_spec(spec)
|
||||
spec.loader.exec_module(config_module)
|
||||
# Update DEFAULT_CONFIG with variables from the config module
|
||||
for key in dconf:
|
||||
if hasattr(config_module, key):
|
||||
dconf[key] = getattr(config_module, key)
|
||||
return dconf
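# Example: a user-supplied config.py only needs to define the keys it wants
# to override; any module-level variable whose name matches a DEFAULT_CONFIG
# key is picked up here. An illustrative config.py:
#
#   sync_vms = True
#   create_hostgroups = False
#   hostgroup_format = "site/role"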
|
File diff suppressed because it is too large
@@ -2,32 +2,47 @@
|
||||
"""
|
||||
All custom exceptions used for Exception generation
|
||||
"""
|
||||
|
||||
|
||||
class SyncError(Exception):
|
||||
""" Class SyncError """
|
||||
"""Class SyncError"""
|
||||
|
||||
|
||||
class JournalError(Exception):
|
||||
""" Class SyncError """
|
||||
"""Class SyncError"""
|
||||
|
||||
|
||||
class SyncExternalError(SyncError):
|
||||
""" Class SyncExternalError """
|
||||
"""Class SyncExternalError"""
|
||||
|
||||
|
||||
class SyncInventoryError(SyncError):
|
||||
""" Class SyncInventoryError """
|
||||
"""Class SyncInventoryError"""
|
||||
|
||||
|
||||
class SyncDuplicateError(SyncError):
|
||||
""" Class SyncDuplicateError """
|
||||
"""Class SyncDuplicateError"""
|
||||
|
||||
|
||||
class EnvironmentVarError(SyncError):
|
||||
""" Class EnvironmentVarError """
|
||||
"""Class EnvironmentVarError"""
|
||||
|
||||
|
||||
class InterfaceConfigError(SyncError):
|
||||
""" Class InterfaceConfigError """
|
||||
"""Class InterfaceConfigError"""
|
||||
|
||||
|
||||
class ProxyConfigError(SyncError):
|
||||
""" Class ProxyConfigError """
|
||||
"""Class ProxyConfigError"""
|
||||
|
||||
|
||||
class HostgroupError(SyncError):
|
||||
""" Class HostgroupError """
|
||||
"""Class HostgroupError"""
|
||||
|
||||
|
||||
class TemplateError(SyncError):
|
||||
""" Class TemplateError """
|
||||
"""Class TemplateError"""
|
||||
|
||||
|
||||
class UsermacroError(SyncError):
|
||||
"""Class UsermacroError"""
|
||||
|
modules/hostgroups.py (new file, 196 lines)
@@ -0,0 +1,196 @@
|
||||
"""Module for all hostgroup related code"""
|
||||
|
||||
from logging import getLogger
|
||||
|
||||
from modules.exceptions import HostgroupError
|
||||
from modules.tools import build_path
|
||||
|
||||
|
||||
class Hostgroup:
|
||||
"""Hostgroup class for devices and VM's
|
||||
Takes type (vm or dev) and NB object"""
|
||||
|
||||
# pylint: disable=too-many-arguments, disable=too-many-positional-arguments
|
||||
def __init__(
|
||||
self,
|
||||
obj_type,
|
||||
nb_obj,
|
||||
version,
|
||||
logger=None,
|
||||
nested_sitegroup_flag=False,
|
||||
nested_region_flag=False,
|
||||
nb_regions=None,
|
||||
nb_groups=None,
|
||||
):
|
||||
self.logger = logger if logger else getLogger(__name__)
|
||||
if obj_type not in ("vm", "dev"):
|
||||
msg = f"Unable to create hostgroup with type {type}"
|
||||
self.logger.error(msg)
|
||||
raise HostgroupError(msg)
|
||||
self.type = str(obj_type)
|
||||
self.nb = nb_obj
|
||||
self.name = self.nb.name
|
||||
self.nb_version = version
|
||||
# Used for nested data objects
|
||||
self.set_nesting(
|
||||
nested_sitegroup_flag, nested_region_flag, nb_groups, nb_regions
|
||||
)
|
||||
self._set_format_options()
|
||||
|
||||
def __str__(self):
|
||||
return f"Hostgroup for {self.type} {self.name}"
|
||||
|
||||
def __repr__(self):
|
||||
return self.__str__()
|
||||
|
||||
def _set_format_options(self):
|
||||
"""
|
||||
Set all available variables
|
||||
for hostgroup generation
|
||||
"""
|
||||
format_options = {}
|
||||
# Set variables for both type of devices
|
||||
if self.type in ("vm", "dev"):
|
||||
# Role fix for NetBox <=3
|
||||
role = None
|
||||
if self.nb_version.startswith(("2", "3")) and self.type == "dev":
|
||||
role = self.nb.device_role.name if self.nb.device_role else None
|
||||
else:
|
||||
role = self.nb.role.name if self.nb.role else None
|
||||
# Add default formatting options
|
||||
# Check if a site is configured. A site is optional for VMs
|
||||
format_options["region"] = None
|
||||
format_options["site_group"] = None
|
||||
if self.nb.site:
|
||||
if self.nb.site.region:
|
||||
format_options["region"] = self.generate_parents(
|
||||
"region", str(self.nb.site.region)
|
||||
)
|
||||
if self.nb.site.group:
|
||||
format_options["site_group"] = self.generate_parents(
|
||||
"site_group", str(self.nb.site.group)
|
||||
)
|
||||
format_options["role"] = role
|
||||
format_options["site"] = self.nb.site.name if self.nb.site else None
|
||||
format_options["tenant"] = str(self.nb.tenant) if self.nb.tenant else None
|
||||
format_options["tenant_group"] = (
|
||||
str(self.nb.tenant.group) if self.nb.tenant else None
|
||||
)
|
||||
format_options["platform"] = (
|
||||
self.nb.platform.name if self.nb.platform else None
|
||||
)
|
||||
# Variables only applicable for devices
|
||||
if self.type == "dev":
|
||||
format_options["manufacturer"] = self.nb.device_type.manufacturer.name
|
||||
format_options["location"] = (
|
||||
str(self.nb.location) if self.nb.location else None
|
||||
)
|
||||
format_options["rack"] = self.nb.rack.name if self.nb.rack else None
|
||||
# Variables only applicable for VM's
|
||||
if self.type == "vm":
|
||||
# Check if a cluster is configured. Could also be configured in a site.
|
||||
if self.nb.cluster:
|
||||
format_options["cluster"] = self.nb.cluster.name
|
||||
format_options["cluster_type"] = self.nb.cluster.type.name
|
||||
self.format_options = format_options
|
||||
|
||||
def set_nesting(
|
||||
self, nested_sitegroup_flag, nested_region_flag, nb_groups, nb_regions
|
||||
):
|
||||
"""Set nesting options for this Hostgroup"""
|
||||
self.nested_objects = {
|
||||
"site_group": {"flag": nested_sitegroup_flag, "data": nb_groups},
|
||||
"region": {"flag": nested_region_flag, "data": nb_regions},
|
||||
}
|
||||
|
||||
def generate(self, hg_format=None):
|
||||
"""Generate hostgroup based on a provided format"""
|
||||
# Set format to default in case its not specified
|
||||
if not hg_format:
|
||||
hg_format = (
|
||||
"site/manufacturer/role" if self.type == "dev" else "cluster/role"
|
||||
)
|
||||
# Split all given names
|
||||
hg_output = []
|
||||
hg_items = hg_format.split("/")
|
||||
for hg_item in hg_items:
|
||||
# Check if requested data is available as option for this host
|
||||
if hg_item not in self.format_options:
|
||||
# Check if a custom field exists with this name
|
||||
cf_data = self.custom_field_lookup(hg_item)
|
||||
# CF does not exist
|
||||
if not cf_data["result"]:
|
||||
msg = (
|
||||
f"Unable to generate hostgroup for host {self.name}. "
|
||||
f"Item type {hg_item} not supported."
|
||||
)
|
||||
self.logger.error(msg)
|
||||
raise HostgroupError(msg)
|
||||
# CF data is populated
|
||||
if cf_data["cf"]:
|
||||
hg_output.append(cf_data["cf"])
|
||||
continue
|
||||
# Check if there is a value associated to the variable.
|
||||
# For instance, if a device has no location, do not use it with hostgroup calculation
|
||||
hostgroup_value = self.format_options[hg_item]
|
||||
if hostgroup_value:
|
||||
hg_output.append(hostgroup_value)
|
||||
# Check if the hostgroup is populated with at least one item.
|
||||
if bool(hg_output):
|
||||
return "/".join(hg_output)
|
||||
msg = (
|
||||
f"Unable to generate hostgroup for host {self.name}."
|
||||
" Not enough valid items. This is most likely"
|
||||
" due to the use of custom fields that are empty"
|
||||
" or an invalid hostgroup format."
|
||||
)
|
||||
self.logger.error(msg)
|
||||
raise HostgroupError(msg)
|
||||
|
||||
def list_formatoptions(self):
|
||||
"""
|
||||
Function to easily troubleshoot which values
|
||||
are generated for a specific device or VM.
|
||||
"""
|
||||
print(f"The following options are available for host {self.name}")
|
||||
for option_type, value in self.format_options.items():
|
||||
if value is not None:
|
||||
print(f"{option_type} - {value}")
|
||||
print("The following options are not available")
|
||||
for option_type, value in self.format_options.items():
|
||||
if value is None:
|
||||
print(f"{option_type}")
|
||||
|
||||
def custom_field_lookup(self, hg_category):
|
||||
"""
|
||||
Checks if a valid custom field is present in NetBox.
|
||||
INPUT: Custom field name
|
||||
OUTPUT: dictionary with 'result' and 'cf' keys.
|
||||
"""
|
||||
# Check if the custom field exists
|
||||
if hg_category not in self.nb.custom_fields:
|
||||
return {"result": False, "cf": None}
|
||||
# Checks if the custom field has been populated
|
||||
if not bool(self.nb.custom_fields[hg_category]):
|
||||
return {"result": True, "cf": None}
|
||||
# Custom field exists and is populated
|
||||
return {"result": True, "cf": self.nb.custom_fields[hg_category]}
|
||||
|
||||
def generate_parents(self, nest_type, child_object):
|
||||
"""
|
||||
Generates parent objects to implement nested regions / nested site groups
|
||||
INPUT: nest_type to set which type of nesting is going to be processed
|
||||
child_object: the name of the child object (for instance the last NB region)
|
||||
OUTPUT: STRING - Either the single child name or child and parents.
|
||||
"""
|
||||
# Check if this type of nesting is supported.
|
||||
if not nest_type in self.nested_objects:
|
||||
return child_object
|
||||
# If the nested flag is True, perform parent calculation
|
||||
if self.nested_objects[nest_type]["flag"]:
|
||||
final_nested_object = build_path(
|
||||
child_object, self.nested_objects[nest_type]["data"]
|
||||
)
|
||||
return "/".join(final_nested_object)
|
||||
# Nesting is not allowed for this object. Return child_object
|
||||
return child_object
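# Example (illustrative values): for a device in site "Amsterdam", made by
# "Cisco", with role "Core Router", the default device format would resolve as
#
#   hg = Hostgroup("dev", nb_device, "4.1", logger=logger)
#   hg.generate("site/manufacturer/role")  # -> "Amsterdam/Cisco/Core Router"
#
# With nested_region_flag set, a "region" item expands to its parent path,
# e.g. "Europe/Netherlands", via generate_parents() and build_path().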
|
@ -4,7 +4,8 @@ All of the Zabbix interface related configuration
|
||||
"""
|
||||
from modules.exceptions import InterfaceConfigError
|
||||
|
||||
class ZabbixInterface():
|
||||
|
||||
class ZabbixInterface:
|
||||
"""Class that represents a Zabbix interface."""
|
||||
|
||||
def __init__(self, context, ip):
|
||||
@ -13,22 +14,35 @@ class ZabbixInterface():
|
||||
self.skelet = {"main": "1", "useip": "1", "dns": "", "ip": self.ip}
|
||||
self.interface = self.skelet
|
||||
|
||||
def _set_default_port(self):
|
||||
"""Sets default TCP / UDP port for different interface types"""
|
||||
interface_mapping = {1: 10050, 2: 161, 3: 623, 4: 12345}
|
||||
# Check if interface type is listed in mapper.
|
||||
if self.interface["type"] not in interface_mapping:
|
||||
return False
|
||||
# Set default port to interface
|
||||
self.interface["port"] = str(interface_mapping[self.interface["type"]])
|
||||
return True
|
||||
|
||||
def get_context(self):
|
||||
""" check if Netbox custom context has been defined. """
|
||||
"""check if NetBox custom context has been defined."""
|
||||
if "zabbix" in self.context:
|
||||
zabbix = self.context["zabbix"]
|
||||
if("interface_type" in zabbix and "interface_port" in zabbix):
|
||||
if "interface_type" in zabbix:
|
||||
self.interface["type"] = zabbix["interface_type"]
|
||||
if not "interface_port" in zabbix:
|
||||
self._set_default_port()
|
||||
return True
|
||||
self.interface["port"] = zabbix["interface_port"]
|
||||
return True
|
||||
return False
|
||||
return False
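# Example: an illustrative config context that selects an SNMP interface on a
# non-default port (values are placeholders):
#
#   {"zabbix": {"interface_type": 2, "interface_port": 1161}}
#
# When only "interface_type" is given, _set_default_port() fills in the
# default for that type (e.g. 161 for SNMP, 10050 for agent).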
|
||||
|
||||
def set_snmp(self):
|
||||
""" Check if interface is type SNMP """
|
||||
"""Check if interface is type SNMP"""
|
||||
# pylint: disable=too-many-branches
|
||||
if self.interface["type"] == 2:
|
||||
# Checks if SNMP settings are defined in Netbox
|
||||
# Checks if SNMP settings are defined in NetBox
|
||||
if "snmp" in self.context["zabbix"]:
|
||||
snmp = self.context["zabbix"]["snmp"]
|
||||
self.interface["details"] = {}
|
||||
@ -38,14 +52,14 @@ class ZabbixInterface():
|
||||
else:
|
||||
# Fallback to bulk enabled if not specified
|
||||
self.interface["details"]["bulk"] = "1"
|
||||
# SNMP Version config is required in Netbox config context
|
||||
# SNMP Version config is required in NetBox config context
|
||||
if snmp.get("version"):
|
||||
self.interface["details"]["version"] = str(snmp.pop("version"))
|
||||
else:
|
||||
e = "SNMP version option is not defined."
|
||||
raise InterfaceConfigError(e)
|
||||
# If version 1 or 2 is used, get community string
|
||||
if self.interface["details"]["version"] in ['1','2']:
|
||||
if self.interface["details"]["version"] in ["1", "2"]:
|
||||
if "community" in snmp:
|
||||
# Set SNMP community to config context value
|
||||
community = snmp["community"]
|
||||
@ -54,11 +68,17 @@ class ZabbixInterface():
|
||||
community = "{$SNMP_COMMUNITY}"
|
||||
self.interface["details"]["community"] = str(community)
|
||||
# If version 3 has been used, get all
|
||||
# SNMPv3 Netbox related configs
|
||||
elif self.interface["details"]["version"] == '3':
|
||||
items = ["securityname", "securitylevel", "authpassphrase",
|
||||
"privpassphrase", "authprotocol", "privprotocol",
|
||||
"contextname"]
|
||||
# SNMPv3 NetBox related configs
|
||||
elif self.interface["details"]["version"] == "3":
|
||||
items = [
|
||||
"securityname",
|
||||
"securitylevel",
|
||||
"authpassphrase",
|
||||
"privpassphrase",
|
||||
"authprotocol",
|
||||
"privprotocol",
|
||||
"contextname",
|
||||
]
|
||||
for key, item in snmp.items():
|
||||
if key in items:
|
||||
self.interface["details"][key] = str(item)
|
||||
@ -72,11 +92,18 @@ class ZabbixInterface():
|
||||
e = "Interface type is not SNMP, unable to set SNMP details"
|
||||
raise InterfaceConfigError(e)
|
||||
|
||||
def set_default(self):
|
||||
""" Set default config to SNMPv2, port 161 and community macro. """
|
||||
def set_default_snmp(self):
|
||||
"""Set default config to SNMPv2, port 161 and community macro."""
|
||||
self.interface = self.skelet
|
||||
self.interface["type"] = "2"
|
||||
self.interface["port"] = "161"
|
||||
self.interface["details"] = {"version": "2",
|
||||
"community": "{$SNMP_COMMUNITY}",
|
||||
"bulk": "1"}
|
||||
self.interface["details"] = {
|
||||
"version": "2",
|
||||
"community": "{$SNMP_COMMUNITY}",
|
||||
"bulk": "1",
|
||||
}
|
||||
|
||||
def set_default_agent(self):
|
||||
"""Sets interface to Zabbix agent defaults"""
|
||||
self.interface["type"] = "1"
|
||||
self.interface["port"] = "10050"
|
||||
|
modules/logging.py (new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
"""
|
||||
Logging module for Netbox-Zabbix-sync
|
||||
"""
|
||||
|
||||
import logging
|
||||
from os import path
|
||||
|
||||
logger = logging.getLogger("NetBox-Zabbix-sync")
|
||||
|
||||
|
||||
def get_logger():
|
||||
"""
|
||||
Return the logger for Netbox Zabbix Sync
|
||||
"""
|
||||
return logger
|
||||
|
||||
|
||||
def setup_logger():
|
||||
"""
|
||||
Prepare a logger with stream and file handlers
|
||||
"""
|
||||
# Set logging
|
||||
lgout = logging.StreamHandler()
|
||||
# Logfile in the project root
|
||||
project_root = path.dirname(path.dirname(path.realpath(__file__)))
|
||||
logfile_path = path.join(project_root, "sync.log")
|
||||
lgfile = logging.FileHandler(logfile_path)
|
||||
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
||||
level=logging.WARNING,
|
||||
handlers=[lgout, lgfile],
|
||||
)
|
||||
|
||||
|
||||
def set_log_levels(root_level, own_level):
|
||||
"""
|
||||
Configure log levels for root and Netbox-Zabbix-sync logger
|
||||
"""
|
||||
logging.getLogger().setLevel(root_level)
|
||||
logger.setLevel(own_level)
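# Example: the sync script calls setup_logger() once and then adjusts
# verbosity per run, e.g. set_log_levels(logging.WARNING, logging.DEBUG) for
# the -vv/--debug flag, keeping third-party loggers at WARNING while this
# project logs at DEBUG.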
|
modules/tags.py (new file, 133 lines)
@@ -0,0 +1,133 @@
|
||||
#!/usr/bin/env python3
|
||||
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, logging-fstring-interpolation
|
||||
"""
|
||||
All of the Zabbix tag related configuration
|
||||
"""
|
||||
from logging import getLogger
|
||||
|
||||
from modules.tools import field_mapper, remove_duplicates
|
||||
|
||||
|
||||
class ZabbixTags:
|
||||
"""Class that represents a Zabbix interface."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
nb,
|
||||
tag_map,
|
||||
tag_sync,
|
||||
tag_lower=True,
|
||||
tag_name=None,
|
||||
tag_value=None,
|
||||
logger=None,
|
||||
host=None,
|
||||
):
|
||||
self.nb = nb
|
||||
self.name = host if host else nb.name
|
||||
self.tag_map = tag_map
|
||||
self.logger = logger if logger else getLogger(__name__)
|
||||
self.tags = {}
|
||||
self.lower = tag_lower
|
||||
self.tag_name = tag_name
|
||||
self.tag_value = tag_value
|
||||
self.tag_sync = tag_sync
|
||||
self.sync = False
|
||||
self._set_config()
|
||||
|
||||
def __repr__(self):
|
||||
return self.name
|
||||
|
||||
def __str__(self):
|
||||
return self.__repr__()
|
||||
|
||||
def _set_config(self):
|
||||
"""
|
||||
Setup class
|
||||
"""
|
||||
if self.tag_sync:
|
||||
self.sync = True
|
||||
|
||||
return True
|
||||
|
||||
def validate_tag(self, tag_name):
|
||||
"""
|
||||
Validates tag name
|
||||
"""
|
||||
if tag_name and isinstance(tag_name, str) and len(tag_name) <= 256:
|
||||
return True
|
||||
return False
|
||||
|
||||
def validate_value(self, tag_value):
|
||||
"""
|
||||
Validates tag value
|
||||
"""
|
||||
if tag_value and isinstance(tag_value, str) and len(tag_value) <= 256:
|
||||
return True
|
||||
return False
|
||||
|
||||
def render_tag(self, tag_name, tag_value):
|
||||
"""
|
||||
Renders a tag
|
||||
"""
|
||||
tag = {}
|
||||
if self.validate_tag(tag_name):
|
||||
if self.lower:
|
||||
tag["tag"] = tag_name.lower()
|
||||
else:
|
||||
tag["tag"] = tag_name
|
||||
else:
|
||||
self.logger.warning(f"Tag {tag_name} is not a valid tag name, skipping.")
|
||||
return False
|
||||
|
||||
if self.validate_value(tag_value):
|
||||
if self.lower:
|
||||
tag["value"] = tag_value.lower()
|
||||
else:
|
||||
tag["value"] = tag_value
|
||||
else:
|
||||
self.logger.warning(
|
||||
f"Tag {tag_name} has an invalid value: '{tag_value}', skipping."
|
||||
)
|
||||
return False
|
||||
return tag
|
||||
|
||||
def generate(self):
|
||||
"""
|
||||
Generate full set of tags
|
||||
"""
|
||||
# pylint: disable=too-many-branches
|
||||
tags = []
|
||||
# Parse the field mapper for tags
|
||||
if self.tag_map:
|
||||
self.logger.debug(f"Host {self.nb.name}: Starting tag mapper")
|
||||
field_tags = field_mapper(self.nb.name, self.tag_map, self.nb, self.logger)
|
||||
for tag, value in field_tags.items():
|
||||
t = self.render_tag(tag, value)
|
||||
if t:
|
||||
tags.append(t)
|
||||
|
||||
# Parse NetBox config context for tags
|
||||
if (
|
||||
"zabbix" in self.nb.config_context
|
||||
and "tags" in self.nb.config_context["zabbix"]
|
||||
and isinstance(self.nb.config_context["zabbix"]["tags"], list)
|
||||
):
|
||||
for tag in self.nb.config_context["zabbix"]["tags"]:
|
||||
if isinstance(tag, dict):
|
||||
for tagname, value in tag.items():
|
||||
t = self.render_tag(tagname, value)
|
||||
if t:
|
||||
tags.append(t)
|
||||
|
||||
# Pull in NetBox device tags if tag_name is set
|
||||
if self.tag_name and isinstance(self.tag_name, str):
|
||||
for tag in self.nb.tags:
|
||||
if self.tag_value.lower() in ["display", "name", "slug"]:
|
||||
value = tag[self.tag_value]
|
||||
else:
|
||||
value = tag["name"]
|
||||
t = self.render_tag(self.tag_name, value)
|
||||
if t:
|
||||
tags.append(t)
|
||||
|
||||
return remove_duplicates(tags, sortkey="tag")
|
modules/tools.py (160 changed lines)
@@ -1,11 +1,14 @@
|
||||
"""A collection of tools used by several classes"""
|
||||
from modules.exceptions import HostgroupError
|
||||
|
||||
def convert_recordset(recordset):
|
||||
""" Converts netbox RedcordSet to list of dicts. """
|
||||
"""Converts netbox RedcordSet to list of dicts."""
|
||||
recordlist = []
|
||||
for record in recordset:
|
||||
recordlist.append(record.__dict__)
|
||||
return recordlist
|
||||
|
||||
|
||||
def build_path(endpoint, list_of_dicts):
|
||||
"""
|
||||
Builds a path list of related parent/child items.
|
||||
@ -13,16 +16,17 @@ def build_path(endpoint, list_of_dicts):
|
||||
be used in hostgroups.
|
||||
"""
|
||||
item_path = []
|
||||
itemlist = [i for i in list_of_dicts if i['name'] == endpoint]
|
||||
itemlist = [i for i in list_of_dicts if i["name"] == endpoint]
|
||||
item = itemlist[0] if len(itemlist) == 1 else None
|
||||
item_path.append(item['name'])
|
||||
while item['_depth'] > 0:
|
||||
itemlist = [i for i in list_of_dicts if i['name'] == str(item['parent'])]
|
||||
item_path.append(item["name"])
|
||||
while item["_depth"] > 0:
|
||||
itemlist = [i for i in list_of_dicts if i["name"] == str(item["parent"])]
|
||||
item = itemlist[0] if len(itemlist) == 1 else None
|
||||
item_path.append(item['name'])
|
||||
item_path.append(item["name"])
|
||||
item_path.reverse()
|
||||
return item_path
|
||||
|
||||
|
||||
def proxy_prepper(proxy_list, proxy_group_list):
|
||||
"""
|
||||
Function that takes 2 lists and converts them using a
|
||||
@ -42,3 +46,147 @@ def proxy_prepper(proxy_list, proxy_group_list):
|
||||
group["monitored_by"] = 2
|
||||
output.append(group)
|
||||
return output
|
||||
|
||||
|
||||
def field_mapper(host, mapper, nbdevice, logger):
|
||||
"""
|
||||
Maps NetBox field data to Zabbix properties.
|
||||
Used for Inventory, Usermacros and Tag mappings.
|
||||
"""
|
||||
data = {}
|
||||
# Let's build a dict for each property in the map
|
||||
for nb_field, zbx_field in mapper.items():
|
||||
field_list = nb_field.split("/") # convert str to list based on delimiter
|
||||
# start at the base of the dict...
|
||||
value = nbdevice
|
||||
# ... and step through the dict till we find the needed value
|
||||
for item in field_list:
|
||||
value = value[item] if value else None
|
||||
# Check if the result is usable and expected
|
||||
# We want to apply any int or float 0 values,
|
||||
# even if python thinks those are empty.
|
||||
if (value and isinstance(value, int | float | str)) or (
|
||||
isinstance(value, int | float) and int(value) == 0
|
||||
):
|
||||
data[zbx_field] = str(value)
|
||||
elif not value:
|
||||
# empty value should just be an empty string for API compatibility
|
||||
logger.debug(
|
||||
f"Host {host}: NetBox lookup for "
|
||||
f"'{nb_field}' returned an empty value"
|
||||
)
|
||||
data[zbx_field] = ""
|
||||
else:
|
||||
# Value is not a string or numeral, probably not what the user expected.
|
||||
logger.error(
|
||||
f"Host {host}: Lookup for '{nb_field}'"
|
||||
" returned an unexpected type: it will be skipped."
|
||||
)
|
||||
logger.debug(
|
||||
f"Host {host}: Field mapping complete. "
|
||||
f"Mapped {len(list(filter(None, data.values())))} field(s)"
|
||||
)
|
||||
return data
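# Example: for the default inventory mapping
# "device_type/manufacturer/name": "vendor", the key is split on "/" and the
# pynetbox object is walked as nbdevice["device_type"]["manufacturer"]["name"].
# If that resolves to "Cisco" (illustrative), data["vendor"] becomes "Cisco";
# an empty lookup maps to "" for API compatibility.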
|
||||
|
||||
|
||||
def remove_duplicates(input_list, sortkey=None):
|
||||
"""
|
||||
Removes duplicate entries from a list and sorts the list
|
||||
"""
|
||||
output_list = []
|
||||
if isinstance(input_list, list):
|
||||
output_list = [dict(t) for t in {tuple(d.items()) for d in input_list}]
|
||||
if sortkey and isinstance(sortkey, str):
|
||||
output_list.sort(key=lambda x: x[sortkey])
|
||||
return output_list
|
||||
|
||||
|
||||
def verify_hg_format(hg_format, device_cfs=None, vm_cfs=None, hg_type="dev", logger=None):
|
||||
"""
|
||||
Verifies hostgroup field format
|
||||
"""
|
||||
if not device_cfs:
|
||||
device_cfs = []
|
||||
if not vm_cfs:
|
||||
vm_cfs = []
|
||||
allowed_objects = {"dev": ["location",
|
||||
"rack",
|
||||
"role",
|
||||
"manufacturer",
|
||||
"region",
|
||||
"site",
|
||||
"site_group",
|
||||
"tenant",
|
||||
"tenant_group",
|
||||
"platform",
|
||||
"cluster"]
|
||||
,"vm": ["cluster_type",
|
||||
"role",
|
||||
"manufacturer",
|
||||
"region",
|
||||
"site",
|
||||
"site_group",
|
||||
"tenant",
|
||||
"tenant_group",
|
||||
"cluster",
|
||||
"device",
|
||||
"platform"]
|
||||
,"cfs": {"dev": [], "vm": []}
|
||||
}
|
||||
for cf in device_cfs:
|
||||
allowed_objects['cfs']['dev'].append(cf.name)
|
||||
for cf in vm_cfs:
|
||||
allowed_objects['cfs']['vm'].append(cf.name)
|
||||
hg_objects = []
|
||||
if isinstance(hg_format,list):
|
||||
for f in hg_format:
|
||||
hg_objects = hg_objects + f.split("/")
|
||||
else:
|
||||
hg_objects = hg_format.split("/")
|
||||
hg_objects = sorted(set(hg_objects))
|
||||
for hg_object in hg_objects:
|
||||
if (hg_object not in allowed_objects[hg_type] and
|
||||
hg_object not in allowed_objects['cfs'][hg_type]):
|
||||
e = (
|
||||
f"Hostgroup item {hg_object} is not valid. Make sure you"
|
||||
" use valid items and separate them with '/'."
|
||||
)
|
||||
logger.error(e)
|
||||
raise HostgroupError(e)
|
||||
|
||||
|
||||
def sanatize_log_output(data):
|
||||
"""
|
||||
Used for the update function to Zabbix which
|
||||
shows the data that its using to update the host.
|
||||
Removes any sensitive data from the input.
|
||||
"""
|
||||
if not isinstance(data, dict):
|
||||
return data
|
||||
sanitized_data = data.copy()
|
||||
# Check if there are any sensitive macros defined in the data
|
||||
if "macros" in data:
|
||||
for macro in sanitized_data["macros"]:
|
||||
# Check if macro is secret type
|
||||
if not macro["type"] == str(1):
|
||||
continue
|
||||
macro["value"] = "********"
|
||||
# Check for interface data
|
||||
if "interfaceid" in data:
|
||||
# Interface ID is a value which is most likely not helpful
|
||||
# in logging output or for troubleshooting.
|
||||
del sanitized_data["interfaceid"]
|
||||
# InterfaceID also hints that this is an interface update.
# A check is required if there are no macros used for SNMP security parameters.
|
||||
if not "details" in data:
|
||||
return sanitized_data
|
||||
for key, detail in sanitized_data["details"].items():
|
||||
# If the detail is a secret, we don't want to log it.
|
||||
if key in ("authpassphrase", "privpassphrase", "securityname", "community"):
|
||||
# Check if a macro is used.
|
||||
# If so then logging the output is not a security issue.
|
||||
if detail.startswith("{$") and detail.endswith("}"):
|
||||
continue
|
||||
# A macro is not used, so we sanitize the value.
|
||||
sanitized_data["details"][key] = "********"
|
||||
return sanitized_data
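# Example (illustrative): secret macros (type "1") and SNMP credentials that
# are not referenced via a {$MACRO} are masked before logging.
#
#   sanatize_log_output({"macros": [
#       {"macro": "{$PASS}", "type": "1", "value": "hunter2"}]})
#   # -> {"macros": [{"macro": "{$PASS}", "type": "1", "value": "********"}]}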
|
||||
|
modules/usermacros.py (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
#!/usr/bin/env python3
|
||||
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, logging-fstring-interpolation
|
||||
"""
|
||||
All of the Zabbix Usermacro related configuration
|
||||
"""
|
||||
from logging import getLogger
|
||||
from re import match
|
||||
|
||||
from modules.tools import field_mapper
|
||||
|
||||
|
||||
class ZabbixUsermacros:
|
||||
"""Class that represents Zabbix usermacros."""
|
||||
|
||||
def __init__(self, nb, usermacro_map, usermacro_sync, logger=None, host=None):
|
||||
self.nb = nb
|
||||
self.name = host if host else nb.name
|
||||
self.usermacro_map = usermacro_map
|
||||
self.logger = logger if logger else getLogger(__name__)
|
||||
self.usermacros = {}
|
||||
self.usermacro_sync = usermacro_sync
|
||||
self.sync = False
|
||||
self.force_sync = False
|
||||
self._set_config()
|
||||
|
||||
def __repr__(self):
|
||||
return self.name
|
||||
|
||||
def __str__(self):
|
||||
return self.__repr__()
|
||||
|
||||
def _set_config(self):
|
||||
"""
|
||||
Setup class
|
||||
"""
|
||||
if str(self.usermacro_sync).lower() == "full":
|
||||
self.sync = True
|
||||
self.force_sync = True
|
||||
elif self.usermacro_sync:
|
||||
self.sync = True
|
||||
return True
|
||||
|
||||
def validate_macro(self, macro_name):
|
||||
"""
|
||||
Validates usermacro name
|
||||
"""
|
||||
pattern = r"\{\$[A-Z0-9\._]*(\:.*)?\}"
|
||||
return match(pattern, macro_name)
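# Example: names matching the Zabbix usermacro syntax pass validation,
# including the {$MACRO:context} form (illustrative calls):
#
#   validate_macro("{$NB_URL}")              # matches
#   validate_macro("{$SNMP_COMMUNITY:ctx}")  # matches
#   validate_macro("NB_URL")                 # None -> treated as invalid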
|
||||
|
||||
def render_macro(self, macro_name, macro_properties):
|
||||
"""
|
||||
Renders a full usermacro from partial input
|
||||
"""
|
||||
macro = {}
|
||||
macrotypes = {"text": 0, "secret": 1, "vault": 2}
|
||||
if self.validate_macro(macro_name):
|
||||
macro["macro"] = str(macro_name)
|
||||
if isinstance(macro_properties, dict):
|
||||
if not "value" in macro_properties:
|
||||
self.logger.warning(f"Host {self.name}: Usermacro {macro_name} has "
|
||||
"no value in Netbox, skipping.")
|
||||
return False
|
||||
macro["value"] = macro_properties["value"]
|
||||
|
||||
if (
|
||||
"type" in macro_properties
|
||||
and macro_properties["type"].lower() in macrotypes
|
||||
):
|
||||
macro["type"] = str(macrotypes[macro_properties["type"]])
|
||||
else:
|
||||
macro["type"] = str(0)
|
||||
|
||||
if "description" in macro_properties and isinstance(
|
||||
macro_properties["description"], str
|
||||
):
|
||||
macro["description"] = macro_properties["description"]
|
||||
else:
|
||||
macro["description"] = ""
|
||||
|
||||
elif isinstance(macro_properties, str) and macro_properties:
|
||||
macro["value"] = macro_properties
|
||||
macro["type"] = str(0)
|
||||
macro["description"] = ""
|
||||
|
||||
else:
|
||||
self.logger.warning(f"Host {self.name}: Usermacro {macro_name} "
|
||||
"has no value, skipping.")
|
||||
return False
|
||||
else:
|
||||
self.logger.error(
|
||||
f"Host {self.name}: Usermacro {macro_name} is not a valid usermacro name, skipping."
|
||||
)
|
||||
return False
|
||||
return macro
|
||||
|
||||
def generate(self):
|
||||
"""
|
||||
Generate full set of Usermacros
|
||||
"""
|
||||
macros = []
|
||||
# Parse the field mapper for usermacros
|
||||
if self.usermacro_map:
|
||||
self.logger.debug(f"Host {self.nb.name}: Starting usermacro mapper")
|
||||
field_macros = field_mapper(
|
||||
self.nb.name, self.usermacro_map, self.nb, self.logger
|
||||
)
|
||||
for macro, value in field_macros.items():
|
||||
m = self.render_macro(macro, value)
|
||||
if m:
|
||||
macros.append(m)
|
||||
# Parse NetBox config context for usermacros
|
||||
if (
|
||||
"zabbix" in self.nb.config_context
|
||||
and "usermacros" in self.nb.config_context["zabbix"]
|
||||
):
|
||||
for macro, properties in self.nb.config_context["zabbix"][
|
||||
"usermacros"
|
||||
].items():
|
||||
m = self.render_macro(macro, properties)
|
||||
if m:
|
||||
macros.append(m)
|
||||
return macros
|
modules/virtual_machine.py (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
# pylint: disable=duplicate-code
|
||||
"""Module that hosts all functions for virtual machine processing"""
|
||||
from modules.device import PhysicalDevice
|
||||
from modules.exceptions import InterfaceConfigError, SyncInventoryError, TemplateError
|
||||
from modules.hostgroups import Hostgroup
|
||||
from modules.interface import ZabbixInterface
|
||||
from modules.config import load_config
|
||||
# Load config
|
||||
config = load_config()
|
||||
|
||||
|
||||
class VirtualMachine(PhysicalDevice):
|
||||
"""Model for virtual machines"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.hostgroup = None
|
||||
self.zbx_template_names = None
|
||||
|
||||
def _inventory_map(self):
|
||||
"""use VM inventory maps"""
|
||||
return config["vm_inventory_map"]
|
||||
|
||||
def _usermacro_map(self):
|
||||
"""use VM usermacro maps"""
|
||||
return config["vm_usermacro_map"]
|
||||
|
||||
def _tag_map(self):
|
||||
"""use VM tag maps"""
|
||||
return config["vm_tag_map"]
|
||||
|
||||
def set_hostgroup(self, hg_format, nb_site_groups, nb_regions):
|
||||
"""Set the hostgroup for this device"""
|
||||
# Create new Hostgroup instance
|
||||
hg = Hostgroup(
|
||||
"vm",
|
||||
self.nb,
|
||||
self.nb_api_version,
|
||||
logger=self.logger,
|
||||
nested_sitegroup_flag=config["traverse_site_groups"],
|
||||
nested_region_flag=config["traverse_regions"],
|
||||
nb_groups=nb_site_groups,
|
||||
nb_regions=nb_regions,
|
||||
)
|
||||
# Generate hostgroup based on hostgroup format
|
||||
if isinstance(hg_format, list):
|
||||
self.hostgroups = [hg.generate(f) for f in hg_format]
|
||||
else:
|
||||
self.hostgroups.append(hg.generate(hg_format))
|
||||
|
||||
def set_vm_template(self):
|
||||
"""Set Template for VMs. Overwrites default class
|
||||
to skip a lookup of custom fields."""
|
||||
# Gather templates ONLY from the device specific context
|
||||
try:
|
||||
self.zbx_template_names = self.get_templates_context()
|
||||
except TemplateError as e:
|
||||
self.logger.warning(e)
|
||||
return True
|
||||
|
||||
def setInterfaceDetails(self): # pylint: disable=invalid-name
|
||||
"""
|
||||
Overwrites device function to select an agent interface type by default
|
||||
Agent type interfaces are more likely to be used with VMs than SNMP
|
||||
"""
|
||||
try:
|
||||
# Initiate interface class
|
||||
interface = ZabbixInterface(self.nb.config_context, self.ip)
|
||||
# Check if NetBox has device context.
|
||||
# If not fall back to old config.
|
||||
if interface.get_context():
|
||||
# If device is SNMP type, add additional information.
|
||||
if interface.interface["type"] == 2:
|
||||
interface.set_snmp()
|
||||
else:
|
||||
interface.set_default_agent()
|
||||
return [interface.interface]
|
||||
except InterfaceConfigError as e:
|
||||
message = f"{self.name}: {e}"
|
||||
self.logger.warning(message)
|
||||
raise SyncInventoryError(message) from e
|
@ -1,54 +1,43 @@
|
||||
#!/usr/bin/env python3
|
||||
# pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation
|
||||
|
||||
"""Netbox to Zabbix sync script."""
|
||||
import logging
|
||||
"""NetBox to Zabbix sync script."""
|
||||
import argparse
|
||||
from os import environ, path, sys
|
||||
import logging
|
||||
import ssl
|
||||
from os import environ, sys
|
||||
|
||||
from pynetbox import api
|
||||
from zabbix_utils import ZabbixAPI, APIRequestError, ProcessingError
|
||||
from modules.device import NetworkDevice
|
||||
from modules.tools import convert_recordset, proxy_prepper
|
||||
from modules.exceptions import EnvironmentVarError, HostgroupError, SyncError
|
||||
try:
|
||||
from config import (
|
||||
templates_config_context,
|
||||
templates_config_context_overrule,
|
||||
clustering, create_hostgroups,
|
||||
create_journal, full_proxy_sync,
|
||||
zabbix_device_removal,
|
||||
zabbix_device_disable,
|
||||
hostgroup_format,
|
||||
nb_device_filter
|
||||
)
|
||||
except ModuleNotFoundError:
|
||||
print("Configuration file config.py not found in main directory."
|
||||
"Please create the file or rename the config.py.example file to config.py.")
|
||||
sys.exit(1)
|
||||
from pynetbox.core.query import RequestError as NBRequestError
|
||||
from requests.exceptions import ConnectionError as RequestsConnectionError
|
||||
from zabbix_utils import APIRequestError, ProcessingError, ZabbixAPI
|
||||
from modules.config import load_config
|
||||
from modules.device import PhysicalDevice
|
||||
from modules.exceptions import EnvironmentVarError, SyncError
|
||||
from modules.logging import get_logger, set_log_levels, setup_logger
|
||||
from modules.tools import convert_recordset, proxy_prepper, verify_hg_format
|
||||
from modules.virtual_machine import VirtualMachine
|
||||
|
||||
# Set logging
|
||||
log_format = logging.Formatter('%(asctime)s - %(name)s - '
|
||||
'%(levelname)s - %(message)s')
|
||||
lgout = logging.StreamHandler()
|
||||
lgout.setFormatter(log_format)
|
||||
lgout.setLevel(logging.DEBUG)
|
||||
config = load_config()
|
||||
|
||||
lgfile = logging.FileHandler(path.join(path.dirname(
|
||||
path.realpath(__file__)), "sync.log"))
|
||||
lgfile.setFormatter(log_format)
|
||||
lgfile.setLevel(logging.DEBUG)
|
||||
|
||||
logger = logging.getLogger("Netbox-Zabbix-sync")
|
||||
logger.addHandler(lgout)
|
||||
logger.addHandler(lgfile)
|
||||
logger.setLevel(logging.WARNING)
|
||||
setup_logger()
|
||||
logger = get_logger()
|
||||
|
||||
|
||||
def main(arguments):
|
||||
"""Run the sync process."""
|
||||
# pylint: disable=too-many-branches, too-many-statements
|
||||
# set environment variables
|
||||
if arguments.verbose:
|
||||
logger.setLevel(logging.DEBUG)
|
||||
set_log_levels(logging.WARNING, logging.INFO)
|
||||
if arguments.debug:
|
||||
set_log_levels(logging.WARNING, logging.DEBUG)
|
||||
if arguments.debug_all:
|
||||
set_log_levels(logging.DEBUG, logging.DEBUG)
|
||||
if arguments.quiet:
|
||||
set_log_levels(logging.ERROR, logging.ERROR)
|
||||
|
||||
env_vars = ["ZABBIX_HOST", "NETBOX_HOST", "NETBOX_TOKEN"]
|
||||
if "ZABBIX_TOKEN" in environ:
|
||||
env_vars.append("ZABBIX_TOKEN")
|
||||
@ -72,129 +61,236 @@ def main(arguments):
|
||||
zabbix_host = environ.get("ZABBIX_HOST")
|
||||
netbox_host = environ.get("NETBOX_HOST")
|
||||
netbox_token = environ.get("NETBOX_TOKEN")
|
||||
# Set Netbox API
|
||||
# Set NetBox API
|
||||
netbox = api(netbox_host, token=netbox_token, threading=True)
|
||||
# Check if the provided Hostgroup layout is valid
|
||||
hg_objects = hostgroup_format.split("/")
|
||||
allowed_objects = ["dev_location", "dev_role", "manufacturer", "region",
|
||||
"site", "site_group", "tenant", "tenant_group"]
|
||||
# Create API call to get all custom fields which are on the device objects
|
||||
device_cfs = netbox.extras.custom_fields.filter(type="text", content_type_id=23)
|
||||
for cf in device_cfs:
|
||||
allowed_objects.append(cf.name)
|
||||
for hg_object in hg_objects:
|
||||
if hg_object not in allowed_objects:
|
||||
e = (f"Hostgroup item {hg_object} is not valid. Make sure you"
|
||||
" use valid items and seperate them with '/'.")
|
||||
logger.error(e)
|
||||
raise HostgroupError(e)
|
||||
try:
|
||||
# Get NetBox version
|
||||
nb_version = netbox.version
|
||||
logger.debug(f"NetBox version is {nb_version}.")
|
||||
except RequestsConnectionError:
|
||||
logger.error(
|
||||
f"Unable to connect to NetBox with URL {netbox_host}."
|
||||
" Please check the URL and status of NetBox."
|
||||
)
|
||||
sys.exit(1)
|
||||
except NBRequestError as e:
|
||||
logger.error(f"NetBox error: {e}")
|
||||
sys.exit(1)
|
||||
# Check if the provided Hostgroup layout is valid
|
||||
device_cfs = []
|
||||
vm_cfs = []
|
||||
device_cfs = list(
|
||||
netbox.extras.custom_fields.filter(type="text", content_types="dcim.device")
|
||||
)
|
||||
verify_hg_format(config["hostgroup_format"],
|
||||
device_cfs=device_cfs, hg_type="dev", logger=logger)
|
||||
if config["sync_vms"]:
|
||||
vm_cfs = list(
|
||||
netbox.extras.custom_fields.filter(type="text",
|
||||
content_types="virtualization.virtualmachine")
|
||||
)
|
||||
verify_hg_format(config["vm_hostgroup_format"], vm_cfs=vm_cfs, hg_type="vm", logger=logger)
|
||||
# Set Zabbix API
|
||||
try:
|
||||
ssl_ctx = ssl.create_default_context()
|
||||
|
||||
# If a custom CA bundle is set for pynetbox (requests), also use it for the Zabbix API
|
||||
if environ.get("REQUESTS_CA_BUNDLE", None):
|
||||
ssl_ctx.load_verify_locations(environ["REQUESTS_CA_BUNDLE"])
|
||||
|
||||
if not zabbix_token:
|
||||
zabbix = ZabbixAPI(zabbix_host, user=zabbix_user, password=zabbix_pass)
|
||||
zabbix = ZabbixAPI(
|
||||
zabbix_host, user=zabbix_user, password=zabbix_pass, ssl_context=ssl_ctx
|
||||
)
|
||||
else:
|
||||
zabbix = ZabbixAPI(zabbix_host, token=zabbix_token)
|
||||
zabbix = ZabbixAPI(zabbix_host, token=zabbix_token, ssl_context=ssl_ctx)
|
||||
zabbix.check_auth()
|
||||
except (APIRequestError, ProcessingError) as e:
|
||||
except (APIRequestError, ProcessingError) as e:
|
||||
e = f"Zabbix returned the following error: {str(e)}"
|
||||
logger.error(e)
|
||||
sys.exit(1)
|
||||
# Set API parameter mapping based on API version
|
||||
if not str(zabbix.version).startswith('7'):
|
||||
if not str(zabbix.version).startswith("7"):
|
||||
proxy_name = "host"
|
||||
else:
|
||||
proxy_name = "name"
|
||||
# Get all Zabbix and Netbox data
|
||||
netbox_devices = netbox.dcim.devices.filter(**nb_device_filter)
|
||||
# Get all Zabbix and NetBox data
|
||||
netbox_devices = list(netbox.dcim.devices.filter(**config["nb_device_filter"]))
|
||||
netbox_vms = []
|
||||
if config["sync_vms"]:
|
||||
netbox_vms = list(
|
||||
netbox.virtualization.virtual_machines.filter(**config["nb_vm_filter"]))
|
||||
netbox_site_groups = convert_recordset((netbox.dcim.site_groups.all()))
|
||||
netbox_regions = convert_recordset(netbox.dcim.regions.all())
|
||||
netbox_journals = netbox.extras.journal_entries
|
||||
zabbix_groups = zabbix.hostgroup.get(output=['groupid', 'name'])
|
||||
zabbix_templates = zabbix.template.get(output=['templateid', 'name'])
|
||||
zabbix_proxies = zabbix.proxy.get(output=['proxyid', proxy_name])
|
||||
zabbix_groups = zabbix.hostgroup.get(output=["groupid", "name"])
|
||||
zabbix_templates = zabbix.template.get(output=["templateid", "name"])
|
||||
zabbix_proxies = zabbix.proxy.get(output=["proxyid", proxy_name])
|
||||
# Set empty list for proxy processing Zabbix <= 6
|
||||
zabbix_proxygroups = []
|
||||
if str(zabbix.version).startswith('7'):
|
||||
if str(zabbix.version).startswith("7"):
|
||||
zabbix_proxygroups = zabbix.proxygroup.get(output=["proxy_groupid", "name"])
|
||||
# Sanitize proxy data
|
||||
if proxy_name == "host":
|
||||
for proxy in zabbix_proxies:
|
||||
proxy['name'] = proxy.pop('host')
|
||||
proxy["name"] = proxy.pop("host")
|
||||
# Prepare list of all proxy and proxy_groups
|
||||
zabbix_proxy_list = proxy_prepper(zabbix_proxies, zabbix_proxygroups)
|
||||
|
||||
# Get Netbox API version
|
||||
nb_version = netbox.version
|
||||
# Go through all NetBox devices
|
||||
for nb_vm in netbox_vms:
|
||||
try:
|
||||
vm = VirtualMachine(nb_vm, zabbix, netbox_journals, nb_version,
|
||||
config["create_journal"], logger)
|
||||
logger.debug(f"Host {vm.name}: started operations on VM.")
|
||||
vm.set_vm_template()
|
||||
# Check if a valid template has been found for this VM.
|
||||
if not vm.zbx_template_names:
|
||||
continue
|
||||
vm.set_hostgroup(config["vm_hostgroup_format"],
|
||||
netbox_site_groups, netbox_regions)
|
||||
# Check if a valid hostgroup has been found for this VM.
|
||||
if not vm.hostgroups:
|
||||
continue
|
||||
vm.set_inventory(nb_vm)
|
||||
vm.set_usermacros()
|
||||
vm.set_tags()
|
||||
# Checks if device is in cleanup state
|
||||
if vm.status in config["zabbix_device_removal"]:
|
||||
if vm.zabbix_id:
|
||||
# Delete device from Zabbix
|
||||
# and remove hostID from NetBox.
|
||||
vm.cleanup()
|
||||
logger.debug(f"VM {vm.name}: cleanup complete")
|
||||
continue
|
||||
# Device has been added to NetBox
|
||||
# but is not in Activate state
|
||||
logger.info(
|
||||
f"VM {vm.name}: skipping since this VM is "
|
||||
f"not in the active state."
|
||||
)
|
||||
continue
|
||||
# Check if the VM is in the disabled state
|
||||
if vm.status in config["zabbix_device_disable"]:
|
||||
vm.zabbix_state = 1
|
||||
# Add hostgroup if config is set
|
||||
if config["create_hostgroups"]:
|
||||
# Create new hostgroup. Potentially multiple groups if nested
|
||||
hostgroups = vm.createZabbixHostgroup(zabbix_groups)
|
||||
# go through all newly created hostgroups
|
||||
for group in hostgroups:
|
||||
# Add new hostgroups to zabbix group list
|
||||
zabbix_groups.append(group)
|
||||
# Check if VM is already in Zabbix
|
||||
if vm.zabbix_id:
|
||||
vm.ConsistencyCheck(
|
||||
zabbix_groups,
|
||||
zabbix_templates,
|
||||
zabbix_proxy_list,
|
||||
config["full_proxy_sync"],
|
||||
config["create_hostgroups"],
|
||||
)
|
||||
continue
|
||||
# Add VM to Zabbix
|
||||
vm.createInZabbix(zabbix_groups, zabbix_templates, zabbix_proxy_list)
|
||||
except SyncError:
|
||||
pass
|
||||
|
||||
# Go through all Netbox devices
|
||||
for nb_device in netbox_devices:
|
||||
try:
|
||||
# Set device instance set data such as hostgroup and template information.
|
||||
device = NetworkDevice(nb_device, zabbix, netbox_journals, nb_version,
|
||||
create_journal, logger)
|
||||
device.set_hostgroup(hostgroup_format,netbox_site_groups,netbox_regions)
|
||||
device.set_template(templates_config_context, templates_config_context_overrule)
|
||||
device = PhysicalDevice(nb_device, zabbix, netbox_journals, nb_version,
|
||||
config["create_journal"], logger)
|
||||
logger.debug(f"Host {device.name}: started operations on device.")
|
||||
device.set_template(config["templates_config_context"],
|
||||
config["templates_config_context_overrule"])
|
||||
# Check if a valid template has been found for this VM.
|
||||
if not device.zbx_template_names:
|
||||
continue
|
||||
device.set_hostgroup(
|
||||
config["hostgroup_format"], netbox_site_groups, netbox_regions)
|
||||
# Check if a valid hostgroup has been found for this VM.
|
||||
if not device.hostgroups:
|
||||
continue
|
||||
device.set_inventory(nb_device)
|
||||
device.set_usermacros()
|
||||
device.set_tags()
|
||||
# Checks if device is part of cluster.
|
||||
# Requires clustering variable
|
||||
if device.isCluster() and clustering:
|
||||
if device.isCluster() and config["clustering"]:
|
||||
# Check if device is primary or secondary
|
||||
if device.promoteMasterDevice():
|
||||
e = (f"Device {device.name} is "
|
||||
f"part of cluster and primary.")
|
||||
e = f"Device {device.name}: is " f"part of cluster and primary."
|
||||
logger.info(e)
|
||||
else:
|
||||
# Device is secondary in cluster.
|
||||
# Don't continue with this device.
|
||||
e = (f"Device {device.name} is part of cluster "
|
||||
f"but not primary. Skipping this host...")
|
||||
e = (
|
||||
f"Device {device.name}: is part of cluster "
|
||||
f"but not primary. Skipping this host..."
|
||||
)
|
||||
logger.info(e)
|
||||
continue
|
||||
# Checks if device is in cleanup state
|
||||
if device.status in zabbix_device_removal:
|
||||
if device.status in config["zabbix_device_removal"]:
|
||||
if device.zabbix_id:
|
||||
# Delete device from Zabbix
|
||||
# and remove hostID from Netbox.
|
||||
# and remove hostID from NetBox.
|
||||
device.cleanup()
|
||||
logger.info(f"Cleaned up host {device.name}.")
|
||||
logger.info(f"Device {device.name}: cleanup complete")
|
||||
continue
|
||||
# Device has been added to Netbox
|
||||
# Device has been added to NetBox
|
||||
# but is not in Activate state
|
||||
logger.info(f"Skipping host {device.name} since its "
|
||||
f"not in the active state.")
|
||||
logger.info(
|
||||
f"Device {device.name}: skipping since this device is "
|
||||
f"not in the active state."
|
||||
)
|
||||
continue
|
||||
# Check if the device is in the disabled state
|
||||
if device.status in zabbix_device_disable:
|
||||
if device.status in config["zabbix_device_disable"]:
|
||||
device.zabbix_state = 1
|
||||
# Add hostgroup if config is set
|
||||
if config["create_hostgroups"]:
|
||||
# Create new hostgroup. Potentially multiple groups if nested
|
||||
hostgroups = device.createZabbixHostgroup(zabbix_groups)
|
||||
# go through all newly created hostgroups
|
||||
for group in hostgroups:
|
||||
# Add new hostgroups to zabbix group list
|
||||
zabbix_groups.append(group)
|
||||
# Check if device is already in Zabbix
|
||||
if device.zabbix_id:
|
||||
device.ConsistencyCheck(zabbix_groups, zabbix_templates,
|
||||
zabbix_proxy_list, full_proxy_sync,
|
||||
create_hostgroups)
|
||||
device.ConsistencyCheck(
|
||||
zabbix_groups,
|
||||
zabbix_templates,
|
||||
zabbix_proxy_list,
|
||||
config["full_proxy_sync"],
|
||||
config["create_hostgroups"],
|
||||
)
|
||||
continue
|
||||
# Add hostgroup is config is set
|
||||
# and Hostgroup is not present in Zabbix
|
||||
if create_hostgroups:
|
||||
for group in zabbix_groups:
|
||||
# If hostgroup is already present in Zabbix
|
||||
if group["name"] == device.hostgroup:
|
||||
break
|
||||
else:
|
||||
# Create new hostgroup
|
||||
hostgroup = device.createZabbixHostgroup()
|
||||
zabbix_groups.append(hostgroup)
|
||||
# Add device to Zabbix
|
||||
device.createInZabbix(zabbix_groups, zabbix_templates,
|
||||
zabbix_proxy_list)
|
||||
device.createInZabbix(zabbix_groups, zabbix_templates, zabbix_proxy_list)
|
||||
except SyncError:
|
||||
pass
|
||||
zabbix.logout()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description='A script to sync Zabbix with Netbox device data.'
|
||||
description="A script to sync Zabbix with NetBox device data."
|
||||
)
|
||||
parser.add_argument("-v", "--verbose", help="Turn on debugging.",
|
||||
action="store_true")
|
||||
parser.add_argument(
|
||||
"-v", "--verbose", help="Turn on debugging.", action="store_true"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-vv", "--debug", help="Turn on debugging.", action="store_true"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-vvv",
|
||||
"--debug-all",
|
||||
help="Turn on debugging for all modules.",
|
||||
action="store_true",
|
||||
)
|
||||
parser.add_argument("-q", "--quiet", help="Turn off warnings.", action="store_true")
|
||||
args = parser.parse_args()
|
||||
main(args)
|
||||
|
@ -1,2 +1,2 @@
|
||||
pynetbox
|
||||
zabbix_utils
|
||||
pynetbox==7.4.1
|
||||
zabbix-utils==2.0.2
|
||||
|
tests/__init__.py (new file, 0 lines)
tests/test_configuration_parsing.py (new file, 139 lines)
@@ -0,0 +1,139 @@
|
||||
"""Tests for configuration parsing in the modules.config module."""
|
||||
from unittest.mock import patch, MagicMock
|
||||
import os
|
||||
from modules.config import load_config, DEFAULT_CONFIG, load_config_file, load_env_variable
|
||||
|
||||
|
||||
def test_load_config_defaults():
|
||||
"""Test that load_config returns default values when no config file or env vars are present"""
|
||||
with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \
|
||||
patch('modules.config.load_env_variable', return_value=None):
|
||||
config = load_config()
|
||||
assert config == DEFAULT_CONFIG
|
||||
assert config["templates_config_context"] is False
|
||||
assert config["create_hostgroups"] is True
|
||||
|
||||
|
||||
def test_load_config_file():
|
||||
"""Test that load_config properly loads values from config file"""
|
||||
mock_config = DEFAULT_CONFIG.copy()
|
||||
mock_config["templates_config_context"] = True
|
||||
mock_config["sync_vms"] = True
|
||||
|
||||
with patch('modules.config.load_config_file', return_value=mock_config), \
|
||||
patch('modules.config.load_env_variable', return_value=None):
|
||||
config = load_config()
|
||||
assert config["templates_config_context"] is True
|
||||
assert config["sync_vms"] is True
|
||||
# Unchanged values should remain as defaults
|
||||
assert config["create_journal"] is False
|
||||
|
||||
|
||||
def test_load_env_variables():
|
||||
"""Test that load_config properly loads values from environment variables"""
|
||||
# Mock env variable loading to return values for specific keys
|
||||
def mock_load_env(key):
|
||||
if key == "sync_vms":
|
||||
return True
|
||||
if key == "create_journal":
|
||||
return True
|
||||
return None
|
||||
|
||||
with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \
|
||||
patch('modules.config.load_env_variable', side_effect=mock_load_env):
|
||||
config = load_config()
|
||||
assert config["sync_vms"] is True
|
||||
assert config["create_journal"] is True
|
||||
# Unchanged values should remain as defaults
|
||||
assert config["templates_config_context"] is False
|
||||
|
||||
|
||||
def test_env_vars_override_config_file():
|
||||
"""Test that environment variables override values from config file"""
|
||||
mock_config = DEFAULT_CONFIG.copy()
|
||||
mock_config["templates_config_context"] = True
|
||||
mock_config["sync_vms"] = False
|
||||
|
||||
# Mock env variable that will override the config file value
|
||||
def mock_load_env(key):
|
||||
if key == "sync_vms":
|
||||
return True
|
||||
return None
|
||||
|
||||
with patch('modules.config.load_config_file', return_value=mock_config), \
|
||||
patch('modules.config.load_env_variable', side_effect=mock_load_env):
|
||||
config = load_config()
|
||||
# This should be overridden by the env var
|
||||
assert config["sync_vms"] is True
|
||||
# This should remain from the config file
|
||||
assert config["templates_config_context"] is True
|
||||
|
||||
|
||||
def test_load_config_file_function():
|
||||
"""Test the load_config_file function directly"""
|
||||
# Test when the file exists
|
||||
with patch('pathlib.Path.exists', return_value=True), \
|
||||
patch('importlib.util.spec_from_file_location') as mock_spec:
|
||||
# Setup the mock module with attributes
|
||||
mock_module = MagicMock()
|
||||
mock_module.templates_config_context = True
|
||||
mock_module.sync_vms = True
|
||||
|
||||
# Setup the mock spec
|
||||
mock_spec_instance = MagicMock()
|
||||
mock_spec.return_value = mock_spec_instance
|
||||
mock_spec_instance.loader.exec_module = lambda x: None
|
||||
|
||||
# Patch module_from_spec to return our mock module
|
||||
with patch('importlib.util.module_from_spec', return_value=mock_module):
|
||||
config = load_config_file(DEFAULT_CONFIG.copy())
|
||||
assert config["templates_config_context"] is True
|
||||
assert config["sync_vms"] is True
|
||||
|
||||
|
||||
def test_load_config_file_not_found():
|
||||
"""Test load_config_file when the config file doesn't exist"""
|
||||
with patch('pathlib.Path.exists', return_value=False):
|
||||
result = load_config_file(DEFAULT_CONFIG.copy())
|
||||
# Should return a dict equal to DEFAULT_CONFIG, not a new object
|
||||
assert result == DEFAULT_CONFIG
|
||||
|
||||
|
||||
def test_load_env_variable_function():
|
||||
"""Test the load_env_variable function directly"""
|
||||
# Create a real environment variable for testing with correct prefix and uppercase
|
||||
test_var = "NBZX_TEMPLATES_CONFIG_CONTEXT"
|
||||
original_env = os.environ.get(test_var, None)
|
||||
try:
|
||||
# Set the environment variable with the proper prefix and case
|
||||
os.environ[test_var] = "True"
|
||||
|
||||
# Test that it's properly read (using lowercase in the function call)
|
||||
value = load_env_variable("templates_config_context")
|
||||
assert value == "True"
|
||||
|
||||
# Test when the environment variable doesn't exist
|
||||
value = load_env_variable("nonexistent_variable")
|
||||
assert value is None
|
||||
finally:
|
||||
# Clean up - restore original environment
|
||||
if original_env is not None:
|
||||
os.environ[test_var] = original_env
|
||||
else:
|
||||
os.environ.pop(test_var, None)
|
||||
|
||||
|
||||
def test_load_config_file_exception_handling():
|
||||
"""Test that load_config_file handles exceptions gracefully"""
|
||||
# The current implementation of load_config_file does not catch import errors
# itself, so this test only verifies that the exception propagates to the caller
|
||||
with patch('pathlib.Path.exists', return_value=True), \
|
||||
patch('importlib.util.spec_from_file_location', side_effect=Exception("Import error")):
|
||||
# Since the current implementation doesn't handle exceptions, we should
|
||||
# expect an exception to be raised
|
||||
try:
|
||||
load_config_file(DEFAULT_CONFIG.copy())
|
||||
assert False, "An exception should have been raised"
|
||||
except Exception: # pylint: disable=broad-except
|
||||
# This is expected
|
||||
pass
|
tests/test_device_deletion.py (new file, 166 lines)
@@ -0,0 +1,166 @@
|
||||
"""Tests for device deletion functionality in the PhysicalDevice class."""
|
||||
import unittest
|
||||
from unittest.mock import MagicMock, patch
|
||||
from zabbix_utils import APIRequestError
|
||||
from modules.device import PhysicalDevice
|
||||
from modules.exceptions import SyncExternalError
|
||||
|
||||
|
||||
class TestDeviceDeletion(unittest.TestCase):
|
||||
"""Test class for device deletion functionality."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
# Create mock NetBox device
|
||||
self.mock_nb_device = MagicMock()
|
||||
self.mock_nb_device.id = 123
|
||||
self.mock_nb_device.name = "test-device"
|
||||
self.mock_nb_device.status.label = "Decommissioning"
|
||||
self.mock_nb_device.custom_fields = {"zabbix_hostid": "456"}
|
||||
self.mock_nb_device.config_context = {}
|
||||
|
||||
# Set up a primary IP
|
||||
primary_ip = MagicMock()
|
||||
primary_ip.address = "192.168.1.1/24"
|
||||
self.mock_nb_device.primary_ip = primary_ip
|
||||
|
||||
# Create mock Zabbix API
|
||||
self.mock_zabbix = MagicMock()
|
||||
self.mock_zabbix.version = "6.0"
|
||||
|
||||
# Set up mock host.get response
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
|
||||
# Mock NetBox journal class
|
||||
self.mock_nb_journal = MagicMock()
|
||||
|
||||
# Create logger mock
|
||||
self.mock_logger = MagicMock()
|
||||
|
||||
# Create PhysicalDevice instance with mocks
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
self.device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
journal=True,
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
def test_cleanup_successful_deletion(self):
|
||||
"""Test successful device deletion from Zabbix."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
self.mock_zabbix.host.delete.return_value = {"hostids": ["456"]}
|
||||
|
||||
# Execute
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify
|
||||
self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[])
|
||||
self.mock_zabbix.host.delete.assert_called_once_with('456')
|
||||
self.mock_nb_device.save.assert_called_once()
|
||||
self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"])
|
||||
self.mock_logger.info.assert_called_with(f"Host {self.device.name}: "
|
||||
"Deleted host from Zabbix.")
|
||||
|
||||
def test_cleanup_device_already_deleted(self):
|
||||
"""Test cleanup when device is already deleted from Zabbix."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [] # Empty list means host not found
|
||||
|
||||
# Execute
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify
|
||||
self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[])
|
||||
self.mock_zabbix.host.delete.assert_not_called()
|
||||
self.mock_nb_device.save.assert_called_once()
|
||||
self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"])
|
||||
self.mock_logger.info.assert_called_with(
|
||||
f"Host {self.device.name}: was already deleted from Zabbix. Removed link in NetBox.")
|
||||
|
||||
def test_cleanup_api_error(self):
|
||||
"""Test cleanup when Zabbix API returns an error."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
self.mock_zabbix.host.delete.side_effect = APIRequestError("API Error")
|
||||
|
||||
# Execute and verify
|
||||
with self.assertRaises(SyncExternalError):
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify correct calls were made
|
||||
self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[])
|
||||
self.mock_zabbix.host.delete.assert_called_once_with('456')
|
||||
self.mock_nb_device.save.assert_not_called()
|
||||
self.mock_logger.error.assert_called()
|
||||
|
||||
def test_zeroize_cf(self):
|
||||
"""Test _zeroize_cf method that clears the custom field."""
|
||||
# Execute
|
||||
self.device._zeroize_cf() # pylint: disable=protected-access
|
||||
|
||||
# Verify
|
||||
self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"])
|
||||
self.mock_nb_device.save.assert_called_once()
|
||||
|
||||
def test_create_journal_entry(self):
|
||||
"""Test create_journal_entry method."""
|
||||
# Setup
|
||||
test_message = "Test journal entry"
|
||||
|
||||
# Execute
|
||||
result = self.device.create_journal_entry("info", test_message)
|
||||
|
||||
# Verify
|
||||
self.assertTrue(result)
|
||||
self.mock_nb_journal.create.assert_called_once()
|
||||
journal_entry = self.mock_nb_journal.create.call_args[0][0]
|
||||
self.assertEqual(journal_entry["assigned_object_type"], "dcim.device")
|
||||
self.assertEqual(journal_entry["assigned_object_id"], 123)
|
||||
self.assertEqual(journal_entry["kind"], "info")
|
||||
self.assertEqual(journal_entry["comments"], test_message)
|
||||
|
||||
def test_create_journal_entry_invalid_severity(self):
|
||||
"""Test create_journal_entry with invalid severity."""
|
||||
# Execute
|
||||
result = self.device.create_journal_entry("invalid", "Test message")
|
||||
|
||||
# Verify
|
||||
self.assertFalse(result)
|
||||
self.mock_nb_journal.create.assert_not_called()
|
||||
self.mock_logger.warning.assert_called()
|
||||
|
||||
def test_create_journal_entry_when_disabled(self):
|
||||
"""Test create_journal_entry when journaling is disabled."""
|
||||
# Setup - create device with journal=False
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
journal=False, # Disable journaling
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Execute
|
||||
result = device.create_journal_entry("info", "Test message")
|
||||
|
||||
# Verify
|
||||
self.assertFalse(result)
|
||||
self.mock_nb_journal.create.assert_not_called()
|
||||
|
||||
def test_cleanup_updates_journal(self):
|
||||
"""Test that cleanup method creates a journal entry."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
|
||||
# Execute
|
||||
with patch.object(self.device, 'create_journal_entry') as mock_journal_entry:
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify
|
||||
mock_journal_entry.assert_called_once_with("warning", "Deleted host from Zabbix")
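# Taken together, the tests in this class describe the assumed cleanup() flow:
# look up the host in Zabbix via the ID stored in the NetBox custom field,
# delete it if it still exists, clear the custom field (_zeroize_cf) and save
# the NetBox object, write a journal entry, and raise SyncExternalError when
# the Zabbix API call fails.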
|
tests/test_hostgroups.py (new file, 340 lines)
@@ -0,0 +1,340 @@
|
||||
"""Tests for the Hostgroup class in the hostgroups module."""
|
||||
import unittest
|
||||
from unittest.mock import MagicMock, patch, call
|
||||
from modules.hostgroups import Hostgroup
|
||||
from modules.exceptions import HostgroupError
|
||||
|
||||
|
||||
class TestHostgroups(unittest.TestCase):
|
||||
"""Test class for Hostgroup functionality."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
# Create mock logger
|
||||
self.mock_logger = MagicMock()
|
||||
|
||||
# *** Mock NetBox Device setup ***
|
||||
# Create mock device with all properties
|
||||
self.mock_device = MagicMock()
|
||||
self.mock_device.name = "test-device"
|
||||
|
||||
# Set up site information
|
||||
site = MagicMock()
|
||||
site.name = "TestSite"
|
||||
|
||||
# Set up region information
|
||||
region = MagicMock()
|
||||
region.name = "TestRegion"
|
||||
# Ensure region string representation returns the name
|
||||
region.__str__.return_value = "TestRegion"
|
||||
site.region = region
|
||||
|
||||
# Set up site group information
|
||||
site_group = MagicMock()
|
||||
site_group.name = "TestSiteGroup"
|
||||
# Ensure site group string representation returns the name
|
||||
site_group.__str__.return_value = "TestSiteGroup"
|
||||
site.group = site_group
|
||||
|
||||
self.mock_device.site = site
|
||||
|
||||
# Set up role information (varies based on NetBox version)
|
||||
self.mock_device_role = MagicMock()
|
||||
self.mock_device_role.name = "TestRole"
|
||||
# Ensure string representation returns the name
|
||||
self.mock_device_role.__str__.return_value = "TestRole"
|
||||
self.mock_device.device_role = self.mock_device_role
|
||||
self.mock_device.role = self.mock_device_role
|
||||
|
||||
# Set up tenant information
|
||||
tenant = MagicMock()
|
||||
tenant.name = "TestTenant"
|
||||
# Ensure tenant string representation returns the name
|
||||
tenant.__str__.return_value = "TestTenant"
|
||||
tenant_group = MagicMock()
|
||||
tenant_group.name = "TestTenantGroup"
|
||||
# Ensure tenant group string representation returns the name
|
||||
tenant_group.__str__.return_value = "TestTenantGroup"
|
||||
tenant.group = tenant_group
|
||||
self.mock_device.tenant = tenant
|
||||
|
||||
# Set up platform information
|
||||
platform = MagicMock()
|
||||
platform.name = "TestPlatform"
|
||||
self.mock_device.platform = platform
|
||||
|
||||
# Device-specific properties
|
||||
device_type = MagicMock()
|
||||
manufacturer = MagicMock()
|
||||
manufacturer.name = "TestManufacturer"
|
||||
device_type.manufacturer = manufacturer
|
||||
self.mock_device.device_type = device_type
|
||||
|
||||
location = MagicMock()
|
||||
location.name = "TestLocation"
|
||||
# Ensure location string representation returns the name
|
||||
location.__str__.return_value = "TestLocation"
|
||||
self.mock_device.location = location
|
||||
|
||||
# Custom fields
|
||||
self.mock_device.custom_fields = {"test_cf": "TestCF"}
|
||||
|
||||
# *** Mock NetBox VM setup ***
|
||||
# Create mock VM with all properties
|
||||
self.mock_vm = MagicMock()
|
||||
self.mock_vm.name = "test-vm"
|
||||
|
||||
# Reuse site from device
|
||||
self.mock_vm.site = site
|
||||
|
||||
# Set up role for VM
|
||||
self.mock_vm.role = self.mock_device_role
|
||||
|
||||
# Set up tenant for VM (same as device)
|
||||
self.mock_vm.tenant = tenant
|
||||
|
||||
# Set up platform for VM (same as device)
|
||||
self.mock_vm.platform = platform
|
||||
|
||||
# VM-specific properties
|
||||
cluster = MagicMock()
|
||||
cluster.name = "TestCluster"
|
||||
cluster_type = MagicMock()
|
||||
cluster_type.name = "TestClusterType"
|
||||
cluster.type = cluster_type
|
||||
self.mock_vm.cluster = cluster
|
||||
|
||||
# Custom fields
|
||||
self.mock_vm.custom_fields = {"test_cf": "TestCF"}
|
||||
|
||||
# Mock data for nesting tests
|
||||
self.mock_regions_data = [
|
||||
{"name": "ParentRegion", "parent": None, "_depth": 0},
|
||||
{"name": "TestRegion", "parent": "ParentRegion", "_depth": 1}
|
||||
]
|
||||
|
||||
self.mock_groups_data = [
|
||||
{"name": "ParentSiteGroup", "parent": None, "_depth": 0},
|
||||
{"name": "TestSiteGroup", "parent": "ParentSiteGroup", "_depth": 1}
|
||||
]
|
||||
|
||||
def test_device_hostgroup_creation(self):
|
||||
"""Test basic device hostgroup creation."""
|
||||
hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
# Test the string representation
|
||||
self.assertEqual(str(hostgroup), "Hostgroup for dev test-device")
|
||||
|
||||
# Check format options were set correctly
|
||||
self.assertEqual(hostgroup.format_options["site"], "TestSite")
|
||||
self.assertEqual(hostgroup.format_options["region"], "TestRegion")
|
||||
self.assertEqual(hostgroup.format_options["site_group"], "TestSiteGroup")
|
||||
self.assertEqual(hostgroup.format_options["role"], "TestRole")
|
||||
self.assertEqual(hostgroup.format_options["tenant"], "TestTenant")
|
||||
self.assertEqual(hostgroup.format_options["tenant_group"], "TestTenantGroup")
|
||||
self.assertEqual(hostgroup.format_options["platform"], "TestPlatform")
|
||||
self.assertEqual(hostgroup.format_options["manufacturer"], "TestManufacturer")
|
||||
self.assertEqual(hostgroup.format_options["location"], "TestLocation")
|
||||
|
||||
def test_vm_hostgroup_creation(self):
|
||||
"""Test basic VM hostgroup creation."""
|
||||
hostgroup = Hostgroup("vm", self.mock_vm, "4.0", self.mock_logger)
|
||||
|
||||
# Test the string representation
|
||||
self.assertEqual(str(hostgroup), "Hostgroup for vm test-vm")
|
||||
|
||||
# Check format options were set correctly
|
||||
self.assertEqual(hostgroup.format_options["site"], "TestSite")
|
||||
self.assertEqual(hostgroup.format_options["region"], "TestRegion")
|
||||
self.assertEqual(hostgroup.format_options["site_group"], "TestSiteGroup")
|
||||
self.assertEqual(hostgroup.format_options["role"], "TestRole")
|
||||
self.assertEqual(hostgroup.format_options["tenant"], "TestTenant")
|
||||
self.assertEqual(hostgroup.format_options["tenant_group"], "TestTenantGroup")
|
||||
self.assertEqual(hostgroup.format_options["platform"], "TestPlatform")
|
||||
self.assertEqual(hostgroup.format_options["cluster"], "TestCluster")
|
||||
self.assertEqual(hostgroup.format_options["cluster_type"], "TestClusterType")
|
||||
|
||||
def test_invalid_object_type(self):
|
||||
"""Test that an invalid object type raises an exception."""
|
||||
with self.assertRaises(HostgroupError):
|
||||
Hostgroup("invalid", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
def test_device_hostgroup_formats(self):
|
||||
"""Test different hostgroup formats for devices."""
|
||||
hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
# Default format: site/manufacturer/role
|
||||
default_result = hostgroup.generate()
|
||||
self.assertEqual(default_result, "TestSite/TestManufacturer/TestRole")
|
||||
|
||||
# Custom format: site/region
|
||||
custom_result = hostgroup.generate("site/region")
|
||||
self.assertEqual(custom_result, "TestSite/TestRegion")
|
||||
|
||||
# Custom format: site/tenant/platform/location
|
||||
complex_result = hostgroup.generate("site/tenant/platform/location")
|
||||
self.assertEqual(complex_result, "TestSite/TestTenant/TestPlatform/TestLocation")
|
||||
|
||||
def test_vm_hostgroup_formats(self):
|
||||
"""Test different hostgroup formats for VMs."""
|
||||
hostgroup = Hostgroup("vm", self.mock_vm, "4.0", self.mock_logger)
|
||||
|
||||
# Default format: cluster/role
|
||||
default_result = hostgroup.generate()
|
||||
self.assertEqual(default_result, "TestCluster/TestRole")
|
||||
|
||||
# Custom format: site/tenant
|
||||
custom_result = hostgroup.generate("site/tenant")
|
||||
self.assertEqual(custom_result, "TestSite/TestTenant")
|
||||
|
||||
# Custom format: cluster/cluster_type/platform
|
||||
complex_result = hostgroup.generate("cluster/cluster_type/platform")
|
||||
self.assertEqual(complex_result, "TestCluster/TestClusterType/TestPlatform")
|
||||
|
||||
def test_device_netbox_version_differences(self):
|
||||
"""Test hostgroup generation with different NetBox versions."""
|
||||
# NetBox v2.x
|
||||
hostgroup_v2 = Hostgroup("dev", self.mock_device, "2.11", self.mock_logger)
|
||||
self.assertEqual(hostgroup_v2.format_options["role"], "TestRole")
|
||||
|
||||
# NetBox v3.x
|
||||
hostgroup_v3 = Hostgroup("dev", self.mock_device, "3.5", self.mock_logger)
|
||||
self.assertEqual(hostgroup_v3.format_options["role"], "TestRole")
|
||||
|
||||
# NetBox v4.x (already tested in other methods)
|
||||
|
||||
def test_custom_field_lookup(self):
|
||||
"""Test custom field lookup functionality."""
|
||||
hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
# Test custom field exists and is populated
|
||||
cf_result = hostgroup.custom_field_lookup("test_cf")
|
||||
self.assertTrue(cf_result["result"])
|
||||
self.assertEqual(cf_result["cf"], "TestCF")
|
||||
|
||||
# Test custom field doesn't exist
|
||||
cf_result = hostgroup.custom_field_lookup("nonexistent_cf")
|
||||
self.assertFalse(cf_result["result"])
|
||||
self.assertIsNone(cf_result["cf"])
|
||||
|
||||
def test_hostgroup_with_custom_field(self):
|
||||
"""Test hostgroup generation including a custom field."""
|
||||
hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
# Generate with custom field included
|
||||
result = hostgroup.generate("site/test_cf/role")
|
||||
self.assertEqual(result, "TestSite/TestCF/TestRole")
|
||||
|
||||
def test_missing_hostgroup_format_item(self):
|
||||
"""Test handling of missing hostgroup format items."""
|
||||
# Create a device with minimal attributes
|
||||
minimal_device = MagicMock()
|
||||
minimal_device.name = "minimal-device"
|
||||
minimal_device.site = None
|
||||
minimal_device.tenant = None
|
||||
minimal_device.platform = None
|
||||
minimal_device.custom_fields = {}
|
||||
|
||||
# Create role
|
||||
role = MagicMock()
|
||||
role.name = "MinimalRole"
|
||||
minimal_device.role = role
|
||||
|
||||
# Create device_type with manufacturer
|
||||
device_type = MagicMock()
|
||||
manufacturer = MagicMock()
|
||||
manufacturer.name = "MinimalManufacturer"
|
||||
device_type.manufacturer = manufacturer
|
||||
minimal_device.device_type = device_type
|
||||
|
||||
# Create hostgroup
|
||||
hostgroup = Hostgroup("dev", minimal_device, "4.0", self.mock_logger)
|
||||
|
||||
# Generate with default format
|
||||
result = hostgroup.generate()
|
||||
# Site is missing, so only manufacturer and role should be included
|
||||
self.assertEqual(result, "MinimalManufacturer/MinimalRole")
|
||||
|
||||
# Test with invalid format
|
||||
with self.assertRaises(HostgroupError):
|
||||
hostgroup.generate("site/nonexistent/role")
|
||||
|
||||
def test_hostgroup_missing_required_attributes(self):
|
||||
"""Test handling when no valid hostgroup can be generated."""
|
||||
# Create a VM with minimal attributes that won't satisfy any format
|
||||
minimal_vm = MagicMock()
|
||||
minimal_vm.name = "minimal-vm"
|
||||
minimal_vm.site = None
|
||||
minimal_vm.tenant = None
|
||||
minimal_vm.platform = None
|
||||
minimal_vm.role = None
|
||||
minimal_vm.cluster = None
|
||||
minimal_vm.custom_fields = {}
|
||||
|
||||
hostgroup = Hostgroup("vm", minimal_vm, "4.0", self.mock_logger)
|
||||
|
||||
# With default format of cluster/role, both are None, so should raise an error
|
||||
with self.assertRaises(HostgroupError):
|
||||
hostgroup.generate()
|
||||
|
||||
def test_nested_region_hostgroups(self):
|
||||
"""Test hostgroup generation with nested regions."""
|
||||
# Mock the build_path function to return a predictable result
|
||||
with patch('modules.hostgroups.build_path') as mock_build_path:
|
||||
# Configure the mock to return a list of regions in the path
|
||||
mock_build_path.return_value = ["ParentRegion", "TestRegion"]
|
||||
|
||||
# Create hostgroup with nested regions enabled
|
||||
hostgroup = Hostgroup(
|
||||
"dev",
|
||||
self.mock_device,
|
||||
"4.0",
|
||||
self.mock_logger,
|
||||
nested_region_flag=True,
|
||||
nb_regions=self.mock_regions_data
|
||||
)
|
||||
|
||||
# Generate hostgroup with region
|
||||
result = hostgroup.generate("site/region/role")
|
||||
# Should include the parent region
|
||||
self.assertEqual(result, "TestSite/ParentRegion/TestRegion/TestRole")
|
||||
|
||||
def test_nested_sitegroup_hostgroups(self):
|
||||
"""Test hostgroup generation with nested site groups."""
|
||||
# Mock the build_path function to return a predictable result
|
||||
with patch('modules.hostgroups.build_path') as mock_build_path:
|
||||
# Configure the mock to return a list of site groups in the path
|
||||
mock_build_path.return_value = ["ParentSiteGroup", "TestSiteGroup"]
|
||||
|
||||
# Create hostgroup with nested site groups enabled
|
||||
hostgroup = Hostgroup(
|
||||
"dev",
|
||||
self.mock_device,
|
||||
"4.0",
|
||||
self.mock_logger,
|
||||
nested_sitegroup_flag=True,
|
||||
nb_groups=self.mock_groups_data
|
||||
)
|
||||
|
||||
# Generate hostgroup with site_group
|
||||
result = hostgroup.generate("site/site_group/role")
|
||||
# Should include the parent site group
|
||||
self.assertEqual(result, "TestSite/ParentSiteGroup/TestSiteGroup/TestRole")
|
||||
|
||||
|
||||
def test_list_formatoptions(self):
|
||||
"""Test the list_formatoptions method for debugging."""
|
||||
hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
# Patch sys.stdout to capture print output
|
||||
with patch('sys.stdout') as mock_stdout:
|
||||
hostgroup.list_formatoptions()
|
||||
|
||||
# Check that print was called with expected output
|
||||
calls = [call.write("The following options are available for host test-device"),
|
||||
call.write('\n')]
|
||||
mock_stdout.assert_has_calls(calls, any_order=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
tests/test_interface.py (new file, 247 lines)
@@ -0,0 +1,247 @@
|
||||
"""Tests for the ZabbixInterface class in the interface module."""
|
||||
import unittest
|
||||
from modules.interface import ZabbixInterface
|
||||
from modules.exceptions import InterfaceConfigError
|
||||
|
||||
|
||||
class TestZabbixInterface(unittest.TestCase):
|
||||
"""Test class for ZabbixInterface functionality."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
self.test_ip = "192.168.1.1"
|
||||
self.empty_context = {}
|
||||
self.default_interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
|
||||
# Create some test contexts for different scenarios
|
||||
self.snmpv2_context = {
|
||||
"zabbix": {
|
||||
"interface_type": 2,
|
||||
"interface_port": "161",
|
||||
"snmp": {
|
||||
"version": 2,
|
||||
"community": "public",
|
||||
"bulk": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.snmpv3_context = {
|
||||
"zabbix": {
|
||||
"interface_type": 2,
|
||||
"snmp": {
|
||||
"version": 3,
|
||||
"securityname": "snmpuser",
|
||||
"securitylevel": "authPriv",
|
||||
"authprotocol": "SHA",
|
||||
"authpassphrase": "authpass123",
|
||||
"privprotocol": "AES",
|
||||
"privpassphrase": "privpass123",
|
||||
"contextname": "context1"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.agent_context = {
|
||||
"zabbix": {
|
||||
"interface_type": 1,
|
||||
"interface_port": "10050"
|
||||
}
|
||||
}
|
||||
|
||||
def test_init(self):
|
||||
"""Test initialization of ZabbixInterface."""
|
||||
interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
|
||||
# Check basic properties
|
||||
self.assertEqual(interface.ip, self.test_ip)
|
||||
self.assertEqual(interface.context, self.empty_context)
|
||||
self.assertEqual(interface.interface["ip"], self.test_ip)
|
||||
self.assertEqual(interface.interface["main"], "1")
|
||||
self.assertEqual(interface.interface["useip"], "1")
|
||||
self.assertEqual(interface.interface["dns"], "")
|
||||
|
||||
def test_get_context_empty(self):
|
||||
"""Test get_context with empty context."""
|
||||
interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
result = interface.get_context()
|
||||
self.assertFalse(result)
|
||||
|
||||
def test_get_context_with_interface_type(self):
|
||||
"""Test get_context with interface_type but no port."""
|
||||
context = {"zabbix": {"interface_type": 2}}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
|
||||
# Should set type and default port
|
||||
result = interface.get_context()
|
||||
self.assertTrue(result)
|
||||
self.assertEqual(interface.interface["type"], 2)
|
||||
self.assertEqual(interface.interface["port"], "161") # Default port for SNMP
|
||||
|
||||
def test_get_context_with_interface_type_and_port(self):
|
||||
"""Test get_context with both interface_type and port."""
|
||||
context = {"zabbix": {"interface_type": 1, "interface_port": "12345"}}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
|
||||
# Should set type and specified port
|
||||
result = interface.get_context()
|
||||
self.assertTrue(result)
|
||||
self.assertEqual(interface.interface["type"], 1)
|
||||
self.assertEqual(interface.interface["port"], "12345")
|
||||
|
||||
def test_set_default_port(self):
|
||||
"""Test _set_default_port for different interface types."""
|
||||
interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
|
||||
# Test for agent type (1)
|
||||
interface.interface["type"] = 1
|
||||
interface._set_default_port() # pylint: disable=protected-access
|
||||
self.assertEqual(interface.interface["port"], "10050")
|
||||
|
||||
# Test for SNMP type (2)
|
||||
interface.interface["type"] = 2
|
||||
interface._set_default_port() # pylint: disable=protected-access
|
||||
self.assertEqual(interface.interface["port"], "161")
|
||||
|
||||
# Test for IPMI type (3)
|
||||
interface.interface["type"] = 3
|
||||
interface._set_default_port() # pylint: disable=protected-access
|
||||
self.assertEqual(interface.interface["port"], "623")
|
||||
|
||||
# Test for JMX type (4)
|
||||
interface.interface["type"] = 4
|
||||
interface._set_default_port() # pylint: disable=protected-access
|
||||
self.assertEqual(interface.interface["port"], "12345")
|
||||
|
||||
# Test for unsupported type
|
||||
interface.interface["type"] = 99
|
||||
result = interface._set_default_port() # pylint: disable=protected-access
|
||||
self.assertFalse(result)
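# Default ports assumed by _set_default_port, as exercised above:
# type 1 (Zabbix agent) -> 10050, type 2 (SNMP) -> 161,
# type 3 (IPMI) -> 623, type 4 (JMX) -> 12345; any other type is rejected.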
|
||||
|
||||
def test_set_snmp_v2(self):
|
||||
"""Test set_snmp with SNMPv2 configuration."""
|
||||
interface = ZabbixInterface(self.snmpv2_context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp
|
||||
interface.set_snmp()
|
||||
|
||||
# Check SNMP details
|
||||
self.assertEqual(interface.interface["details"]["version"], "2")
|
||||
self.assertEqual(interface.interface["details"]["community"], "public")
|
||||
self.assertEqual(interface.interface["details"]["bulk"], "1")
|
||||
|
||||
def test_set_snmp_v3(self):
|
||||
"""Test set_snmp with SNMPv3 configuration."""
|
||||
interface = ZabbixInterface(self.snmpv3_context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp
|
||||
interface.set_snmp()
|
||||
|
||||
# Check SNMP details
|
||||
self.assertEqual(interface.interface["details"]["version"], "3")
|
||||
self.assertEqual(interface.interface["details"]["securityname"], "snmpuser")
|
||||
self.assertEqual(interface.interface["details"]["securitylevel"], "authPriv")
|
||||
self.assertEqual(interface.interface["details"]["authprotocol"], "SHA")
|
||||
self.assertEqual(interface.interface["details"]["authpassphrase"], "authpass123")
|
||||
self.assertEqual(interface.interface["details"]["privprotocol"], "AES")
|
||||
self.assertEqual(interface.interface["details"]["privpassphrase"], "privpass123")
|
||||
self.assertEqual(interface.interface["details"]["contextname"], "context1")
|
||||
|
||||
def test_set_snmp_no_snmp_config(self):
|
||||
"""Test set_snmp with missing SNMP configuration."""
|
||||
# Create context with interface type but no SNMP config
|
||||
context = {"zabbix": {"interface_type": 2}}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp - should raise exception
|
||||
with self.assertRaises(InterfaceConfigError):
|
||||
interface.set_snmp()
|
||||
|
||||
def test_set_snmp_unsupported_version(self):
|
||||
"""Test set_snmp with unsupported SNMP version."""
|
||||
# Create context with invalid SNMP version
|
||||
context = {
|
||||
"zabbix": {
|
||||
"interface_type": 2,
|
||||
"snmp": {
|
||||
"version": 4 # Invalid version
|
||||
}
|
||||
}
|
||||
}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp - should raise exception
|
||||
with self.assertRaises(InterfaceConfigError):
|
||||
interface.set_snmp()
|
||||
|
||||
def test_set_snmp_no_version(self):
|
||||
"""Test set_snmp with missing SNMP version."""
|
||||
# Create context without SNMP version
|
||||
context = {
|
||||
"zabbix": {
|
||||
"interface_type": 2,
|
||||
"snmp": {
|
||||
"community": "public" # No version specified
|
||||
}
|
||||
}
|
||||
}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp - should raise exception
|
||||
with self.assertRaises(InterfaceConfigError):
|
||||
interface.set_snmp()
|
||||
|
||||
def test_set_snmp_non_snmp_interface(self):
|
||||
"""Test set_snmp with non-SNMP interface type."""
|
||||
interface = ZabbixInterface(self.agent_context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp - should raise exception
|
||||
with self.assertRaises(InterfaceConfigError):
|
||||
interface.set_snmp()
|
||||
|
||||
def test_set_default_snmp(self):
|
||||
"""Test set_default_snmp method."""
|
||||
interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
interface.set_default_snmp()
|
||||
|
||||
# Check interface properties
|
||||
self.assertEqual(interface.interface["type"], "2")
|
||||
self.assertEqual(interface.interface["port"], "161")
|
||||
self.assertEqual(interface.interface["details"]["version"], "2")
|
||||
self.assertEqual(interface.interface["details"]["community"], "{$SNMP_COMMUNITY}")
|
||||
self.assertEqual(interface.interface["details"]["bulk"], "1")
|
||||
|
||||
def test_set_default_agent(self):
|
||||
"""Test set_default_agent method."""
|
||||
interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
interface.set_default_agent()
|
||||
|
||||
# Check interface properties
|
||||
self.assertEqual(interface.interface["type"], "1")
|
||||
self.assertEqual(interface.interface["port"], "10050")
|
||||
|
||||
def test_snmpv2_no_community(self):
|
||||
"""Test SNMPv2 with no community string specified."""
|
||||
# Create context with SNMPv2 but no community
|
||||
context = {
|
||||
"zabbix": {
|
||||
"interface_type": 2,
|
||||
"snmp": {
|
||||
"version": 2
|
||||
}
|
||||
}
|
||||
}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp
|
||||
interface.set_snmp()
|
||||
|
||||
# Should use default community string
|
||||
self.assertEqual(interface.interface["details"]["community"], "{$SNMP_COMMUNITY}")
|
tests/test_physical_device.py (new file, 429 lines)
@@ -0,0 +1,429 @@
|
||||
"""Tests for the PhysicalDevice class in the device module."""
|
||||
import unittest
|
||||
from unittest.mock import MagicMock, patch
|
||||
from modules.device import PhysicalDevice
|
||||
from modules.exceptions import TemplateError, SyncInventoryError
|
||||
|
||||
|
||||
class TestPhysicalDevice(unittest.TestCase):
|
||||
"""Test class for PhysicalDevice functionality."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
# Create mock NetBox device
|
||||
self.mock_nb_device = MagicMock()
|
||||
self.mock_nb_device.id = 123
|
||||
self.mock_nb_device.name = "test-device"
|
||||
self.mock_nb_device.status.label = "Active"
|
||||
self.mock_nb_device.custom_fields = {"zabbix_hostid": None}
|
||||
self.mock_nb_device.config_context = {}
|
||||
|
||||
# Set up a primary IP
|
||||
primary_ip = MagicMock()
|
||||
primary_ip.address = "192.168.1.1/24"
|
||||
self.mock_nb_device.primary_ip = primary_ip
|
||||
|
||||
# Create mock Zabbix API
|
||||
self.mock_zabbix = MagicMock()
|
||||
self.mock_zabbix.version = "6.0"
|
||||
|
||||
# Mock NetBox journal class
|
||||
self.mock_nb_journal = MagicMock()
|
||||
|
||||
# Create logger mock
|
||||
self.mock_logger = MagicMock()
|
||||
|
||||
# Create PhysicalDevice instance with mocks
|
||||
with patch('modules.device.config',
|
||||
{"device_cf": "zabbix_hostid",
|
||||
"template_cf": "zabbix_template",
|
||||
"templates_config_context": False,
|
||||
"templates_config_context_overrule": False,
|
||||
"traverse_regions": False,
|
||||
"traverse_site_groups": False,
|
||||
"inventory_mode": "disabled",
|
||||
"inventory_sync": False,
|
||||
"device_inventory_map": {}
|
||||
}):
|
||||
self.device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
journal=True,
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
def test_init(self):
|
||||
"""Test the initialization of the PhysicalDevice class."""
|
||||
# Check that basic properties are set correctly
|
||||
self.assertEqual(self.device.name, "test-device")
|
||||
self.assertEqual(self.device.id, 123)
|
||||
self.assertEqual(self.device.status, "Active")
|
||||
self.assertEqual(self.device.ip, "192.168.1.1")
|
||||
self.assertEqual(self.device.cidr, "192.168.1.1/24")
|
||||
|
||||
def test_init_no_primary_ip(self):
|
||||
"""Test initialization when device has no primary IP."""
|
||||
# Set primary_ip to None
|
||||
self.mock_nb_device.primary_ip = None
|
||||
|
||||
# Creating device should raise SyncInventoryError
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
with self.assertRaises(SyncInventoryError):
|
||||
PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
def test_set_basics_with_special_characters(self):
|
||||
"""Test _setBasics when device name contains special characters."""
|
||||
# Set name with special characters that
|
||||
# will actually trigger the special character detection
|
||||
self.mock_nb_device.name = "test-devïce"
|
||||
|
||||
# We need to patch the search function to simulate finding special characters
|
||||
with patch('modules.device.search') as mock_search, \
|
||||
patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
# Make the search function return True to simulate special characters
|
||||
mock_search.return_value = True
|
||||
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# With the mocked search function, the name should be changed to NETBOX_ID format
|
||||
self.assertEqual(device.name, f"NETBOX_ID{self.mock_nb_device.id}")
|
||||
# And visible_name should be set to the original name
|
||||
self.assertEqual(device.visible_name, "test-devïce")
|
||||
# use_visible_name flag should be set
|
||||
self.assertTrue(device.use_visible_name)
|
||||
|
||||
def test_get_templates_context(self):
|
||||
"""Test get_templates_context with valid config."""
|
||||
# Set up config_context with valid template data
|
||||
self.mock_nb_device.config_context = {
|
||||
"zabbix": {
|
||||
"templates": ["Template1", "Template2"]
|
||||
}
|
||||
}
|
||||
|
||||
# Create device with the updated mock
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Test that templates are returned correctly
|
||||
templates = device.get_templates_context()
|
||||
self.assertEqual(templates, ["Template1", "Template2"])
|
||||
|
||||
def test_get_templates_context_with_string(self):
|
||||
"""Test get_templates_context with a string instead of list."""
|
||||
# Set up config_context with a string template
|
||||
self.mock_nb_device.config_context = {
|
||||
"zabbix": {
|
||||
"templates": "Template1"
|
||||
}
|
||||
}
|
||||
|
||||
# Create device with the updated mock
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Test that template is wrapped in a list
|
||||
templates = device.get_templates_context()
|
||||
self.assertEqual(templates, ["Template1"])
|
||||
|
||||
def test_get_templates_context_no_zabbix_key(self):
|
||||
"""Test get_templates_context when zabbix key is missing."""
|
||||
# Set up config_context without zabbix key
|
||||
self.mock_nb_device.config_context = {}
|
||||
|
||||
# Create device with the updated mock
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Test that TemplateError is raised
|
||||
with self.assertRaises(TemplateError):
|
||||
device.get_templates_context()
|
||||
|
||||
def test_get_templates_context_no_templates_key(self):
|
||||
"""Test get_templates_context when templates key is missing."""
|
||||
# Set up config_context without templates key
|
||||
self.mock_nb_device.config_context = {"zabbix": {}}
|
||||
|
||||
# Create device with the updated mock
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Test that TemplateError is raised
|
||||
with self.assertRaises(TemplateError):
|
||||
device.get_templates_context()
|
||||
|
||||
def test_set_template_with_config_context(self):
|
||||
"""Test set_template with templates_config_context=True."""
|
||||
# Set up config_context with templates
|
||||
self.mock_nb_device.config_context = {
|
||||
"zabbix": {
|
||||
"templates": ["Template1"]
|
||||
}
|
||||
}
|
||||
|
||||
# Mock get_templates_context to return expected templates
|
||||
with patch.object(PhysicalDevice, 'get_templates_context', return_value=["Template1"]):
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Call set_template with prefer_config_context=True
|
||||
result = device.set_template(prefer_config_context=True, overrule_custom=False)
|
||||
|
||||
# Check result and template names
|
||||
self.assertTrue(result)
|
||||
self.assertEqual(device.zbx_template_names, ["Template1"])
|
||||
|
||||
def test_set_inventory_disabled_mode(self):
|
||||
"""Test set_inventory with inventory_mode=disabled."""
|
||||
# Configure with disabled inventory mode
|
||||
config_patch = {
|
||||
"device_cf": "zabbix_hostid",
|
||||
"inventory_mode": "disabled",
|
||||
"inventory_sync": False
|
||||
}
|
||||
|
||||
with patch('modules.device.config', config_patch):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Call set_inventory with the config patch still active
|
||||
with patch('modules.device.config', config_patch):
|
||||
result = device.set_inventory({})
|
||||
|
||||
# Check result
|
||||
self.assertTrue(result)
|
||||
# Default value for disabled inventory
|
||||
self.assertEqual(device.inventory_mode, -1)
|
||||
|
||||
def test_set_inventory_manual_mode(self):
|
||||
"""Test set_inventory with inventory_mode=manual."""
|
||||
# Configure with manual inventory mode
|
||||
config_patch = {
|
||||
"device_cf": "zabbix_hostid",
|
||||
"inventory_mode": "manual",
|
||||
"inventory_sync": False
|
||||
}
|
||||
|
||||
with patch('modules.device.config', config_patch):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Call set_inventory with the config patch still active
|
||||
with patch('modules.device.config', config_patch):
|
||||
result = device.set_inventory({})
|
||||
|
||||
# Check result
|
||||
self.assertTrue(result)
|
||||
self.assertEqual(device.inventory_mode, 0) # Manual mode
|
||||
|
||||
def test_set_inventory_automatic_mode(self):
|
||||
"""Test set_inventory with inventory_mode=automatic."""
|
||||
# Configure with automatic inventory mode
|
||||
config_patch = {
|
||||
"device_cf": "zabbix_hostid",
|
||||
"inventory_mode": "automatic",
|
||||
"inventory_sync": False
|
||||
}
|
||||
|
||||
with patch('modules.device.config', config_patch):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Call set_inventory with the config patch still active
|
||||
with patch('modules.device.config', config_patch):
|
||||
result = device.set_inventory({})
|
||||
|
||||
# Check result
|
||||
self.assertTrue(result)
|
||||
self.assertEqual(device.inventory_mode, 1) # Automatic mode
|
||||
|
||||
def test_set_inventory_with_inventory_sync(self):
|
||||
"""Test set_inventory with inventory_sync=True."""
|
||||
# Configure with inventory sync enabled
|
||||
config_patch = {
|
||||
"device_cf": "zabbix_hostid",
|
||||
"inventory_mode": "manual",
|
||||
"inventory_sync": True,
|
||||
"device_inventory_map": {
|
||||
"name": "name",
|
||||
"serial": "serialno_a"
|
||||
}
|
||||
}
|
||||
|
||||
with patch('modules.device.config', config_patch):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Create a mock device with the required attributes
|
||||
mock_device_data = {
|
||||
"name": "test-device",
|
||||
"serial": "ABC123"
|
||||
}
|
||||
|
||||
# Call set_inventory with the config patch still active
|
||||
with patch('modules.device.config', config_patch):
|
||||
result = device.set_inventory(mock_device_data)
|
||||
|
||||
# Check result
|
||||
self.assertTrue(result)
|
||||
self.assertEqual(device.inventory_mode, 0) # Manual mode
|
||||
self.assertEqual(device.inventory, {
|
||||
"name": "test-device",
|
||||
"serialno_a": "ABC123"
|
||||
})
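# device_inventory_map maps NetBox attribute names (keys) to Zabbix inventory
# field names (values), e.g. "serial" -> "serialno_a"; set_inventory() is
# expected to copy each mapped value into the host inventory dict checked above.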
|
||||
|
||||
def test_iscluster_true(self):
|
||||
"""Test isCluster when device is part of a cluster."""
|
||||
# Set up virtual_chassis
|
||||
self.mock_nb_device.virtual_chassis = MagicMock()
|
||||
|
||||
# Create device with the updated mock
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Check isCluster result
|
||||
self.assertTrue(device.isCluster())
|
||||
|
||||
def test_is_cluster_false(self):
|
||||
"""Test isCluster when device is not part of a cluster."""
|
||||
# Set virtual_chassis to None
|
||||
self.mock_nb_device.virtual_chassis = None
|
||||
|
||||
# Create device with the updated mock
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Check isCluster result
|
||||
self.assertFalse(device.isCluster())
|
||||
|
||||
|
||||
def test_promote_master_device_primary(self):
|
||||
"""Test promoteMasterDevice when device is primary in cluster."""
|
||||
# Set up virtual chassis with master device
|
||||
mock_vc = MagicMock()
|
||||
mock_vc.name = "virtual-chassis-1"
|
||||
mock_master = MagicMock()
|
||||
mock_master.id = self.mock_nb_device.id # Set master ID to match the current device
|
||||
mock_vc.master = mock_master
|
||||
self.mock_nb_device.virtual_chassis = mock_vc
|
||||
|
||||
# Create device with the updated mock
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Call promoteMasterDevice and check the result
|
||||
result = device.promoteMasterDevice()
|
||||
|
||||
# Should return True for primary device
|
||||
self.assertTrue(result)
|
||||
# Device name should be updated to virtual chassis name
|
||||
self.assertEqual(device.name, "virtual-chassis-1")
|
||||
|
||||
|
||||
def test_promote_master_device_secondary(self):
|
||||
"""Test promoteMasterDevice when device is secondary in cluster."""
|
||||
# Set up virtual chassis with a different master device
|
||||
mock_vc = MagicMock()
|
||||
mock_vc.name = "virtual-chassis-1"
|
||||
mock_master = MagicMock()
|
||||
mock_master.id = self.mock_nb_device.id + 1 # Different ID than the current device
|
||||
mock_vc.master = mock_master
|
||||
self.mock_nb_device.virtual_chassis = mock_vc
|
||||
|
||||
# Create device with the updated mock
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Call promoteMasterDevice and check the result
|
||||
result = device.promoteMasterDevice()
|
||||
|
||||
# Should return False for secondary device
|
||||
self.assertFalse(result)
|
||||
# Device name should not be modified
|
||||
self.assertEqual(device.name, "test-device")
|
tests/test_tools.py (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
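"""Tests for the sanatize_log_output helper in modules.tools."""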
from modules.tools import sanatize_log_output
|
||||
|
||||
def test_sanatize_log_output_secrets():
|
||||
data = {
|
||||
"macros": [
|
||||
{"macro": "{$SECRET}", "type": "1", "value": "supersecret"},
|
||||
{"macro": "{$PLAIN}", "type": "0", "value": "notsecret"},
|
||||
]
|
||||
}
|
||||
sanitized = sanatize_log_output(data)
|
||||
assert sanitized["macros"][0]["value"] == "********"
|
||||
assert sanitized["macros"][1]["value"] == "notsecret"
|
||||
|
||||
def test_sanatize_log_output_interface_secrets():
|
||||
data = {
|
||||
"interfaceid": 123,
|
||||
"details": {
|
||||
"authpassphrase": "supersecret",
|
||||
"privpassphrase": "anothersecret",
|
||||
"securityname": "sensitiveuser",
|
||||
"community": "public",
|
||||
"other": "normalvalue"
|
||||
}
|
||||
}
|
||||
sanitized = sanatize_log_output(data)
|
||||
# Sensitive fields should be sanitized
|
||||
assert sanitized["details"]["authpassphrase"] == "********"
|
||||
assert sanitized["details"]["privpassphrase"] == "********"
|
||||
assert sanitized["details"]["securityname"] == "********"
|
||||
# The community string is also treated as sensitive and masked
assert sanitized["details"]["community"] == "********"
# Non-sensitive fields should remain untouched
assert sanitized["details"]["other"] == "normalvalue"
|
||||
# interfaceid should be removed
|
||||
assert "interfaceid" not in sanitized
|
||||
|
||||
def test_sanatize_log_output_interface_macros():
|
||||
data = {
|
||||
"interfaceid": 123,
|
||||
"details": {
|
||||
"authpassphrase": "{$SECRET_MACRO}",
|
||||
"privpassphrase": "{$SECRET_MACRO}",
|
||||
"securityname": "{$USER_MACRO}",
|
||||
"community": "{$SNNMP_COMMUNITY}",
|
||||
}
|
||||
}
|
||||
sanitized = sanatize_log_output(data)
|
||||
# Macro values should not be sanitized
|
||||
assert sanitized["details"]["authpassphrase"] == "{$SECRET_MACRO}"
|
||||
assert sanitized["details"]["privpassphrase"] == "{$SECRET_MACRO}"
|
||||
assert sanitized["details"]["securityname"] == "{$USER_MACRO}"
|
||||
assert sanitized["details"]["community"] == "{$SNNMP_COMMUNITY}"
|
||||
assert "interfaceid" not in sanitized
|
||||
|
||||
def test_sanatize_log_output_plain_data():
|
||||
data = {"foo": "bar", "baz": 123}
|
||||
sanitized = sanatize_log_output(data)
|
||||
assert sanitized == data
|
||||
|
||||
def test_sanatize_log_output_non_dict():
|
||||
data = [1, 2, 3]
|
||||
sanitized = sanatize_log_output(data)
|
||||
assert sanitized == data
|
tests/test_usermacros.py (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
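"""Tests for usermacro synchronisation and the ZabbixUsermacros class."""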
import unittest
|
||||
from unittest.mock import MagicMock, patch
|
||||
from modules.device import PhysicalDevice
|
||||
from modules.usermacros import ZabbixUsermacros
|
||||
|
||||
class DummyNB:
|
||||
def __init__(self, name="dummy", config_context=None, **kwargs):
|
||||
self.name = name
|
||||
self.config_context = config_context or {}
|
||||
for k, v in kwargs.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
def __getitem__(self, key):
|
||||
# Allow dict-style access for test compatibility
|
||||
if hasattr(self, key):
|
||||
return getattr(self, key)
|
||||
if key in self.config_context:
|
||||
return self.config_context[key]
|
||||
raise KeyError(key)
|
||||
|
||||
class TestUsermacroSync(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.nb = DummyNB(serial="1234")
|
||||
self.logger = MagicMock()
|
||||
self.usermacro_map = {"serial": "{$HW_SERIAL}"}
|
||||
|
||||
@patch("modules.device.config", {"usermacro_sync": False})
|
||||
def test_usermacro_sync_false(self):
|
||||
device = PhysicalDevice.__new__(PhysicalDevice)
|
||||
device.nb = self.nb
|
||||
device.logger = self.logger
|
||||
device.name = "dummy"
|
||||
device._usermacro_map = MagicMock(return_value=self.usermacro_map)
|
||||
# call set_usermacros
|
||||
result = device.set_usermacros()
|
||||
self.assertEqual(device.usermacros, [])
|
||||
self.assertTrue(result is True or result is None)
|
||||
|
||||
@patch("modules.device.config", {"usermacro_sync": True})
|
||||
def test_usermacro_sync_true(self):
|
||||
device = PhysicalDevice.__new__(PhysicalDevice)
|
||||
device.nb = self.nb
|
||||
device.logger = self.logger
|
||||
device.name = "dummy"
|
||||
device._usermacro_map = MagicMock(return_value=self.usermacro_map)
|
||||
result = device.set_usermacros()
|
||||
self.assertIsInstance(device.usermacros, list)
|
||||
self.assertGreater(len(device.usermacros), 0)
|
||||
|
||||
@patch("modules.device.config", {"usermacro_sync": "full"})
|
||||
def test_usermacro_sync_full(self):
|
||||
device = PhysicalDevice.__new__(PhysicalDevice)
|
||||
device.nb = self.nb
|
||||
device.logger = self.logger
|
||||
device.name = "dummy"
|
||||
device._usermacro_map = MagicMock(return_value=self.usermacro_map)
|
||||
result = device.set_usermacros()
|
||||
self.assertIsInstance(device.usermacros, list)
|
||||
self.assertGreater(len(device.usermacros), 0)
|
||||
|
||||
class TestZabbixUsermacros(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.nb = DummyNB()
|
||||
self.logger = MagicMock()
|
||||
|
||||
def test_validate_macro_valid(self):
|
||||
macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
|
||||
self.assertTrue(macros.validate_macro("{$TEST_MACRO}"))
|
||||
self.assertTrue(macros.validate_macro("{$A1_2.3}"))
|
||||
self.assertTrue(macros.validate_macro("{$FOO:bar}"))
|
||||
|
||||
def test_validate_macro_invalid(self):
|
||||
macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
|
||||
self.assertFalse(macros.validate_macro("$TEST_MACRO"))
|
||||
self.assertFalse(macros.validate_macro("{TEST_MACRO}"))
|
||||
self.assertFalse(macros.validate_macro("{$test}")) # lower-case not allowed
|
||||
self.assertFalse(macros.validate_macro(""))
|
||||
|
||||
def test_render_macro_dict(self):
|
||||
macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
|
||||
macro = macros.render_macro("{$FOO}", {"value": "bar", "type": "secret", "description": "desc"})
|
||||
self.assertEqual(macro["macro"], "{$FOO}")
|
||||
self.assertEqual(macro["value"], "bar")
|
||||
self.assertEqual(macro["type"], "1")
|
||||
self.assertEqual(macro["description"], "desc")
|
||||
|
||||
def test_render_macro_dict_missing_value(self):
|
||||
macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
|
||||
result = macros.render_macro("{$FOO}", {"type": "text"})
|
||||
self.assertFalse(result)
|
||||
self.logger.warning.assert_called()
|
||||
|
||||
def test_render_macro_str(self):
|
||||
macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
|
||||
macro = macros.render_macro("{$FOO}", "bar")
|
||||
self.assertEqual(macro["macro"], "{$FOO}")
|
||||
self.assertEqual(macro["value"], "bar")
|
||||
self.assertEqual(macro["type"], "0")
|
||||
self.assertEqual(macro["description"], "")
|
||||
|
||||
def test_render_macro_invalid_name(self):
|
||||
macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
|
||||
result = macros.render_macro("FOO", "bar")
|
||||
self.assertFalse(result)
|
||||
self.logger.error.assert_called()
|
||||
|
||||
def test_generate_from_map(self):
|
||||
nb = DummyNB(memory="bar", role="baz")
|
||||
usermacro_map = {"memory": "{$FOO}", "role": "{$BAR}"}
|
||||
macros = ZabbixUsermacros(nb, usermacro_map, True, logger=self.logger)
|
||||
result = macros.generate()
|
||||
self.assertEqual(len(result), 2)
|
||||
self.assertEqual(result[0]["macro"], "{$FOO}")
|
||||
self.assertEqual(result[1]["macro"], "{$BAR}")
|
||||
|
||||
def test_generate_from_config_context(self):
|
||||
config_context = {"zabbix": {"usermacros": {"{$FOO}": {"value": "bar"}}}}
|
||||
nb = DummyNB(config_context=config_context)
|
||||
macros = ZabbixUsermacros(nb, {}, True, logger=self.logger)
|
||||
result = macros.generate()
|
||||
self.assertEqual(len(result), 1)
|
||||
self.assertEqual(result[0]["macro"], "{$FOO}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|