mirror of
https://github.com/TheNetworkGuy/netbox-zabbix-sync.git
synced 2025-07-13 15:24:48 -06:00
Compare commits
258 Commits
22  .devcontainer/devcontainer.json  Normal file
@@ -0,0 +1,22 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
{
	"name": "Python 3",
	// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
	"image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye",

	// Features to add to the dev container. More info: https://containers.dev/features.
	// "features": {},

	// Use 'forwardPorts' to make a list of ports inside the container available locally.
	// "forwardPorts": [],

	// Use 'postCreateCommand' to run commands after the container is created.
	"postCreateCommand": "pip3 install --user -r requirements.txt && pip3 install --user pylint pytest coverage pytest-cov"

	// Configure tool-specific properties.
	// "customizations": {},

	// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
	// "remoteUser": "root"
}
55  .github/workflows/publish-image.yml  vendored  Normal file
@@ -0,0 +1,55 @@
---
name: Build and Push Docker Image

on:
  push:
    branches:
      - main
  release:
    types: [published]
  pull_request:
    types: [opened, synchronize]

jobs:
  test_quality:
    uses: ./.github/workflows/quality.yml
  test_code:
    uses: ./.github/workflows/run_tests.yml
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5

      - name: Login to GitHub Container Registry
        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96
        with:
          images: ghcr.io/${{ github.repository }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}

      - name: Build and push Docker image
        uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991
        with:
          context: .
          file: ./Dockerfile
          push: true
          platforms: linux/amd64,linux/arm64
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          annotations: |
            index:org.opencontainers.image.description=Python script to synchronise NetBox devices to Zabbix.
28  .github/workflows/quality.yml  vendored  Normal file
@@ -0,0 +1,28 @@
---
name: Pylint Quality control

on:
  push:
  pull_request:
  workflow_call:

jobs:
  python_quality_testing:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.12","3.13"]
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pylint
          pip install -r requirements.txt
      - name: Analysing the code with pylint
        run: |
          pylint --module-naming-style=any modules/* netbox_zabbix_sync.py
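To reproduce this check locally before pushing, the same commands from the workflow can be run from the repository root (a sketch; it assumes a working Python environment):

```sh
pip install pylint -r requirements.txt
pylint --module-naming-style=any modules/* netbox_zabbix_sync.py
```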
33  .github/workflows/run_tests.yml  vendored  Normal file
@@ -0,0 +1,33 @@
---
name: Pytest code testing

on:
  push:
  pull_request:
  workflow_call:

jobs:
  test_code:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.12
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest pytest-mock coverage pytest-cov
          pip install -r requirements.txt
      - name: Testing the code with PyTest
        run: |
          cp config.py.example config.py
          pytest tests
      - name: Run tests with coverage
        run: |
          cp config.py.example config.py
          coverage run -m pytest tests
      - name: Check coverage percentage
        run: |
          coverage report --fail-under=70
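The equivalent local run, using the same commands as the workflow (a sketch; assumes the repository root as working directory):

```sh
pip install pytest pytest-mock coverage pytest-cov -r requirements.txt
cp config.py.example config.py
coverage run -m pytest tests
coverage report --fail-under=70
```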
6  .gitignore  vendored
@@ -1,5 +1,11 @@
*.log
.venv
config.py
Pipfile
Pipfile.lock
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
.vscode
.flake
.coverage
20  Dockerfile  Normal file
@@ -0,0 +1,20 @@
# syntax=docker/dockerfile:1
FROM python:3.12-alpine
RUN mkdir -p /opt/netbox-zabbix && chown -R 1000:1000 /opt/netbox-zabbix

RUN mkdir -p /opt/netbox-zabbix
RUN addgroup -g 1000 -S netbox-zabbix && adduser -u 1000 -S netbox-zabbix -G netbox-zabbix
RUN chown -R 1000:1000 /opt/netbox-zabbix

WORKDIR /opt/netbox-zabbix

COPY --chown=1000:1000 . /opt/netbox-zabbix

USER 1000:1000

RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi
USER root
RUN pip install -r ./requirements.txt
USER 1000:1000
ENTRYPOINT ["python"]
CMD ["/opt/netbox-zabbix/netbox_zabbix_sync.py", "-v"]
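If you prefer building the image yourself from this Dockerfile instead of pulling it from ghcr.io, a minimal sketch could look like this (the tag `netbox-zabbix-sync` is just an example name; the environment variables are the ones documented in the README below):

```bash
# Build the image locally from the repository root
docker build -t netbox-zabbix-sync .

# Run a one-time sync; the entrypoint already calls netbox_zabbix_sync.py -v
docker run --rm \
  -e NETBOX_HOST='https://netbox.local' \
  -e NETBOX_TOKEN='secrettoken' \
  -e ZABBIX_HOST='https://zabbix.local' \
  -e ZABBIX_TOKEN='othersecrettoken' \
  netbox-zabbix-sync
```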
662  README.md
@@ -1,40 +1,104 @@
# NetBox to Zabbix synchronization

A script to create, update and delete Zabbix hosts using NetBox device objects. Tested and compatible with all [currently supported Zabbix releases](https://www.zabbix.com/life_cycle_and_release_policy).

## Installation via Docker

To pull the latest stable version to your local cache, use the following docker
pull command:

```bash
docker pull ghcr.io/thenetworkguy/netbox-zabbix-sync:main
```

Make sure to specify the needed environment variables for the script to work
(see [here](#set-environment-variables)) on the command line or use an
[env file](https://docs.docker.com/reference/cli/docker/container/run/#env).

```bash
docker run -d -t -i -e ZABBIX_HOST='https://zabbix.local' \
-e ZABBIX_TOKEN='othersecrettoken' \
-e NETBOX_HOST='https://netbox.local' \
-e NETBOX_TOKEN='secrettoken' \
--name netbox-zabbix-sync ghcr.io/thenetworkguy/netbox-zabbix-sync:main
```

This should run a one-time sync. You can check the sync with
`docker logs netbox-zabbix-sync`.

The image uses the default `config.py` for its configuration. You can use a
volume mount in the docker run command to override it with your own config file
if needed (see [config file](#config-file)):

```bash
docker run -d -t -i -v $(pwd)/config.py:/opt/netbox-zabbix/config.py ...
```

## Installation from Source

### Cloning the repository

```bash
git clone https://github.com/TheNetworkGuy/netbox-zabbix-sync.git
```

### Packages

Make sure that you have a python environment with the following packages
installed. You can also use the `requirements.txt` file for installation with
pip.

```sh
# Packages:
pynetbox
pyzabbix

# Install them through requirements.txt from a venv:
virtualenv .venv
source .venv/bin/activate
.venv/bin/pip --require-virtualenv install -r requirements.txt
```

### Config file

First time user? Copy the `config.py.example` file to `config.py`. This file is
used for modifying filters and setting variables such as custom field names.

```sh
cp config.py.example config.py
```

### Set environment variables

Set the following environment variables:

```bash
ZABBIX_HOST="https://zabbix.local"
ZABBIX_USER="username"
ZABBIX_PASS="Password"
NETBOX_HOST="https://netbox.local"
NETBOX_TOKEN="secrettoken"
```

Or, you can use a Zabbix API token to log in instead of using a username and
password. In that case `ZABBIX_USER` and `ZABBIX_PASS` will be ignored.

```bash
ZABBIX_TOKEN=othersecrettoken
```

If you are using custom SSL certificates for NetBox and/or Zabbix, you can set
the following environment variable to the path of your CA bundle file:

```sh
export REQUESTS_CA_BUNDLE=/path/to/your/ca-bundle.crt
```

### NetBox custom fields

Use the following custom fields in NetBox (if you are using config context for
the template information then the zabbix_template field is not required):

```
* Type: Integer
* Name: zabbix_hostid
@@ -42,6 +106,7 @@ Use the following custom fields in Netbox (if you are using config context for t
* Default: null
* Object: dcim > device
```

```
* Type: Text
* Name: zabbix_template
@@ -49,100 +114,239 @@ Use the following custom fields in Netbox (if you are using config context for t
* Default: null
* Object: dcim > device_type
```

You can make the `zabbix_hostid` field hidden or read-only to prevent human
intervention.

This is optional, but there may be cases where you want to leave it
read-write in the UI. For example to manually change or clear the ID and re-run a sync.

## Virtual Machine (VM) Syncing

In order to use VM syncing, make sure that the `zabbix_hostid` custom field is
also present on Virtual machine objects in NetBox.

Use the `config.py` file and set the `sync_vms` variable to `True`.

You can set the `vm_hostgroup_format` variable to a customizable value for VM
hostgroups. The default is `cluster_type/cluster/role`.

To enable filtering for VMs, check out the `nb_vm_filter` variable. It works
the same as with the device filter (see documentation under "Hostgroup layout").
Note that not all filtering capabilities and properties of devices are
applicable to VMs and vice versa. Check the NetBox API documentation to see
which filtering options are available for each object type.
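A sketch of the relevant `config.py` settings for VM syncing (the values below are the defaults shipped in `config.py.example` further down this page, with `sync_vms` flipped on; adjust the filter to your environment):

```python
sync_vms = True
# Hostgroup layout used for VMs only
vm_hostgroup_format = "cluster_type/cluster/role"
# Only sync VMs that have a name set in NetBox
nb_vm_filter = {"name__n": "null"}
```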

## Config file

### Hostgroup

Setting the `create_hostgroups` variable to `False` requires manual hostgroup
creation for devices in a new category. I would recommend setting this variable
to `True` since leaving it on `False` results in a lot of manual work.

The format can be set with the `hostgroup_format` variable for devices and
`vm_hostgroup_format` for virtual machines.

Any nested parent hostgroups will also be created automatically. For instance
the region `Berlin` with parent region `Germany` will create the hostgroup
`Germany/Berlin`.

Make sure that the Zabbix user has proper permissions to create hosts. The
hostgroups are in a nested format. This means that proper permissions only need
to be applied to the site name hostgroup and cascaded to any child hostgroups.

#### Layout

The default hostgroup layout is "site/manufacturer/role". You can change
this behaviour with the hostgroup_format variable. The following values can be
used:

**Both devices and virtual machines**

| name          | description                                                                           |
| ------------- | ------------------------------------------------------------------------------------- |
| role          | Role name of a device or VM                                                           |
| region        | The region name                                                                       |
| site          | Site name                                                                             |
| site_group    | Site group name                                                                       |
| tenant        | Tenant name                                                                           |
| tenant_group  | Tenant group name                                                                     |
| platform      | Software platform of a device or VM                                                   |
| custom fields | See the section "Layout -> Custom Fields" to use custom fields as hostgroup variable  |

**Only for devices**

| name         | description              |
| ------------ | ------------------------ |
| location     | The device location name |
| manufacturer | Device manufacturer name |
| rack         | Rack                     |

**Only for VMs**

| name         | description     |
| ------------ | --------------- |
| cluster      | VM cluster name |
| cluster_type | VM cluster type |
| device       | Parent device   |

You can specify the value separated by a "/" like so:

```python
hostgroup_format = "tenant/site/location/role"
```

You can also provide a list of groups like so:

```python
hostgroup_format = ["region/site_group/site", "role", "tenant_group/tenant"]
```

**Group traversal**

The default behaviour for `region` is to only use the directly assigned region
in the rendered hostgroup name. However, by setting `traverse_regions` to `True`
in `config.py` the script will render a full region path of all parent regions
for the hostgroup name. `traverse_site_groups` controls the same behaviour for
site_groups.
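For example, a `config.py` fragment that renders full region paths (a sketch; the variable names and the path comment come from `config.py.example` further down this page, the format string is just an illustration):

```python
hostgroup_format = "region/site/role"
# Renders 'Global/Europe/Netherlands/Amsterdam' instead of just 'Amsterdam'
traverse_regions = True
traverse_site_groups = False
```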

**Custom fields**

You can use the value of custom fields for hostgroup generation. This allows
more freedom and even allows a full static mapping instead of a dynamically
rendered hostgroup name.

For instance a custom field with the name `mycustomfieldname` and type string
has the following values for 2 devices:

```
Device A has the value Train for custom field mycustomfieldname.
Device B has the value Bus for custom field mycustomfieldname.
Both devices are located in the site Paris.
```

With the hostgroup format `site/mycustomfieldname` the following hostgroups will
be generated:

```
Device A: Paris/Train
Device B: Paris/Bus
```

**Empty variables or hostgroups**

Should the content of a variable be empty, then the hostgroup position is
skipped.

For example, consider the following scenario with 2 devices, both the same
device type and site. One of them is linked to a tenant, the other one does not
have a relationship with a tenant.

- Device_role: PDU
- Site: HQ-AMS

```python
hostgroup_format = "site/tenant/role"
```

When running the script like above, the following hostgroup (HG) will be
generated for both hosts:

- Device A with no relationship with a tenant: HQ-AMS/PDU
- Device B with a relationship to tenant "Fork Industries": HQ-AMS/Fork
  Industries/PDU

The same logic applies to custom fields being used in the HG format:

```python
hostgroup_format = "site/mycustomfieldname"
```

For device A with the value "ABC123" in the custom field "mycustomfieldname" ->
HQ-AMS/ABC123.
For a device which does not have a value in the custom field "mycustomfieldname"
-> HQ-AMS.

Should there be a scenario where a custom field does not have a value under a
device, and the HG format only uses this single variable, then this will result
in an error:

```
hostgroup_format = "mycustomfieldname"

NetBox-Zabbix-sync - ERROR - ESXI1 has no reliable hostgroup. This is most likely due to the use of custom fields that are empty.
```

### Device status

By setting a status on a NetBox device you determine how the host is added (or
updated) in Zabbix. There are, by default, 3 options:

- Delete the host from Zabbix (triggered by NetBox status "Decommissioning" and
  "Inventory")
- Create the host in Zabbix but with a disabled status (triggered by "Offline",
  "Planned", "Staged" and "Failed")
- Create the host in Zabbix with an enabled status (for now only enabled with
  the "Active" status)

You can modify this behaviour by changing the following list variables in the
script:

- `zabbix_device_removal`
- `zabbix_device_disable`
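The defaults, as shipped in `config.py.example`:

```python
zabbix_device_removal = ["Decommissioning", "Inventory"]
zabbix_device_disable = ["Offline", "Planned", "Staged", "Failed"]
```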

### Zabbix Inventory

This script allows you to enable the inventory on managed Zabbix hosts and sync
NetBox device properties to the specified inventory fields. To map NetBox
information to Zabbix inventory fields, set `inventory_sync` to `True`.

You can set the inventory mode to "disabled", "manual" or "automatic" with the
`inventory_mode` variable. See
[Zabbix Manual](https://www.zabbix.com/documentation/current/en/manual/config/hosts/inventory#building-inventory)
for more information about the modes.

Use the `device_inventory_map` variable to map which NetBox properties are used in
which Zabbix inventory fields. For nested properties, you can use the '/'
separator. For example, the following map will assign the custom field
'mycustomfield' to the 'alias' Zabbix inventory field.

For Virtual Machines, use `vm_inventory_map`.

```python
inventory_sync = True
inventory_mode = "manual"
device_inventory_map = {"custom_fields/mycustomfield/name": "alias"}
vm_inventory_map = {"custom_fields/mycustomfield/name": "alias"}
```

See `config.py.example` for an extensive example map. Any Zabbix inventory fields
that are not included in the map will not be touched by the script, so you can
safely add manual values or use items to automatically add values to other
fields.

### Template source

You can either use a NetBox device type custom field or NetBox config context
for the Zabbix template information.

Using a custom field allows for only one template. You can assign multiple
templates to one host using the config context source. Should you make use of an
advanced templating structure with lots of nesting then I would recommend
sticking to the custom field.

You can change the behaviour in the config file. By default this setting is
false but you can set it to true to use config context:

```python
templates_config_context = True
```

After that make sure that for each host there is at least one template defined
in the config context in this format:

```json
{
    "zabbix": {
        "templates": [
@@ -155,41 +359,249 @@ After that make sure that for each host there is at least one template defined i
}
```

You can also opt for the default device type custom field behaviour but with the
added benefit of overwriting the template should a device in NetBox have a
device specific context defined. In this case the device specific context
template(s) will take priority over the device type custom field template.

```python
templates_config_context_overrule = True
```
### Tags

This script can sync host tags to your Zabbix hosts for use in filtering,
SLA calculations and event correlation.

Tags can be synced from the following sources:

1. NetBox device/vm tags
2. NetBox config context
3. NetBox fields

Syncing tags will override any tags that were set manually on the host,
making NetBox the single source-of-truth for managing tags.

To enable syncing, turn on tag_sync in the config file.
By default, this script will modify tag names and tag values to lowercase.
You can change this behaviour by setting tag_lower to False.

```python
tag_sync = True
tag_lower = True
```

#### Device tags

As NetBox doesn't follow the tag/value pattern for tags, we will need a tag
name set to register the NetBox tags.

By default the tag name is "NetBox", but you can change this to whatever you want.
The value for the tag can be set to 'name', 'display', or 'slug', which refers to
the property of the NetBox tag object that will be used as the value in Zabbix.

```python
tag_name = 'NetBox'
tag_value = 'name'
```

#### Config context

You can supply custom tags via config context by adding the following:

```json
{
    "zabbix": {
        "tags": [
            {
                "MyTagName": "MyTagValue"
            },
            {
                "environment": "production"
            }
        ]
    }
}
```

This will allow you to assign tags based on the config context rules.

#### NetBox Field

NetBox fields can also be used as input for tags, just like inventory and usermacros.
To enable syncing from fields, make sure to configure a `device_tag_map` and/or a `vm_tag_map`.

```python
device_tag_map = {"site/name": "site",
                  "rack/name": "rack",
                  "platform/name": "target"}

vm_tag_map = {"site/name": "site",
              "cluster/name": "cluster",
              "platform/name": "target"}
```

To turn off field syncing, set the maps to empty dictionaries:

```python
device_tag_map = {}
vm_tag_map = {}
```

### Usermacros

You can choose to use NetBox as a source for host usermacros by
enabling the following option in the configuration file:

```python
usermacro_sync = True
```

Please be advised that enabling this option will _clear_ any usermacros
manually set on the managed hosts and override them with the usermacros
from NetBox.

There are two NetBox sources that can be used to populate usermacros:

1. NetBox config context
2. NetBox fields

#### Config context

By defining a dictionary `usermacros` within the `zabbix` key in
config context, you can dynamically assign usermacro values based on
anything that you can target based on
[config contexts](https://netboxlabs.com/docs/netbox/en/stable/features/context-data/)
within NetBox.

Through this method, it is possible to define the following types of usermacros:

1. Text
2. Secret
3. Vault

The default macro type is text if no `type` and `value` have been set.
It is also possible to create usermacros with
[context](https://www.zabbix.com/documentation/7.0/en/manual/config/macros/user_macros_context).

Examples:

```json
{
    "zabbix": {
        "usermacros": {
            "{$USER_MACRO}": "test value",
            "{$CONTEXT_MACRO:\"test\"}": "test value",
            "{$CONTEXT_REGEX_MACRO:regex:\".*\"}": "test value",
            "{$SECRET_MACRO}": {
                "type": "secret",
                "value": "PaSsPhRaSe"
            },
            "{$VAULT_MACRO}": {
                "type": "vault",
                "value": "secret/vmware:password"
            },
            "{$USER_MACRO2}": {
                "type": "text",
                "value": "another test value"
            }
        }
    }
}
```

Please be aware that secret usermacros are only synced _once_ by default.
This is the default behavior because the Zabbix API won't return the value of
secrets, so the script cannot compare the values with those set in NetBox.

If you update a secret usermacro value, just remove the value from the host
in Zabbix and the new value will be synced during the next run.

Alternatively, you can set the following option in the config file:

```python
usermacro_sync = "full"
```

This will force a full usermacro sync on every run on hosts that have secret usermacros set.
That way, you will know for sure the secret values are always up to date.

Keep in mind that NetBox will show your secrets in plain text.
If true secrecy is required, consider switching to
[vault](https://www.zabbix.com/documentation/current/en/manual/config/macros/secret_macros#vault-secret)
usermacros.

#### NetBox Fields

To use NetBox fields as a source for usermacros, you will need to set up usermacro maps
for devices and/or virtual machines in the configuration file.
This method only supports `text` type usermacros.

For example:

```python
usermacro_sync = True
device_usermacro_map = {"serial": "{$HW_SERIAL}",
                        "role/name": "{$DEV_ROLE}",
                        "url": "{$NB_URL}",
                        "id": "{$NB_ID}"}
vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}",
                    "role/name": "{$DEV_ROLE}",
                    "url": "{$NB_URL}",
                    "id": "{$NB_ID}"}
```
## Permissions

### NetBox

Make sure that the NetBox user has proper permissions for device read and modify
(modify to set the Zabbix HostID custom field) operations. The user should also
have read-only access to the device types.

### Zabbix

Make sure that the Zabbix user has permissions to read hostgroups and proxy
servers. The user should have full rights on creating, modifying and deleting
hosts.

If you want to automatically create hostgroups then the create permission on
host-groups should also be applied.

### Custom links

To make the user experience easier you could add a custom link that redirects
users to the Zabbix latest data.

```
* Name: zabbix_latestData
* Text: {% if object.cf["zabbix_hostid"] %}Show host in Zabbix{% endif %}
* URL: http://myzabbixserver.local/zabbix.php?action=latest.view&hostids[]={{ object.cf["zabbix_hostid"] }}
```

## Running the script

```
python3 netbox_zabbix_sync.py
```

### Flags

| Flag | Option    | Description                           |
| ---- | --------- | ------------------------------------- |
| -v   | verbose   | Log with info on.                     |
| -vv  | debug     | Log with debugging on.                |
| -vvv | debug-all | Log with debugging on for all modules |
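For example, to run a sync with debug logging enabled:

```bash
python3 netbox_zabbix_sync.py -vv
```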

## Config context

### Zabbix proxy

You can set the proxy for a device using the 'proxy' key in config context.

```json
{
    "zabbix": {
@@ -197,31 +609,72 @@ You can set the proxy for a device using the 'proxy' key in config context.
    }
}
```

Because of the possible amount of destruction when setting up NetBox but
forgetting the proxy command, the sync works a bit differently. By default
everything is synced except in a situation where the Zabbix host has a proxy
configured but nothing is configured in NetBox. To force deletion and a full
sync, set the `full_proxy_sync` variable in the config file.
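A sketch of the corresponding `config.py` setting (the default in `config.py.example` further down this page is `False`):

```python
# Also remove proxies from Zabbix hosts when no proxy is configured in NetBox
full_proxy_sync = True
```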

It is now possible to specify proxy groups with the introduction of Proxy groups
in Zabbix 7. Specifying a group in the config context on older Zabbix releases
will have no impact and the script will ignore the statement.

```json
{
    "zabbix": {
        "proxy_group": "yourawesomeproxygroup.local"
    }
}
```

The script will prefer groups when specifying both a proxy and group. This is
done with the assumption that groups are more resilient and HA ready, making it
a more logical choice to use for proxy linkage. This also makes migrating from a
proxy to a proxy group easier since the group takes priority over the individual
proxy.

```json
{
    "zabbix": {
        "proxy": "yourawesomeproxy.local",
        "proxy_group": "yourawesomeproxygroup.local"
    }
}
```

In the example above the host will use the group on Zabbix 7. On Zabbix 6 and
below the host will use the proxy. Zabbix 7 will use the proxy value when
omitting the proxy_group value.

### Set interface parameters within NetBox

When adding a new device, you can set the interface type with custom context. By
default, the following configuration is applied when no config context is
provided:

- SNMPv2
- UDP 161
- Bulk requests enabled
- SNMP community: {$SNMP_COMMUNITY}

Due to Zabbix limitations of changing interface type with a linked template,
changing the interface type from within NetBox is not supported and the script
will generate an error.

For example, when changing a SNMP interface to an Agent interface:

```
NetBox-Zabbix-sync - WARNING - Device: Interface OUT of sync.
NetBox-Zabbix-sync - ERROR - Device: changing interface type to 1 is not supported.
```

To configure the interface parameters you'll need to use custom context. Custom
context was used to make this script as customizable as possible for each
environment. For example, you could:

- Set the custom context directly on a device
- Set the custom context on a tag, which you would add to a device (for
  instance, SNMPv3)
- Set the custom context on a device role
- Set the custom context on a site or region

##### Agent interface configuration example

```json
{
    "zabbix": {
@@ -230,7 +683,9 @@ To configure the interface parameters you'll need to use custom context. Custom
    }
}
```

##### SNMPv2 interface configuration example

```json
{
    "zabbix": {
@@ -244,7 +699,9 @@ To configure the interface parameters you'll need to use custom context. Custom
    }
}
```

##### SNMPv3 interface configuration example

```json
{
    "zabbix": {
@@ -261,6 +718,13 @@ To configure the interface parameters you'll need to use custom context. Custom
}
```

I would recommend using usermacros for sensitive data such as community strings
since the data in NetBox is plain-text.

> **_NOTE:_** Not all SNMP data is required for a working configuration.
> [The following parameters are allowed](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed") but
> are not all required, depending on your environment.
config.py.example
@@ -1,47 +1,153 @@
## Template logic.
# Set to true to enable the template source information
# coming from config context instead of a custom field.
templates_config_context = False

# Set to true to give config context templates a
# higher priority than custom field templates
templates_config_context_overrule = False

# Set template and device NetBox "custom field" names
# Template_cf is not used when templates_config_context is enabled
template_cf = "zabbix_template"
device_cf = "zabbix_hostid"

## Enable clustering of devices with virtual chassis setup
clustering = False

## Enable hostgroup generation. Requires permissions in Zabbix
create_hostgroups = True

## Create journal entries
create_journal = False

## Virtual machine sync
# Set sync_vms to True in order to use this new feature
# Use the hostgroup vm_hostgroup_format mapper for specific
# hostgroup attributes of VMs such as cluster_type and cluster
sync_vms = False
# Check the README documentation for values to use in the VM hostgroup format.
vm_hostgroup_format = "cluster_type/cluster/role"

## Proxy Sync
# Set to true to enable removal of proxies under hosts. Use with caution and make sure that you specified
# all the required proxies in the device config context before enabling this option.
# With this option disabled proxies will only be added and modified for Zabbix hosts.
full_proxy_sync = False

## NetBox to Zabbix device state conversion
zabbix_device_removal = ["Decommissioning", "Inventory"]
zabbix_device_disable = ["Offline", "Planned", "Staged", "Failed"]

## Hostgroup mapping
# See the README documentation for available options
# You can also use CF (custom field) names under the device. The CF content will be used for the hostgroup generation.
#
# When using region in the group name, the default behaviour is to use the name of the directly assigned region.
# By setting traverse_regions to True the full path of all parent regions will be used in the hostgroup, e.g.:
#
# 'Global/Europe/Netherlands/Amsterdam' instead of just 'Amsterdam'.
#
# traverse_site_groups controls the same behaviour for any assigned site_groups.
hostgroup_format = "site/manufacturer/role"
traverse_regions = False
traverse_site_groups = False

## Filtering
# Custom device filter, variable must be present but can be left empty with no filtering.
# A couple of examples:
# nb_device_filter = {} #No filter
# nb_device_filter = {"tag": "zabbix"} #Use a tag
# nb_device_filter = {"site": "HQ-AMS"} #Use a site name
# nb_device_filter = {"site": ["HQ-AMS", "HQ-FRA"]} #Device must be in either one of these sites
# nb_device_filter = {"site": "HQ-AMS", "tag": "zabbix", "role__n": ["PDU", "console-server"]} #Device must be in site HQ-AMS, have the tag zabbix and must not be part of the PDU or console-server role

# Default device filter, only get devices which have a name in NetBox:
nb_device_filter = {"name__n": "null"}
# Default filter for VMs
nb_vm_filter = {"name__n": "null"}

## Inventory
# See https://www.zabbix.com/documentation/current/en/manual/config/hosts/inventory#building-inventory
# Choice between disabled, manual or automatic.
# Make sure to select at least manual or automatic in use with the inventory_sync function.
inventory_mode = "disabled"

# To allow syncing of NetBox device properties, set inventory_sync to True
inventory_sync = False

# inventory_map is used to map NetBox properties to Zabbix Inventory fields.
# For nested properties, you can use the '/' separator.
# For example, the following map will assign the custom field 'mycustomfield' to the 'alias' Zabbix inventory field:
#
# device_inventory_map = { "custom_fields/mycustomfield/name": "alias"}
#
# The following maps should provide some nice defaults:
device_inventory_map = { "asset_tag": "asset_tag",
                         "virtual_chassis/name": "chassis",
                         "status/label": "deployment_status",
                         "location/name": "location",
                         "latitude": "location_lat",
                         "longitude": "location_lon",
                         "comments": "notes",
                         "name": "name",
                         "rack/name": "site_rack",
                         "serial": "serialno_a",
                         "device_type/model": "type",
                         "device_type/manufacturer/name": "vendor",
                         "oob_ip/address": "oob_ip" }

# We also support inventory mapping on Virtual Machines.
vm_inventory_map = { "status/label": "deployment_status",
                     "comments": "notes",
                     "name": "name" }

# To allow syncing of usermacros from NetBox, set to True.
# This will enable both field mapping and config context usermacros.
#
# If set to "full", it will force the update of secret usermacros every run.
# Please see the README.md for more information.
usermacro_sync = False

# device usermacro_map to map NetBox fields to usermacros.
device_usermacro_map = {"serial": "{$HW_SERIAL}",
                        "role/name": "{$DEV_ROLE}",
                        "url": "{$NB_URL}",
                        "id": "{$NB_ID}"}

# virtual machine usermacro_map to map NetBox fields to usermacros.
vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}",
                    "role/name": "{$DEV_ROLE}",
                    "url": "{$NB_URL}",
                    "id": "{$NB_ID}"}

# To sync host tags to Zabbix, set to True.
tag_sync = False

# Setting tag_lower to True will lower capital letters in tag names and values.
# This is more in line with the Zabbix way of working with tags.
#
# You can however set this to False to ensure capital letters are synced to Zabbix tags.
tag_lower = True

# We can sync NetBox device/VM tags to Zabbix, but as NetBox tags don't follow the key/value
# pattern, we need to specify a tag name to register the NetBox tags in Zabbix.
#
# If tag_name is set to False, we won't sync NetBox device/VM tags to Zabbix.
tag_name = 'NetBox'

# We can choose to use 'name', 'slug' or 'display' NetBox tag properties as a value in Zabbix.
# 'name' is used by default.
tag_value = "name"

# device tag_map to map NetBox fields to host tags.
device_tag_map = {"site/name": "site",
                  "rack/name": "rack",
                  "platform/name": "target"}

# Virtual machine tag_map to map NetBox fields to host tags.
vm_tag_map = {"site/name": "site",
              "cluster/name": "cluster",
              "platform/name": "target"}
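Because `modules/config.py` (shown below) merges your `config.py` over these defaults key by key, your own `config.py` only needs to contain the settings you want to change. A minimal, hypothetical example:

```python
# config.py - only overrides; everything else keeps the defaults above
templates_config_context = True
sync_vms = True
hostgroup_format = "site/role"
```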
128
modules/config.py
Normal file
128
modules/config.py
Normal file
@ -0,0 +1,128 @@
|
||||
"""
|
||||
Module for parsing configuration from the top level config.py file
|
||||
"""
|
||||
from pathlib import Path
|
||||
from importlib import util
|
||||
from os import environ, path
|
||||
from logging import getLogger
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
# PLEASE NOTE: This is a sample config file. Please do NOT make any edits in this file!
|
||||
# You should create your own config.py and it will overwrite the default config.
|
||||
|
||||
DEFAULT_CONFIG = {
|
||||
"templates_config_context": False,
|
||||
"templates_config_context_overrule": False,
|
||||
"template_cf": "zabbix_template",
|
||||
"device_cf": "zabbix_hostid",
|
||||
"clustering": False,
|
||||
"create_hostgroups": True,
|
||||
"create_journal": False,
|
||||
"sync_vms": False,
|
||||
"vm_hostgroup_format": "cluster_type/cluster/role",
|
||||
"full_proxy_sync": False,
|
||||
"zabbix_device_removal": ["Decommissioning", "Inventory"],
|
||||
"zabbix_device_disable": ["Offline", "Planned", "Staged", "Failed"],
|
||||
"hostgroup_format": "site/manufacturer/role",
|
||||
"traverse_regions": False,
|
||||
"traverse_site_groups": False,
|
||||
"nb_device_filter": {"name__n": "null"},
|
||||
"nb_vm_filter": {"name__n": "null"},
|
||||
"inventory_mode": "disabled",
|
||||
"inventory_sync": False,
|
||||
"device_inventory_map": {
|
||||
"asset_tag": "asset_tag",
|
||||
"virtual_chassis/name": "chassis",
|
||||
"status/label": "deployment_status",
|
||||
"location/name": "location",
|
||||
"latitude": "location_lat",
|
||||
"longitude": "location_lon",
|
||||
"comments": "notes",
|
||||
"name": "name",
|
||||
"rack/name": "site_rack",
|
||||
"serial": "serialno_a",
|
||||
"device_type/model": "type",
|
||||
"device_type/manufacturer/name": "vendor",
|
||||
"oob_ip/address": "oob_ip"
|
||||
},
|
||||
"vm_inventory_map": {
|
||||
"status/label": "deployment_status",
|
||||
"comments": "notes",
|
||||
"name": "name"
|
||||
},
|
||||
"usermacro_sync": False,
|
||||
"device_usermacro_map": {
|
||||
"serial": "{$HW_SERIAL}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"
|
||||
},
|
||||
"vm_usermacro_map": {
|
||||
"memory": "{$TOTAL_MEMORY}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"
|
||||
},
|
||||
"tag_sync": False,
|
||||
"tag_lower": True,
|
||||
"tag_name": 'NetBox',
|
||||
"tag_value": "name",
|
||||
"device_tag_map": {
|
||||
"site/name": "site",
|
||||
"rack/name": "rack",
|
||||
"platform/name": "target"
|
||||
},
|
||||
"vm_tag_map": {
|
||||
"site/name": "site",
|
||||
"cluster/name": "cluster",
|
||||
"platform/name": "target"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def load_config():
|
||||
"""Returns combined config from all sources"""
|
||||
# Overwrite default config with config.py
|
||||
conf = load_config_file(config_default=DEFAULT_CONFIG)
|
||||
# Overwrite default config and config.py with environment variables
|
||||
for key in conf:
|
||||
value_setting = load_env_variable(key)
|
||||
if value_setting is not None:
|
||||
conf[key] = value_setting
|
||||
return conf
|
||||
|
||||
|
||||
def load_env_variable(config_environvar):
|
||||
"""Returns config from environment variable"""
|
||||
prefix = "NBZX_"
|
||||
config_environvar = prefix + config_environvar.upper()
|
||||
if config_environvar in environ:
|
||||
return environ[config_environvar]
|
||||
return None
|
||||
|
||||
|
||||
def load_config_file(config_default, config_file="config.py"):
|
||||
"""Returns config from config.py file"""
|
||||
# Find the script path and config file next to it.
|
||||
script_dir = path.dirname(path.dirname(path.abspath(__file__)))
|
||||
config_path = Path(path.join(script_dir, config_file))
|
||||
|
||||
# If the script directory is not found, try the current working directory
|
||||
if not config_path.exists():
|
||||
config_path = Path(config_file)
|
||||
|
||||
# If both checks fail then fallback to the default config
|
||||
if not config_path.exists():
|
||||
return config_default
|
||||
|
||||
dconf = config_default.copy()
|
||||
# Dynamically import the config module
|
||||
spec = util.spec_from_file_location("config", config_path)
|
||||
config_module = util.module_from_spec(spec)
|
||||
spec.loader.exec_module(config_module)
|
||||
# Update DEFAULT_CONFIG with variables from the config module
|
||||
for key in dconf:
|
||||
if hasattr(config_module, key):
|
||||
dconf[key] = getattr(config_module, key)
|
||||
return dconf
|
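# --- Usage sketch (assumption: the module layout shown in this diff) ---------
# load_config() merges DEFAULT_CONFIG, an optional top-level config.py and any
# NBZX_-prefixed environment variables. Note that load_env_variable() returns
# the raw string from the environment, so non-string settings may need casting
# by the caller.
from os import environ
from modules.config import load_config

environ["NBZX_TEMPLATE_CF"] = "device_zabbix_template"   # hypothetical override
merged = load_config()
print(merged["template_cf"])   # "device_zabbix_template": env beats config.py and defaults
print(merged["sync_vms"])      # False unless overridden elsewhere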
956  modules/device.py  Normal file
@@ -0,0 +1,956 @@
|
||||
# pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation, too-many-lines, too-many-public-methods, duplicate-code
|
||||
"""
|
||||
Device-specific handling for NetBox to Zabbix
|
||||
"""
|
||||
|
||||
from copy import deepcopy
|
||||
from logging import getLogger
|
||||
from re import search
|
||||
from operator import itemgetter
|
||||
|
||||
from zabbix_utils import APIRequestError
|
||||
from pynetbox import RequestError as NetboxRequestError
|
||||
|
||||
from modules.exceptions import (
|
||||
InterfaceConfigError,
|
||||
SyncExternalError,
|
||||
SyncInventoryError,
|
||||
TemplateError,
|
||||
)
|
||||
from modules.hostgroups import Hostgroup
|
||||
from modules.interface import ZabbixInterface
|
||||
from modules.tags import ZabbixTags
|
||||
from modules.tools import field_mapper, remove_duplicates, sanatize_log_output
|
||||
from modules.usermacros import ZabbixUsermacros
|
||||
from modules.config import load_config
|
||||
|
||||
config = load_config()
|
||||
|
||||
class PhysicalDevice:
|
||||
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments
|
||||
"""
|
||||
Represents a network device.
|
||||
INPUT: (NetBox device class, ZabbixAPI class, journal flag, NB journal class)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, nb, zabbix, nb_journal_class, nb_version, journal=None, logger=None
|
||||
):
|
||||
self.nb = nb
|
||||
self.id = nb.id
|
||||
self.name = nb.name
|
||||
self.visible_name = None
|
||||
self.status = nb.status.label
|
||||
self.zabbix = zabbix
|
||||
self.zabbix_id = None
|
||||
self.group_ids = []
|
||||
self.nb_api_version = nb_version
|
||||
self.zbx_template_names = []
|
||||
self.zbx_templates = []
|
||||
self.hostgroups = []
|
||||
self.tenant = nb.tenant
|
||||
self.config_context = nb.config_context
|
||||
self.zbxproxy = None
|
||||
self.zabbix_state = 0
|
||||
self.journal = journal
|
||||
self.nb_journals = nb_journal_class
|
||||
self.inventory_mode = -1
|
||||
self.inventory = {}
|
||||
self.usermacros = []
|
||||
self.tags = {}
|
||||
self.logger = logger if logger else getLogger(__name__)
|
||||
self._setBasics()
|
||||
|
||||
def __repr__(self):
|
||||
return self.name
|
||||
|
||||
def __str__(self):
|
||||
return self.__repr__()
|
||||
|
||||
def _inventory_map(self):
|
||||
"""Use device inventory maps"""
|
||||
return config["device_inventory_map"]
|
||||
|
||||
def _usermacro_map(self):
|
||||
"""Use device inventory maps"""
|
||||
return config["device_usermacro_map"]
|
||||
|
||||
def _tag_map(self):
|
||||
"""Use device host tag maps"""
|
||||
return config["device_tag_map"]
|
||||
|
||||
def _setBasics(self):
|
||||
"""
|
||||
Sets basic information like IP address.
|
||||
"""
|
||||
# Return error if device does not have primary IP.
|
||||
if self.nb.primary_ip:
|
||||
self.cidr = self.nb.primary_ip.address
|
||||
self.ip = self.cidr.split("/")[0]
|
||||
else:
|
||||
e = f"Host {self.name}: no primary IP."
|
||||
self.logger.warning(e)
|
||||
raise SyncInventoryError(e)
|
||||
|
||||
# Check if device has custom field for ZBX ID
|
||||
if config["device_cf"] in self.nb.custom_fields:
|
||||
self.zabbix_id = self.nb.custom_fields[config["device_cf"]]
|
||||
else:
|
||||
e = f'Host {self.name}: Custom field {config["device_cf"]} not present'
|
||||
self.logger.warning(e)
|
||||
raise SyncInventoryError(e)
|
||||
|
||||
# Validate hostname format.
|
||||
odd_character_list = ["ä", "ö", "ü", "Ä", "Ö", "Ü", "ß"]
|
||||
self.use_visible_name = False
|
||||
if any(letter in self.name for letter in odd_character_list) or bool(
|
||||
search("[\u0400-\u04ff]", self.name)
|
||||
):
|
||||
self.name = f"NETBOX_ID{self.id}"
|
||||
self.visible_name = self.nb.name
|
||||
self.use_visible_name = True
|
||||
self.logger.info(
|
||||
f"Host {self.visible_name} contains special characters. "
|
||||
f"Using {self.name} as name for the NetBox object "
|
||||
f"and using {self.visible_name} as visible name in Zabbix."
|
||||
)
|
||||
|
||||
|
||||
def set_hostgroup(self, hg_format, nb_site_groups, nb_regions):
|
||||
"""Set the hostgroup for this device"""
|
||||
# Create new Hostgroup instance
|
||||
hg = Hostgroup(
|
||||
"dev",
|
||||
self.nb,
|
||||
self.nb_api_version,
|
||||
logger=self.logger,
|
||||
nested_sitegroup_flag=config['traverse_site_groups'],
|
||||
nested_region_flag=config['traverse_regions'],
|
||||
nb_groups=nb_site_groups,
|
||||
nb_regions=nb_regions,
|
||||
)
|
||||
# Generate hostgroup based on hostgroup format
|
||||
if isinstance(hg_format, list):
|
||||
self.hostgroups = [hg.generate(f) for f in hg_format]
|
||||
else:
|
||||
self.hostgroups.append(hg.generate(hg_format))
|
||||
|
||||
def set_template(self, prefer_config_context, overrule_custom):
|
||||
"""Set Template"""
|
||||
self.zbx_template_names = None
|
||||
# Gather templates ONLY from the device specific context
|
||||
if prefer_config_context:
|
||||
try:
|
||||
self.zbx_template_names = self.get_templates_context()
|
||||
except TemplateError as e:
|
||||
self.logger.warning(e)
|
||||
return True
|
||||
# Gather templates from the custom field but overrule
|
||||
# them should there be any device specific templates
|
||||
if overrule_custom:
|
||||
try:
|
||||
self.zbx_template_names = self.get_templates_context()
|
||||
except TemplateError:
|
||||
pass
|
||||
if not self.zbx_template_names:
|
||||
self.zbx_template_names = self.get_templates_cf()
|
||||
return True
|
||||
# Gather templates ONLY from the custom field
|
||||
self.zbx_template_names = self.get_templates_cf()
|
||||
return True
|
||||
|
||||
def get_templates_cf(self):
|
||||
"""Get template from custom field"""
|
||||
# Get Zabbix templates from the device type
|
||||
device_type_cfs = self.nb.device_type.custom_fields
|
||||
# Check if the ZBX Template CF is present
|
||||
if config["template_cf"] in device_type_cfs:
|
||||
# Set value to template
|
||||
return [device_type_cfs[config["template_cf"]]]
|
||||
# Custom field not found, return error
|
||||
e = (
|
||||
f"Custom field {config['template_cf']} not "
|
||||
f"found for {self.nb.device_type.manufacturer.name}"
|
||||
f" - {self.nb.device_type.display}."
|
||||
)
|
||||
self.logger.warning(e)
|
||||
raise TemplateError(e)
|
||||
|
||||
|
||||
|
||||
def get_templates_context(self):
|
||||
"""Get Zabbix templates from the device context"""
|
||||
if "zabbix" not in self.config_context:
|
||||
e = (
|
||||
f"Host {self.name}: Key 'zabbix' not found in config "
|
||||
"context for template lookup"
|
||||
)
|
||||
raise TemplateError(e)
|
||||
if "templates" not in self.config_context["zabbix"]:
|
||||
e = (
|
||||
f"Host {self.name}: Key 'templates' not found in config "
|
||||
"context 'zabbix' for template lookup"
|
||||
)
|
||||
raise TemplateError(e)
|
||||
# Check if format is list or string.
|
||||
if isinstance(self.config_context["zabbix"]["templates"], str):
|
||||
return [self.config_context["zabbix"]["templates"]]
|
||||
return self.config_context["zabbix"]["templates"]
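# --- Illustration (not part of the module) -----------------------------------
# Minimal sketch of the config context shape get_templates_context() expects;
# the template names below are hypothetical.
example_config_context = {
    "zabbix": {
        # Either a single template name ...
        "templates": "Linux by Zabbix agent",
        # ... or a list of names:
        # "templates": ["Linux by Zabbix agent", "ICMP Ping"],
    }
}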
|
||||
|
||||
def set_inventory(self, nbdevice):
|
||||
"""Set host inventory"""
|
||||
# Set inventory mode. Default is disabled (see class init function).
|
||||
if config["inventory_mode"] == "disabled":
|
||||
if config["inventory_sync"]:
|
||||
self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. "
|
||||
"Inventory sync is enabled in "
|
||||
"config but inventory mode is disabled.")
|
||||
return True
|
||||
if config["inventory_mode"] == "manual":
|
||||
self.inventory_mode = 0
|
||||
elif config["inventory_mode"] == "automatic":
|
||||
self.inventory_mode = 1
|
||||
else:
|
||||
self.logger.error(
|
||||
f"Host {self.name}: Specified value for inventory mode in"
|
||||
f" config is not valid. Got value {config['inventory_mode']}"
|
||||
)
|
||||
return False
|
||||
self.inventory = {}
|
||||
if config["inventory_sync"] and self.inventory_mode in [0, 1]:
|
||||
self.logger.debug(f"Host {self.name}: Starting inventory mapper")
|
||||
self.inventory = field_mapper(
|
||||
self.name, self._inventory_map(), nbdevice, self.logger
|
||||
)
|
||||
return True
|
||||
|
||||
def isCluster(self):
|
||||
"""
|
||||
Checks if device is part of cluster.
|
||||
"""
|
||||
return bool(self.nb.virtual_chassis)
|
||||
|
||||
def getClusterMaster(self):
|
||||
"""
|
||||
Returns chassis master ID.
|
||||
"""
|
||||
if not self.isCluster():
|
||||
e = (
|
||||
f"Unable to proces {self.name} for cluster calculation: "
|
||||
f"not part of a cluster."
|
||||
)
|
||||
self.logger.warning(e)
|
||||
raise SyncInventoryError(e)
|
||||
if not self.nb.virtual_chassis.master:
|
||||
e = (
|
||||
f"{self.name} is part of a NetBox virtual chassis which does "
|
||||
"not have a master configured. Skipping for this reason."
|
||||
)
|
||||
self.logger.error(e)
|
||||
raise SyncInventoryError(e)
|
||||
return self.nb.virtual_chassis.master.id
|
||||
|
||||
def promoteMasterDevice(self):
|
||||
"""
|
||||
If device is Primary in cluster,
|
||||
promote device name to the cluster name.
|
||||
Returns True if successful, False if the device is secondary.
|
||||
"""
|
||||
masterid = self.getClusterMaster()
|
||||
if masterid == self.id:
|
||||
self.logger.debug(
|
||||
f"Host {self.name} is primary cluster member. "
|
||||
f"Modifying hostname from {self.name} to "
|
||||
+ f"{self.nb.virtual_chassis.name}."
|
||||
)
|
||||
self.name = self.nb.virtual_chassis.name
|
||||
return True
|
||||
self.logger.debug(f"Host {self.name} is non-primary cluster member.")
|
||||
return False
|
||||
|
||||
def zbxTemplatePrepper(self, templates):
|
||||
"""
|
||||
Returns Zabbix template IDs
|
||||
INPUT: list of templates from Zabbix
|
||||
OUTPUT: True
|
||||
"""
|
||||
# Check if there are templates defined
|
||||
if not self.zbx_template_names:
|
||||
e = f"Host {self.name}: No templates found"
|
||||
self.logger.info(e)
|
||||
raise SyncInventoryError(e)
|
||||
# Set variable to empty list
|
||||
self.zbx_templates = []
|
||||
# Go through all templates defined in NetBox
|
||||
for nb_template in self.zbx_template_names:
|
||||
template_match = False
|
||||
# Go through all templates found in Zabbix
|
||||
for zbx_template in templates:
|
||||
# If the template names match
|
||||
if zbx_template["name"] == nb_template:
|
||||
# Set match variable to true, add template details
|
||||
# to class variable and return debug log
|
||||
template_match = True
|
||||
self.zbx_templates.append(
|
||||
{
|
||||
"templateid": zbx_template["templateid"],
|
||||
"name": zbx_template["name"],
|
||||
}
|
||||
)
|
||||
e = f"Host {self.name}: found template {zbx_template['name']}"
|
||||
self.logger.debug(e)
|
||||
# Return error should the template not be found in Zabbix
|
||||
if not template_match:
|
||||
e = (
|
||||
f"Unable to find template {nb_template} "
|
||||
f"for host {self.name} in Zabbix. Skipping host..."
|
||||
)
|
||||
self.logger.warning(e)
|
||||
raise SyncInventoryError(e)
|
||||
|
||||
def setZabbixGroupID(self, groups):
|
||||
"""
|
||||
Sets Zabbix group ID as instance variable
|
||||
INPUT: list of hostgroups
|
||||
OUTPUT: True / False
|
||||
"""
|
||||
# Go through all groups
|
||||
for hg in self.hostgroups:
|
||||
for group in groups:
|
||||
if group["name"] == hg:
|
||||
self.group_ids.append({"groupid": group["groupid"]})
|
||||
e = (
|
||||
f"Host {self.name}: matched group "
|
||||
f"\"{group['name']}\" (ID:{group['groupid']})"
|
||||
)
|
||||
self.logger.debug(e)
|
||||
if len(self.group_ids) == len(self.hostgroups):
|
||||
return True
|
||||
return False
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
Removes device from external resources.
|
||||
Resets custom fields in NetBox.
|
||||
"""
|
||||
if self.zabbix_id:
|
||||
try:
|
||||
# Check if the Zabbix host exists in Zabbix
|
||||
zbx_host = bool(
|
||||
self.zabbix.host.get(filter={"hostid": self.zabbix_id}, output=[])
|
||||
)
|
||||
e = (
|
||||
f"Host {self.name}: was already deleted from Zabbix."
|
||||
" Removed link in NetBox."
|
||||
)
|
||||
if zbx_host:
|
||||
# Delete the host if it exists
|
||||
self.zabbix.host.delete(self.zabbix_id)
|
||||
e = f"Host {self.name}: Deleted host from Zabbix."
|
||||
self._zeroize_cf()
|
||||
self.logger.info(e)
|
||||
self.create_journal_entry("warning", "Deleted host from Zabbix")
|
||||
except APIRequestError as e:
|
||||
message = f"Zabbix returned the following error: {str(e)}."
|
||||
self.logger.error(message)
|
||||
raise SyncExternalError(message) from e
|
||||
|
||||
def _zeroize_cf(self):
|
||||
"""Sets the hostID custom field in NetBox to zero,
|
||||
effectively destroying the link"""
|
||||
self.nb.custom_fields[config["device_cf"]] = None
|
||||
self.nb.save()
|
||||
|
||||
def _zabbixHostnameExists(self):
|
||||
"""
|
||||
Checks if hostname exists in Zabbix.
|
||||
"""
|
||||
# Validate the hostname or visible name field
|
||||
if not self.use_visible_name:
|
||||
zbx_filter = {"host": self.name}
|
||||
else:
|
||||
zbx_filter = {"name": self.visible_name}
|
||||
host = self.zabbix.host.get(filter=zbx_filter, output=[])
|
||||
return bool(host)
|
||||
|
||||
def setInterfaceDetails(self):
|
||||
"""
|
||||
Checks interface parameters from NetBox and
|
||||
creates a model for the interface to be used in Zabbix.
|
||||
"""
|
||||
try:
|
||||
# Initiate interface class
|
||||
interface = ZabbixInterface(self.nb.config_context, self.ip)
|
||||
# Check if NetBox has device context.
|
||||
# If not fall back to old config.
|
||||
if interface.get_context():
|
||||
# If the device is SNMP type, add additional information.
|
||||
if interface.interface["type"] == 2:
|
||||
interface.set_snmp()
|
||||
else:
|
||||
interface.set_default_snmp()
|
||||
return [interface.interface]
|
||||
except InterfaceConfigError as e:
|
||||
message = f"{self.name}: {e}"
|
||||
self.logger.warning(message)
|
||||
raise SyncInventoryError(message) from e
|
||||
|
||||
def set_usermacros(self):
|
||||
"""
|
||||
Generates Usermacros
|
||||
"""
|
||||
macros = ZabbixUsermacros(
|
||||
self.nb,
|
||||
self._usermacro_map(),
|
||||
config['usermacro_sync'],
|
||||
logger=self.logger,
|
||||
host=self.name,
|
||||
)
|
||||
if macros.sync is False:
|
||||
self.usermacros = []
|
||||
return True
|
||||
|
||||
self.usermacros = macros.generate()
|
||||
return True
|
||||
|
||||
def set_tags(self):
|
||||
"""
|
||||
Generates Host Tags
|
||||
"""
|
||||
tags = ZabbixTags(
|
||||
self.nb,
|
||||
self._tag_map(),
|
||||
config['tag_sync'],
|
||||
config['tag_lower'],
|
||||
tag_name=config['tag_name'],
|
||||
tag_value=config['tag_value'],
|
||||
logger=self.logger,
|
||||
host=self.name,
|
||||
)
|
||||
if tags.sync is False:
|
||||
self.tags = []
return True
|
||||
|
||||
self.tags = tags.generate()
|
||||
return True
|
||||
|
||||
def setProxy(self, proxy_list):
|
||||
"""
|
||||
Sets proxy or proxy group if this
|
||||
value has been defined in config context
|
||||
|
||||
input: List of all proxies and proxy groups in standardized format
|
||||
"""
|
||||
# check if the key Zabbix is defined in the config context
|
||||
if "zabbix" not in self.nb.config_context:
|
||||
return False
|
||||
if (
|
||||
"proxy" in self.nb.config_context["zabbix"]
|
||||
and not self.nb.config_context["zabbix"]["proxy"]
|
||||
):
|
||||
return False
|
||||
# Proxy group takes priority over a proxy due
|
||||
# to it being HA and therefore being more reliable
|
||||
# Includes proxy group fix since Zabbix <= 6 should ignore this
|
||||
proxy_types = ["proxy"]
|
||||
if str(self.zabbix.version).startswith("7"):
|
||||
# Only insert groups in front of list for Zabbix7
|
||||
proxy_types.insert(0, "proxy_group")
|
||||
for proxy_type in proxy_types:
|
||||
# Check if the key exists in NetBox CC
|
||||
if proxy_type in self.nb.config_context["zabbix"]:
|
||||
proxy_name = self.nb.config_context["zabbix"][proxy_type]
|
||||
# go through all proxies
|
||||
for proxy in proxy_list:
|
||||
# If the proxy does not match the type, ignore and continue
|
||||
if not proxy["type"] == proxy_type:
|
||||
continue
|
||||
# If the proxy name matches
|
||||
if proxy["name"] == proxy_name:
|
||||
self.logger.debug(
|
||||
f"Host {self.name}: using {proxy['type']}" f" {proxy_name}"
|
||||
)
|
||||
self.zbxproxy = proxy
|
||||
return True
|
||||
self.logger.warning(
|
||||
f"Host {self.name}: unable to find proxy {proxy_name}"
|
||||
)
|
||||
return False
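# --- Illustration (not part of the module) -----------------------------------
# Sketch of the config context keys read by setProxy(); the names are
# hypothetical. "proxy_group" is only honoured on Zabbix 7+, where it takes
# priority over "proxy".
example_config_context = {
    "zabbix": {
        "proxy": "zbx-proxy-dc1",
        # On Zabbix 7+ a proxy group can be used instead:
        # "proxy_group": "dc1-proxies",
    }
}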
|
||||
|
||||
def createInZabbix(
|
||||
self,
|
||||
groups,
|
||||
templates,
|
||||
proxies,
|
||||
description="Host added by NetBox sync script.",
|
||||
):
|
||||
"""
|
||||
Creates Zabbix host object with parameters from NetBox object.
|
||||
"""
|
||||
# Check if hostname is already present in Zabbix
|
||||
if not self._zabbixHostnameExists():
|
||||
# Set group and template ID's for host
|
||||
if not self.setZabbixGroupID(groups):
|
||||
e = (
|
||||
f"Unable to find group '{self.hostgroup}' "
|
||||
f"for host {self.name} in Zabbix."
|
||||
)
|
||||
self.logger.warning(e)
|
||||
raise SyncInventoryError(e)
|
||||
self.zbxTemplatePrepper(templates)
|
||||
templateids = []
|
||||
for template in self.zbx_templates:
|
||||
templateids.append({"templateid": template["templateid"]})
|
||||
# Set interface, group and template configuration
|
||||
interfaces = self.setInterfaceDetails()
|
||||
groups = self.group_ids
|
||||
# Set Zabbix proxy if defined
|
||||
self.setProxy(proxies)
|
||||
# Set basic data for host creation
|
||||
create_data = {
|
||||
"host": self.name,
|
||||
"name": self.visible_name,
|
||||
"status": self.zabbix_state,
|
||||
"interfaces": interfaces,
|
||||
"groups": groups,
|
||||
"templates": templateids,
|
||||
"description": description,
|
||||
"inventory_mode": self.inventory_mode,
|
||||
"inventory": self.inventory,
|
||||
"macros": self.usermacros,
|
||||
"tags": self.tags,
|
||||
}
|
||||
# If a Zabbix proxy or Zabbix Proxy group has been defined
|
||||
if self.zbxproxy:
|
||||
# If a lower version than 7 is used, we can assume that
|
||||
# the proxy is a normal proxy and not a proxy group
|
||||
if not str(self.zabbix.version).startswith("7"):
|
||||
create_data["proxy_hostid"] = self.zbxproxy["id"]
|
||||
else:
|
||||
# Configure either a proxy or proxy group
|
||||
create_data[self.zbxproxy["idtype"]] = self.zbxproxy["id"]
|
||||
create_data["monitored_by"] = self.zbxproxy["monitored_by"]
|
||||
# Add host to Zabbix
|
||||
try:
|
||||
host = self.zabbix.host.create(**create_data)
|
||||
self.zabbix_id = host["hostids"][0]
|
||||
except APIRequestError as e:
|
||||
msg = f"Host {self.name}: Couldn't create. Zabbix returned {str(e)}."
|
||||
self.logger.error(msg)
|
||||
raise SyncExternalError(msg) from e
|
||||
# Set NetBox custom field to hostID value.
|
||||
self.nb.custom_fields[config["device_cf"]] = int(self.zabbix_id)
|
||||
self.nb.save()
|
||||
msg = f"Host {self.name}: Created host in Zabbix."
|
||||
self.logger.info(msg)
|
||||
self.create_journal_entry("success", msg)
|
||||
else:
|
||||
self.logger.error(
|
||||
f"Host {self.name}: Unable to add to Zabbix. Host already present."
|
||||
)
|
||||
|
||||
def createZabbixHostgroup(self, hostgroups):
|
||||
"""
|
||||
Creates Zabbix host group based on hostgroup format.
|
||||
Creates multiple when using a nested format.
|
||||
"""
|
||||
final_data = []
|
||||
# Check if the hostgroup is in a nested format and check each parent
|
||||
for hostgroup in self.hostgroups:
|
||||
for pos in range(len(hostgroup.split("/"))):
|
||||
zabbix_hg = hostgroup.rsplit("/", pos)[0]
|
||||
if self.lookupZabbixHostgroup(hostgroups, zabbix_hg):
|
||||
# Hostgroup already exists
|
||||
continue
|
||||
# Create new group
|
||||
try:
|
||||
# API call to Zabbix
|
||||
groupid = self.zabbix.hostgroup.create(name=zabbix_hg)
|
||||
e = f"Hostgroup '{zabbix_hg}': created in Zabbix."
|
||||
self.logger.info(e)
|
||||
# Add group to final data
|
||||
final_data.append(
|
||||
{"groupid": groupid["groupids"][0], "name": zabbix_hg}
|
||||
)
|
||||
except APIRequestError as e:
|
||||
msg = f"Hostgroup '{zabbix_hg}': unable to create. Zabbix returned {str(e)}."
|
||||
self.logger.error(msg)
|
||||
raise SyncExternalError(msg) from e
|
||||
return final_data
|
||||
|
||||
def lookupZabbixHostgroup(self, group_list, lookup_group):
|
||||
"""
|
||||
Function to check if a hostgroup
|
||||
exists in a list of Zabbix hostgroups
|
||||
INPUT: Group list and group lookup
|
||||
OUTPUT: Boolean
|
||||
"""
|
||||
for group in group_list:
|
||||
if group["name"] == lookup_group:
|
||||
return True
|
||||
return False
|
||||
|
||||
def updateZabbixHost(self, **kwargs):
|
||||
"""
|
||||
Updates Zabbix host with given parameters.
|
||||
INPUT: Key word arguments for Zabbix host object.
|
||||
"""
|
||||
try:
|
||||
self.zabbix.host.update(hostid=self.zabbix_id, **kwargs)
|
||||
except APIRequestError as e:
|
||||
e = (
|
||||
f"Host {self.name}: Unable to update. "
|
||||
f"Zabbix returned the following error: {str(e)}."
|
||||
)
|
||||
self.logger.error(e)
|
||||
raise SyncExternalError(e) from None
|
||||
self.logger.info(f"Host {self.name}: updated with data {sanatize_log_output(kwargs)}.")
|
||||
self.create_journal_entry("info", "Updated host in Zabbix with latest NB data.")
|
||||
|
||||
def ConsistencyCheck(
|
||||
self, groups, templates, proxies, proxy_power, create_hostgroups
|
||||
):
|
||||
# pylint: disable=too-many-branches, too-many-statements
|
||||
"""
|
||||
Checks if Zabbix object is still valid with NetBox parameters.
|
||||
"""
|
||||
# If group is found or if the hostgroup is nested
|
||||
if not self.setZabbixGroupID(groups): # or len(self.hostgroups.split("/")) > 1:
|
||||
if create_hostgroups:
|
||||
# Script is allowed to create a new hostgroup
|
||||
new_groups = self.createZabbixHostgroup(groups)
|
||||
for group in new_groups:
|
||||
# Add all new groups to the list of groups
|
||||
groups.append(group)
|
||||
# check if the initial group was not already found (and this is a nested folder check)
|
||||
if not self.group_ids:
|
||||
# Function returns true / false but also sets GroupID
|
||||
if not self.setZabbixGroupID(groups) and not create_hostgroups:
|
||||
e = (
|
||||
f"Host {self.name}: different hostgroup is required but "
|
||||
"unable to create hostgroup without generation permission."
|
||||
)
|
||||
self.logger.warning(e)
|
||||
raise SyncInventoryError(e)
|
||||
#if self.group_ids:
|
||||
# self.group_ids.append(self.pri_group_id)
|
||||
|
||||
# Prepare templates and proxy config
|
||||
self.zbxTemplatePrepper(templates)
|
||||
self.setProxy(proxies)
|
||||
# Get host object from Zabbix
|
||||
host = self.zabbix.host.get(
|
||||
filter={"hostid": self.zabbix_id},
|
||||
selectInterfaces=["type", "ip", "port", "details", "interfaceid"],
|
||||
selectGroups=["groupid"],
|
||||
selectHostGroups=["groupid"],
|
||||
selectParentTemplates=["templateid"],
|
||||
selectInventory=list(self._inventory_map().values()),
|
||||
selectMacros=["macro", "value", "type", "description"],
|
||||
selectTags=["tag", "value"],
|
||||
)
|
||||
if len(host) > 1:
|
||||
e = (
|
||||
f"Got {len(host)} results for Zabbix hosts "
|
||||
f"with ID {self.zabbix_id} - hostname {self.name}."
|
||||
)
|
||||
self.logger.error(e)
|
||||
raise SyncInventoryError(e)
|
||||
if len(host) == 0:
|
||||
e = (
|
||||
f"Host {self.name}: No Zabbix host found. "
|
||||
f"This is likely the result of a deleted Zabbix host "
|
||||
f"without zeroing the ID field in NetBox."
|
||||
)
|
||||
self.logger.error(e)
|
||||
raise SyncInventoryError(e)
|
||||
host = host[0]
|
||||
if host["host"] == self.name:
|
||||
self.logger.debug(f"Host {self.name}: hostname in-sync.")
|
||||
else:
|
||||
self.logger.warning(
|
||||
f"Host {self.name}: hostname OUT of sync. "
|
||||
f"Received value: {host['host']}"
|
||||
)
|
||||
self.updateZabbixHost(host=self.name)
|
||||
|
||||
# Execute check depending on whether the name is special or not
|
||||
if self.use_visible_name:
|
||||
if host["name"] == self.visible_name:
|
||||
self.logger.debug(f"Host {self.name}: visible name in-sync.")
|
||||
else:
|
||||
self.logger.warning(
|
||||
f"Host {self.name}: visible name OUT of sync."
|
||||
f" Received value: {host['name']}"
|
||||
)
|
||||
self.updateZabbixHost(name=self.visible_name)
|
||||
|
||||
# Check if the templates are in-sync
|
||||
if not self.zbx_template_comparer(host["parentTemplates"]):
|
||||
self.logger.warning(f"Host {self.name}: template(s) OUT of sync.")
|
||||
# Prepare Templates for API parsing
|
||||
templateids = []
|
||||
for template in self.zbx_templates:
|
||||
templateids.append({"templateid": template["templateid"]})
|
||||
# Update Zabbix with NB templates and clear any old / lost templates
|
||||
self.updateZabbixHost(
|
||||
templates_clear=host["parentTemplates"], templates=templateids
|
||||
)
|
||||
else:
|
||||
self.logger.debug(f"Host {self.name}: template(s) in-sync.")
|
||||
|
||||
# Check if Zabbix version is 6 or higher. Issue #93
|
||||
group_dictname = "hostgroups"
|
||||
if str(self.zabbix.version).startswith(("6", "5")):
|
||||
group_dictname = "groups"
|
||||
# Check if hostgroups match
|
||||
if (sorted(host[group_dictname], key=itemgetter('groupid')) ==
|
||||
sorted(self.group_ids, key=itemgetter('groupid'))):
|
||||
self.logger.debug(f"Host {self.name}: hostgroups in-sync.")
|
||||
else:
|
||||
self.logger.warning(f"Host {self.name}: hostgroups OUT of sync.")
|
||||
self.updateZabbixHost(groups=self.group_ids)
|
||||
|
||||
if int(host["status"]) == self.zabbix_state:
|
||||
self.logger.debug(f"Host {self.name}: status in-sync.")
|
||||
else:
|
||||
self.logger.warning(f"Host {self.name}: status OUT of sync.")
|
||||
self.updateZabbixHost(status=str(self.zabbix_state))
|
||||
|
||||
# Check if a proxy has been defined
|
||||
if self.zbxproxy:
|
||||
# Check if proxy or proxy group is defined
|
||||
if (self.zbxproxy["idtype"] in host and
|
||||
host[self.zbxproxy["idtype"]] == self.zbxproxy["id"]):
|
||||
self.logger.debug(f"Host {self.name}: proxy in-sync.")
|
||||
# Backwards compatibility for Zabbix <= 6
|
||||
elif "proxy_hostid" in host and host["proxy_hostid"] == self.zbxproxy["id"]:
|
||||
self.logger.debug(f"Host {self.name}: proxy in-sync.")
|
||||
# Proxy does not match, update Zabbix
|
||||
else:
|
||||
self.logger.warning(f"Host {self.name}: proxy OUT of sync.")
|
||||
# Zabbix <= 6 patch
|
||||
if not str(self.zabbix.version).startswith("7"):
|
||||
self.updateZabbixHost(proxy_hostid=self.zbxproxy["id"])
|
||||
# Zabbix 7+
|
||||
else:
|
||||
# Prepare data structure for updating either proxy or group
|
||||
update_data = {
|
||||
self.zbxproxy["idtype"]: self.zbxproxy["id"],
|
||||
"monitored_by": self.zbxproxy["monitored_by"],
|
||||
}
|
||||
self.updateZabbixHost(**update_data)
|
||||
else:
|
||||
# No proxy is defined in NetBox
|
||||
proxy_set = False
|
||||
# Check if a proxy is defined. Uses the proxy_hostid key for backwards compatibility
|
||||
for key in ("proxy_hostid", "proxyid", "proxy_groupid"):
|
||||
if key in host:
|
||||
if bool(int(host[key])):
|
||||
proxy_set = True
|
||||
if proxy_power and proxy_set:
|
||||
# Zabbix <= 6 fix
|
||||
self.logger.warning(
|
||||
f"Host {self.name}: no proxy is configured in NetBox "
|
||||
"but is configured in Zabbix. Removing proxy config in Zabbix"
|
||||
)
|
||||
if "proxy_hostid" in host and bool(host["proxy_hostid"]):
|
||||
self.updateZabbixHost(proxy_hostid=0)
|
||||
# Zabbix 7 proxy
|
||||
elif "proxyid" in host and bool(host["proxyid"]):
|
||||
self.updateZabbixHost(proxyid=0, monitored_by=0)
|
||||
# Zabbix 7 proxy group
|
||||
elif "proxy_groupid" in host and bool(host["proxy_groupid"]):
|
||||
self.updateZabbixHost(proxy_groupid=0, monitored_by=0)
|
||||
# Checks if a proxy has been defined in Zabbix and if proxy_power config has been set
|
||||
if proxy_set and not proxy_power:
|
||||
# Display error message
|
||||
self.logger.error(
|
||||
f"Host {self.name} is configured "
|
||||
f"with proxy in Zabbix but not in NetBox. The"
|
||||
" -p flag was ommited: no "
|
||||
"changes have been made."
|
||||
)
|
||||
if not proxy_set:
|
||||
self.logger.debug(f"Host {self.name}: proxy in-sync.")
|
||||
# Check host inventory mode
|
||||
if str(host["inventory_mode"]) == str(self.inventory_mode):
|
||||
self.logger.debug(f"Host {self.name}: inventory_mode in-sync.")
|
||||
else:
|
||||
self.logger.warning(f"Host {self.name}: inventory_mode OUT of sync.")
|
||||
self.updateZabbixHost(inventory_mode=str(self.inventory_mode))
|
||||
if config["inventory_sync"] and self.inventory_mode in [0, 1]:
|
||||
# Check host inventory mapping
|
||||
if host["inventory"] == self.inventory:
|
||||
self.logger.debug(f"Host {self.name}: inventory in-sync.")
|
||||
else:
|
||||
self.logger.warning(f"Host {self.name}: inventory OUT of sync.")
|
||||
self.updateZabbixHost(inventory=self.inventory)
|
||||
|
||||
# Check host usermacros
|
||||
if config['usermacro_sync']:
|
||||
# Make a full copy since we don't want to lose the original value
# of secret-type macros from NetBox
|
||||
netbox_macros = deepcopy(self.usermacros)
|
||||
# Set the sync bit
|
||||
full_sync_bit = bool(str(config['usermacro_sync']).lower() == "full")
|
||||
for macro in netbox_macros:
|
||||
# If the Macro is a secret and full sync is NOT activated
|
||||
if macro["type"] == str(1) and not full_sync_bit:
|
||||
# Remove the value as the Zabbix api does not return the value key
|
||||
# This is required when you want to do a diff between both lists
|
||||
macro.pop("value")
|
||||
# Sort all lists
|
||||
def filter_with_macros(macro):
|
||||
return macro["macro"]
|
||||
host["macros"].sort(key=filter_with_macros)
|
||||
netbox_macros.sort(key=filter_with_macros)
|
||||
# Check if both lists are the same
|
||||
if host["macros"] == netbox_macros:
|
||||
self.logger.debug(f"Host {self.name}: usermacros in-sync.")
|
||||
else:
|
||||
self.logger.warning(f"Host {self.name}: usermacros OUT of sync.")
|
||||
# Update Zabbix with NetBox usermacros
|
||||
self.updateZabbixHost(macros=self.usermacros)
|
||||
|
||||
# Check host tags
|
||||
if config['tag_sync']:
|
||||
if remove_duplicates(host["tags"], sortkey="tag") == self.tags:
|
||||
self.logger.debug(f"Host {self.name}: tags in-sync.")
|
||||
else:
|
||||
self.logger.warning(f"Host {self.name}: tags OUT of sync.")
|
||||
self.updateZabbixHost(tags=self.tags)
|
||||
|
||||
# If only 1 interface has been found
|
||||
# pylint: disable=too-many-nested-blocks
|
||||
if len(host["interfaces"]) == 1:
|
||||
updates = {}
|
||||
# Go through each key / item and check if it matches Zabbix
|
||||
for key, item in self.setInterfaceDetails()[0].items():
|
||||
# Check if NetBox value is found in Zabbix
|
||||
if key in host["interfaces"][0]:
|
||||
# If SNMP is used, go through nested dict
|
||||
# to compare SNMP parameters
|
||||
if isinstance(item, dict) and key == "details":
|
||||
for k, i in item.items():
|
||||
if k in host["interfaces"][0][key]:
|
||||
# Set update if values don't match
|
||||
if host["interfaces"][0][key][k] != str(i):
|
||||
# If dict has not been created, add it
|
||||
if key not in updates:
|
||||
updates[key] = {}
|
||||
updates[key][k] = str(i)
|
||||
# If SNMP version has been changed
|
||||
# break loop and force full SNMP update
|
||||
if k == "version":
|
||||
break
|
||||
# Force full SNMP config update
|
||||
# when version has changed.
|
||||
if key in updates:
|
||||
if "version" in updates[key]:
|
||||
for k, i in item.items():
|
||||
updates[key][k] = str(i)
|
||||
continue
|
||||
# Set update if values don't match
|
||||
if host["interfaces"][0][key] != str(item):
|
||||
updates[key] = item
|
||||
if updates:
|
||||
# If interface updates have been found: push to Zabbix
|
||||
self.logger.warning(f"Host {self.name}: Interface OUT of sync.")
|
||||
if "type" in updates:
|
||||
# Changing interface type not supported. Raise exception.
|
||||
e = (
|
||||
f"Host {self.name}: changing interface type to "
|
||||
f"{str(updates['type'])} is not supported."
|
||||
)
|
||||
self.logger.error(e)
|
||||
raise InterfaceConfigError(e)
|
||||
# Set interfaceID for Zabbix config
|
||||
updates["interfaceid"] = host["interfaces"][0]["interfaceid"]
|
||||
try:
|
||||
# API call to Zabbix
|
||||
self.zabbix.hostinterface.update(updates)
|
||||
e = (f"Host {self.name}: updated interface "
|
||||
f"with data {sanatize_log_output(updates)}.")
|
||||
self.logger.info(e)
|
||||
self.create_journal_entry("info", e)
|
||||
except APIRequestError as e:
|
||||
msg = f"Zabbix returned the following error: {str(e)}."
|
||||
self.logger.error(msg)
|
||||
raise SyncExternalError(msg) from e
|
||||
else:
|
||||
# If no updates are found, Zabbix interface is in-sync
|
||||
e = f"Host {self.name}: interface in-sync."
|
||||
self.logger.debug(e)
|
||||
else:
|
||||
e = (
|
||||
f"Host {self.name} has unsupported interface configuration."
|
||||
f" Host has total of {len(host['interfaces'])} interfaces. "
|
||||
"Manual intervention required."
|
||||
)
|
||||
self.logger.error(e)
|
||||
raise SyncInventoryError(e)
|
||||
|
||||
def create_journal_entry(self, severity, message):
|
||||
"""
|
||||
Send a new journal entry to NetBox. Useful for viewing actions
|
||||
in NetBox without having to look in Zabbix or the script log output
|
||||
"""
|
||||
if self.journal:
|
||||
# Check if the severity is valid
|
||||
if severity not in ["info", "success", "warning", "danger"]:
|
||||
self.logger.warning(
|
||||
f"Value {severity} not valid for NB journal entries."
|
||||
)
|
||||
return False
|
||||
journal = {
|
||||
"assigned_object_type": "dcim.device",
|
||||
"assigned_object_id": self.id,
|
||||
"kind": severity,
|
||||
"comments": message,
|
||||
}
|
||||
try:
|
||||
self.nb_journals.create(journal)
|
||||
self.logger.debug(f"Host {self.name}: Created journal entry in NetBox")
|
||||
return True
|
||||
except NetboxRequestError as e:
|
||||
self.logger.warning(
|
||||
"Unable to create journal entry for "
|
||||
f"{self.name}: NB returned {e}"
|
||||
)
|
||||
return False
|
||||
return False
|
||||
|
||||
def zbx_template_comparer(self, tmpls_from_zabbix):
|
||||
"""
|
||||
Compares the NetBox and Zabbix templates with each other.
|
||||
Should there be a mismatch then the function will return false
|
||||
|
||||
INPUT: list of NB and ZBX templates
|
||||
OUTPUT: Boolean True/False
|
||||
"""
|
||||
succesfull_templates = []
|
||||
# Go through each NetBox template
|
||||
for nb_tmpl in self.zbx_templates:
|
||||
# Go through each Zabbix template
|
||||
for pos, zbx_tmpl in enumerate(tmpls_from_zabbix):
|
||||
# Check if template IDs match
|
||||
if nb_tmpl["templateid"] == zbx_tmpl["templateid"]:
|
||||
# Templates match. Remove this template from the Zabbix templates
|
||||
# and add this NB template to the list of successful templates
|
||||
tmpls_from_zabbix.pop(pos)
|
||||
succesfull_templates.append(nb_tmpl)
|
||||
self.logger.debug(
|
||||
f"Host {self.name}: template "
|
||||
f"{nb_tmpl['name']} is present in Zabbix."
|
||||
)
|
||||
break
|
||||
if (
|
||||
len(succesfull_templates) == len(self.zbx_templates)
|
||||
and len(tmpls_from_zabbix) == 0
|
||||
):
|
||||
# All of the NetBox templates have been confirmed as successful
|
||||
# and the ZBX template list is empty. This means that
|
||||
# all of the templates match.
|
||||
return True
|
||||
return False
|
48  modules/exceptions.py  Normal file
@@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
All custom exceptions used for Exception generation
|
||||
"""
|
||||
|
||||
|
||||
class SyncError(Exception):
|
||||
"""Class SyncError"""
|
||||
|
||||
|
||||
class JournalError(Exception):
|
||||
"""Class SyncError"""
|
||||
|
||||
|
||||
class SyncExternalError(SyncError):
|
||||
"""Class SyncExternalError"""
|
||||
|
||||
|
||||
class SyncInventoryError(SyncError):
|
||||
"""Class SyncInventoryError"""
|
||||
|
||||
|
||||
class SyncDuplicateError(SyncError):
|
||||
"""Class SyncDuplicateError"""
|
||||
|
||||
|
||||
class EnvironmentVarError(SyncError):
|
||||
"""Class EnvironmentVarError"""
|
||||
|
||||
|
||||
class InterfaceConfigError(SyncError):
|
||||
"""Class InterfaceConfigError"""
|
||||
|
||||
|
||||
class ProxyConfigError(SyncError):
|
||||
"""Class ProxyConfigError"""
|
||||
|
||||
|
||||
class HostgroupError(SyncError):
|
||||
"""Class HostgroupError"""
|
||||
|
||||
|
||||
class TemplateError(SyncError):
|
||||
"""Class TemplateError"""
|
||||
|
||||
|
||||
class UsermacroError(SyncError):
|
||||
"""Class UsermacroError"""
|
196  modules/hostgroups.py  Normal file
@@ -0,0 +1,196 @@
|
||||
"""Module for all hostgroup related code"""
|
||||
|
||||
from logging import getLogger
|
||||
|
||||
from modules.exceptions import HostgroupError
|
||||
from modules.tools import build_path
|
||||
|
||||
|
||||
class Hostgroup:
|
||||
"""Hostgroup class for devices and VM's
|
||||
Takes type (vm or dev) and NB object"""
|
||||
|
||||
# pylint: disable=too-many-arguments, disable=too-many-positional-arguments
|
||||
def __init__(
|
||||
self,
|
||||
obj_type,
|
||||
nb_obj,
|
||||
version,
|
||||
logger=None,
|
||||
nested_sitegroup_flag=False,
|
||||
nested_region_flag=False,
|
||||
nb_regions=None,
|
||||
nb_groups=None,
|
||||
):
|
||||
self.logger = logger if logger else getLogger(__name__)
|
||||
if obj_type not in ("vm", "dev"):
|
||||
msg = f"Unable to create hostgroup with type {type}"
|
||||
self.logger.error()
|
||||
raise HostgroupError(msg)
|
||||
self.type = str(obj_type)
|
||||
self.nb = nb_obj
|
||||
self.name = self.nb.name
|
||||
self.nb_version = version
|
||||
# Used for nested data objects
|
||||
self.set_nesting(
|
||||
nested_sitegroup_flag, nested_region_flag, nb_groups, nb_regions
|
||||
)
|
||||
self._set_format_options()
|
||||
|
||||
def __str__(self):
|
||||
return f"Hostgroup for {self.type} {self.name}"
|
||||
|
||||
def __repr__(self):
|
||||
return self.__str__()
|
||||
|
||||
def _set_format_options(self):
|
||||
"""
|
||||
Set all available variables
|
||||
for hostgroup generation
|
||||
"""
|
||||
format_options = {}
|
||||
# Set variables for both type of devices
|
||||
if self.type in ("vm", "dev"):
|
||||
# Role fix for NetBox <=3
|
||||
role = None
|
||||
if self.nb_version.startswith(("2", "3")) and self.type == "dev":
|
||||
role = self.nb.device_role.name if self.nb.device_role else None
|
||||
else:
|
||||
role = self.nb.role.name if self.nb.role else None
|
||||
# Add default formatting options
|
||||
# Check if a site is configured. A site is optional for VMs
|
||||
format_options["region"] = None
|
||||
format_options["site_group"] = None
|
||||
if self.nb.site:
|
||||
if self.nb.site.region:
|
||||
format_options["region"] = self.generate_parents(
|
||||
"region", str(self.nb.site.region)
|
||||
)
|
||||
if self.nb.site.group:
|
||||
format_options["site_group"] = self.generate_parents(
|
||||
"site_group", str(self.nb.site.group)
|
||||
)
|
||||
format_options["role"] = role
|
||||
format_options["site"] = self.nb.site.name if self.nb.site else None
|
||||
format_options["tenant"] = str(self.nb.tenant) if self.nb.tenant else None
|
||||
format_options["tenant_group"] = (
|
||||
str(self.nb.tenant.group) if self.nb.tenant else None
|
||||
)
|
||||
format_options["platform"] = (
|
||||
self.nb.platform.name if self.nb.platform else None
|
||||
)
|
||||
# Variables only applicable for devices
|
||||
if self.type == "dev":
|
||||
format_options["manufacturer"] = self.nb.device_type.manufacturer.name
|
||||
format_options["location"] = (
|
||||
str(self.nb.location) if self.nb.location else None
|
||||
)
|
||||
format_options["rack"] = self.nb.rack.name if self.nb.rack else None
|
||||
# Variables only applicable for VM's
|
||||
if self.type == "vm":
|
||||
# Check if a cluster is configured. Could also be configured in a site.
|
||||
if self.nb.cluster:
|
||||
format_options["cluster"] = self.nb.cluster.name
|
||||
format_options["cluster_type"] = self.nb.cluster.type.name
|
||||
self.format_options = format_options
|
||||
|
||||
def set_nesting(
|
||||
self, nested_sitegroup_flag, nested_region_flag, nb_groups, nb_regions
|
||||
):
|
||||
"""Set nesting options for this Hostgroup"""
|
||||
self.nested_objects = {
|
||||
"site_group": {"flag": nested_sitegroup_flag, "data": nb_groups},
|
||||
"region": {"flag": nested_region_flag, "data": nb_regions},
|
||||
}
|
||||
|
||||
def generate(self, hg_format=None):
|
||||
"""Generate hostgroup based on a provided format"""
|
||||
# Set format to default in case it's not specified
|
||||
if not hg_format:
|
||||
hg_format = (
|
||||
"site/manufacturer/role" if self.type == "dev" else "cluster/role"
|
||||
)
|
||||
# Split all given names
|
||||
hg_output = []
|
||||
hg_items = hg_format.split("/")
|
||||
for hg_item in hg_items:
|
||||
# Check if requested data is available as option for this host
|
||||
if hg_item not in self.format_options:
|
||||
# Check if a custom field exists with this name
|
||||
cf_data = self.custom_field_lookup(hg_item)
|
||||
# CF does not exist
|
||||
if not cf_data["result"]:
|
||||
msg = (
|
||||
f"Unable to generate hostgroup for host {self.name}. "
|
||||
f"Item type {hg_item} not supported."
|
||||
)
|
||||
self.logger.error(msg)
|
||||
raise HostgroupError(msg)
|
||||
# CF data is populated
|
||||
if cf_data["cf"]:
|
||||
hg_output.append(cf_data["cf"])
|
||||
continue
|
||||
# Check if there is a value associated to the variable.
|
||||
# For instance, if a device has no location, do not use it with hostgroup calculation
|
||||
hostgroup_value = self.format_options[hg_item]
|
||||
if hostgroup_value:
|
||||
hg_output.append(hostgroup_value)
|
||||
# Check if the hostgroup is populated with at least one item.
|
||||
if bool(hg_output):
|
||||
return "/".join(hg_output)
|
||||
msg = (
|
||||
f"Unable to generate hostgroup for host {self.name}."
|
||||
" Not enough valid items. This is most likely"
|
||||
" due to the use of custom fields that are empty"
|
||||
" or an invalid hostgroup format."
|
||||
)
|
||||
self.logger.error(msg)
|
||||
raise HostgroupError(msg)
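# --- Illustration (not part of the module) -----------------------------------
# Rough sketch of what generate() does with a format string (hypothetical
# values): split on "/", resolve each item against the format options (or a
# custom field), and join the non-empty results back together.
example_format = "site/manufacturer/role"
example_options = {"site": "DC-Ams", "manufacturer": "Cisco", "role": "Router"}
example_hostgroup = "/".join(
    example_options[item] for item in example_format.split("/") if example_options.get(item)
)
assert example_hostgroup == "DC-Ams/Cisco/Router"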
|
||||
|
||||
def list_formatoptions(self):
|
||||
"""
|
||||
Function to easily troubleshoot which values
|
||||
are generated for a specific device or VM.
|
||||
"""
|
||||
print(f"The following options are available for host {self.name}")
|
||||
for option_type, value in self.format_options.items():
|
||||
if value is not None:
|
||||
print(f"{option_type} - {value}")
|
||||
print("The following options are not available")
|
||||
for option_type, value in self.format_options.items():
|
||||
if value is None:
|
||||
print(f"{option_type}")
|
||||
|
||||
def custom_field_lookup(self, hg_category):
|
||||
"""
|
||||
Checks if a valid custom field is present in NetBox.
|
||||
INPUT: Custom field name
|
||||
OUTPUT: dictionary with 'result' and 'cf' keys.
|
||||
"""
|
||||
# Check if the custom field exists
|
||||
if hg_category not in self.nb.custom_fields:
|
||||
return {"result": False, "cf": None}
|
||||
# Checks if the custom field has been populated
|
||||
if not bool(self.nb.custom_fields[hg_category]):
|
||||
return {"result": True, "cf": None}
|
||||
# Custom field exists and is populated
|
||||
return {"result": True, "cf": self.nb.custom_fields[hg_category]}
|
||||
|
||||
def generate_parents(self, nest_type, child_object):
|
||||
"""
|
||||
Generates parent objects to implement nested regions / nested site groups
|
||||
INPUT: nest_type to set which type of nesting is going to be processed
|
||||
child_object: the name of the child object (for instance the last NB region)
|
||||
OUTPUT: STRING - Either the single child name or child and parents.
|
||||
"""
|
||||
# Check if this type of nesting is supported.
|
||||
if nest_type not in self.nested_objects:
|
||||
return child_object
|
||||
# If the nested flag is True, perform parent calculation
|
||||
if self.nested_objects[nest_type]["flag"]:
|
||||
final_nested_object = build_path(
|
||||
child_object, self.nested_objects[nest_type]["data"]
|
||||
)
|
||||
return "/".join(final_nested_object)
|
||||
# Nesting is not allowed for this object. Return child_object
|
||||
return child_object
|
109  modules/interface.py  Normal file
@@ -0,0 +1,109 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
All of the Zabbix interface related configuration
|
||||
"""
|
||||
from modules.exceptions import InterfaceConfigError
|
||||
|
||||
|
||||
class ZabbixInterface:
|
||||
"""Class that represents a Zabbix interface."""
|
||||
|
||||
def __init__(self, context, ip):
|
||||
self.context = context
|
||||
self.ip = ip
|
||||
self.skelet = {"main": "1", "useip": "1", "dns": "", "ip": self.ip}
|
||||
self.interface = self.skelet
|
||||
|
||||
def _set_default_port(self):
|
||||
"""Sets default TCP / UDP port for different interface types"""
|
||||
interface_mapping = {1: 10050, 2: 161, 3: 623, 4: 12345}
|
||||
# Check if interface type is listed in mapper.
|
||||
if self.interface["type"] not in interface_mapping:
|
||||
return False
|
||||
# Set default port to interface
|
||||
self.interface["port"] = str(interface_mapping[self.interface["type"]])
|
||||
return True
|
||||
|
||||
def get_context(self):
|
||||
"""check if NetBox custom context has been defined."""
|
||||
if "zabbix" in self.context:
|
||||
zabbix = self.context["zabbix"]
|
||||
if "interface_type" in zabbix:
|
||||
self.interface["type"] = zabbix["interface_type"]
|
||||
if not "interface_port" in zabbix:
|
||||
self._set_default_port()
|
||||
return True
|
||||
self.interface["port"] = zabbix["interface_port"]
|
||||
return True
|
||||
return False
|
||||
return False
|
||||
|
||||
def set_snmp(self):
|
||||
"""Check if interface is type SNMP"""
|
||||
# pylint: disable=too-many-branches
|
||||
if self.interface["type"] == 2:
|
||||
# Checks if SNMP settings are defined in NetBox
|
||||
if "snmp" in self.context["zabbix"]:
|
||||
snmp = self.context["zabbix"]["snmp"]
|
||||
self.interface["details"] = {}
|
||||
# Checks if bulk config has been defined
|
||||
if "bulk" in snmp:
|
||||
self.interface["details"]["bulk"] = str(snmp.pop("bulk"))
|
||||
else:
|
||||
# Fallback to bulk enabled if not specified
|
||||
self.interface["details"]["bulk"] = "1"
|
||||
# SNMP Version config is required in NetBox config context
|
||||
if snmp.get("version"):
|
||||
self.interface["details"]["version"] = str(snmp.pop("version"))
|
||||
else:
|
||||
e = "SNMP version option is not defined."
|
||||
raise InterfaceConfigError(e)
|
||||
# If version 1 or 2 is used, get community string
|
||||
if self.interface["details"]["version"] in ["1", "2"]:
|
||||
if "community" in snmp:
|
||||
# Set SNMP community to config context value
|
||||
community = snmp["community"]
|
||||
else:
|
||||
# Set SNMP community to default
|
||||
community = "{$SNMP_COMMUNITY}"
|
||||
self.interface["details"]["community"] = str(community)
|
||||
# If version 3 has been used, get all
|
||||
# SNMPv3 NetBox related configs
|
||||
elif self.interface["details"]["version"] == "3":
|
||||
items = [
|
||||
"securityname",
|
||||
"securitylevel",
|
||||
"authpassphrase",
|
||||
"privpassphrase",
|
||||
"authprotocol",
|
||||
"privprotocol",
|
||||
"contextname",
|
||||
]
|
||||
for key, item in snmp.items():
|
||||
if key in items:
|
||||
self.interface["details"][key] = str(item)
|
||||
else:
|
||||
e = "Unsupported SNMP version."
|
||||
raise InterfaceConfigError(e)
|
||||
else:
|
||||
e = "Interface type SNMP but no parameters provided."
|
||||
raise InterfaceConfigError(e)
|
||||
else:
|
||||
e = "Interface type is not SNMP, unable to set SNMP details"
|
||||
raise InterfaceConfigError(e)
|
||||
|
||||
def set_default_snmp(self):
|
||||
"""Set default config to SNMPv2, port 161 and community macro."""
|
||||
self.interface = self.skelet
|
||||
self.interface["type"] = "2"
|
||||
self.interface["port"] = "161"
|
||||
self.interface["details"] = {
|
||||
"version": "2",
|
||||
"community": "{$SNMP_COMMUNITY}",
|
||||
"bulk": "1",
|
||||
}
|
||||
|
||||
def set_default_agent(self):
|
||||
"""Sets interface to Zabbix agent defaults"""
|
||||
self.interface["type"] = "1"
|
||||
self.interface["port"] = "10050"
|
41  modules/logging.py  Normal file
@@ -0,0 +1,41 @@
|
||||
"""
|
||||
Logging module for Netbox-Zabbix-sync
|
||||
"""
|
||||
|
||||
import logging
|
||||
from os import path
|
||||
|
||||
logger = logging.getLogger("NetBox-Zabbix-sync")
|
||||
|
||||
|
||||
def get_logger():
|
||||
"""
|
||||
Return the logger for Netbox Zabbix Sync
|
||||
"""
|
||||
return logger
|
||||
|
||||
|
||||
def setup_logger():
|
||||
"""
|
||||
Prepare a logger with stream and file handlers
|
||||
"""
|
||||
# Set logging
|
||||
lgout = logging.StreamHandler()
|
||||
# Logfile in the project root
|
||||
project_root = path.dirname(path.dirname(path.realpath(__file__)))
|
||||
logfile_path = path.join(project_root, "sync.log")
|
||||
lgfile = logging.FileHandler(logfile_path)
|
||||
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
||||
level=logging.WARNING,
|
||||
handlers=[lgout, lgfile],
|
||||
)
|
||||
|
||||
|
||||
def set_log_levels(root_level, own_level):
|
||||
"""
|
||||
Configure log levels for root and Netbox-Zabbix-sync logger
|
||||
"""
|
||||
logging.getLogger().setLevel(root_level)
|
||||
logger.setLevel(own_level)
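# --- Usage sketch (assumption: the module layout shown in this diff) ----------
from logging import INFO, WARNING

from modules.logging import get_logger, set_log_levels, setup_logger

setup_logger()                  # attach the stream handler and the sync.log file handler
set_log_levels(WARNING, INFO)   # root logger at WARNING, sync logger at INFO
get_logger().info("starting sync run")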
|
133  modules/tags.py  Normal file
@@ -0,0 +1,133 @@
|
||||
#!/usr/bin/env python3
|
||||
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, logging-fstring-interpolation
|
||||
"""
|
||||
All of the Zabbix host tag related configuration
|
||||
"""
|
||||
from logging import getLogger
|
||||
|
||||
from modules.tools import field_mapper, remove_duplicates
|
||||
|
||||
|
||||
class ZabbixTags:
|
||||
"""Class that represents a Zabbix interface."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
nb,
|
||||
tag_map,
|
||||
tag_sync,
|
||||
tag_lower=True,
|
||||
tag_name=None,
|
||||
tag_value=None,
|
||||
logger=None,
|
||||
host=None,
|
||||
):
|
||||
self.nb = nb
|
||||
self.name = host if host else nb.name
|
||||
self.tag_map = tag_map
|
||||
self.logger = logger if logger else getLogger(__name__)
|
||||
self.tags = {}
|
||||
self.lower = tag_lower
|
||||
self.tag_name = tag_name
|
||||
self.tag_value = tag_value
|
||||
self.tag_sync = tag_sync
|
||||
self.sync = False
|
||||
self._set_config()
|
||||
|
||||
def __repr__(self):
|
||||
return self.name
|
||||
|
||||
def __str__(self):
|
||||
return self.__repr__()
|
||||
|
||||
def _set_config(self):
|
||||
"""
|
||||
Setup class
|
||||
"""
|
||||
if self.tag_sync:
|
||||
self.sync = True
|
||||
|
||||
return True
|
||||
|
||||
def validate_tag(self, tag_name):
|
||||
"""
|
||||
Validates tag name
|
||||
"""
|
||||
if tag_name and isinstance(tag_name, str) and len(tag_name) <= 256:
|
||||
return True
|
||||
return False
|
||||
|
||||
def validate_value(self, tag_value):
|
||||
"""
|
||||
Validates tag value
|
||||
"""
|
||||
if tag_value and isinstance(tag_value, str) and len(tag_value) <= 256:
|
||||
return True
|
||||
return False
|
||||
|
||||
def render_tag(self, tag_name, tag_value):
|
||||
"""
|
||||
Renders a tag
|
||||
"""
|
||||
tag = {}
|
||||
if self.validate_tag(tag_name):
|
||||
if self.lower:
|
||||
tag["tag"] = tag_name.lower()
|
||||
else:
|
||||
tag["tag"] = tag_name
|
||||
else:
|
||||
self.logger.warning(f"Tag {tag_name} is not a valid tag name, skipping.")
|
||||
return False
|
||||
|
||||
if self.validate_value(tag_value):
|
||||
if self.lower:
|
||||
tag["value"] = tag_value.lower()
|
||||
else:
|
||||
tag["value"] = tag_value
|
||||
else:
|
||||
self.logger.warning(
|
||||
f"Tag {tag_name} has an invalid value: '{tag_value}', skipping."
|
||||
)
|
||||
return False
|
||||
return tag
|
||||
|
||||
def generate(self):
|
||||
"""
|
||||
Generate the full set of host tags
|
||||
"""
|
||||
# pylint: disable=too-many-branches
|
||||
tags = []
|
||||
# Parse the field mapper for tags
|
||||
if self.tag_map:
|
||||
self.logger.debug(f"Host {self.nb.name}: Starting tag mapper")
|
||||
field_tags = field_mapper(self.nb.name, self.tag_map, self.nb, self.logger)
|
||||
for tag, value in field_tags.items():
|
||||
t = self.render_tag(tag, value)
|
||||
if t:
|
||||
tags.append(t)
|
||||
|
||||
# Parse NetBox config context for tags
|
||||
if (
|
||||
"zabbix" in self.nb.config_context
|
||||
and "tags" in self.nb.config_context["zabbix"]
|
||||
and isinstance(self.nb.config_context["zabbix"]["tags"], list)
|
||||
):
|
||||
for tag in self.nb.config_context["zabbix"]["tags"]:
|
||||
if isinstance(tag, dict):
|
||||
for tagname, value in tag.items():
|
||||
t = self.render_tag(tagname, value)
|
||||
if t:
|
||||
tags.append(t)
|
||||
|
||||
# Pull in NetBox device tags if tag_name is set
|
||||
if self.tag_name and isinstance(self.tag_name, str):
|
||||
for tag in self.nb.tags:
|
||||
if self.tag_value.lower() in ["display", "name", "slug"]:
|
||||
value = tag[self.tag_value]
|
||||
else:
|
||||
value = tag["name"]
|
||||
t = self.render_tag(self.tag_name, value)
|
||||
if t:
|
||||
tags.append(t)
|
||||
|
||||
return remove_duplicates(tags, sortkey="tag")
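# --- Illustration (not part of the module) -----------------------------------
# Sketch of a config context "tags" list as consumed by generate(); the tag
# names and values are hypothetical.
example_config_context = {
    "zabbix": {
        "tags": [
            {"environment": "production"},
            {"service": "dns"},
        ]
    }
}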
|
192  modules/tools.py  Normal file
@@ -0,0 +1,192 @@
|
||||
"""A collection of tools used by several classes"""
|
||||
from modules.exceptions import HostgroupError
|
||||
|
||||
def convert_recordset(recordset):
|
||||
"""Converts netbox RedcordSet to list of dicts."""
|
||||
recordlist = []
|
||||
for record in recordset:
|
||||
recordlist.append(record.__dict__)
|
||||
return recordlist
|
||||
|
||||
|
||||
def build_path(endpoint, list_of_dicts):
|
||||
"""
|
||||
Builds a path list of related parent/child items.
|
||||
This can be used to generate a joinable list to
|
||||
be used in hostgroups.
|
||||
"""
|
||||
item_path = []
|
||||
itemlist = [i for i in list_of_dicts if i["name"] == endpoint]
|
||||
item = itemlist[0] if len(itemlist) == 1 else None
|
||||
item_path.append(item["name"])
|
||||
while item["_depth"] > 0:
|
||||
itemlist = [i for i in list_of_dicts if i["name"] == str(item["parent"])]
|
||||
item = itemlist[0] if len(itemlist) == 1 else None
|
||||
item_path.append(item["name"])
|
||||
item_path.reverse()
|
||||
return item_path
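# --- Usage sketch (hypothetical region data; assumes the module layout shown) -
from modules.tools import build_path

example_regions = [
    {"name": "EMEA", "parent": None, "_depth": 0},
    {"name": "Netherlands", "parent": "EMEA", "_depth": 1},
    {"name": "Amsterdam", "parent": "Netherlands", "_depth": 2},
]
print(build_path("Amsterdam", example_regions))  # ['EMEA', 'Netherlands', 'Amsterdam']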
|
||||
|
||||
|
||||
def proxy_prepper(proxy_list, proxy_group_list):
|
||||
"""
|
||||
Function that takes 2 lists and converts them using a
|
||||
standardized format for further processing.
|
||||
"""
|
||||
output = []
|
||||
for proxy in proxy_list:
|
||||
proxy["type"] = "proxy"
|
||||
proxy["id"] = proxy["proxyid"]
|
||||
proxy["idtype"] = "proxyid"
|
||||
proxy["monitored_by"] = 1
|
||||
output.append(proxy)
|
||||
for group in proxy_group_list:
|
||||
group["type"] = "proxy_group"
|
||||
group["id"] = group["proxy_groupid"]
|
||||
group["idtype"] = "proxy_groupid"
|
||||
group["monitored_by"] = 2
|
||||
output.append(group)
|
||||
return output
|
||||
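A hedged sketch of proxy_prepper input and output; the proxy names and IDs are invented, only the key names come from the function above.
from modules.tools import proxy_prepper

proxies = [{"name": "proxy-ams", "proxyid": "10518"}]          # invented example data
proxy_groups = [{"name": "group-eu", "proxy_groupid": "3"}]    # invented example data
combined = proxy_prepper(proxies, proxy_groups)
# Each entry now also carries "type", "id", "idtype" and "monitored_by"
# (1 for a proxy, 2 for a proxy group), ready for host assignment logic.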
|
||||
|
||||
def field_mapper(host, mapper, nbdevice, logger):
|
||||
"""
|
||||
Maps NetBox field data to Zabbix properties.
|
||||
Used for Inventory, Usermacros and Tag mappings.
|
||||
"""
|
||||
data = {}
|
||||
# Let's build a dict for each property in the map
|
||||
for nb_field, zbx_field in mapper.items():
|
||||
field_list = nb_field.split("/") # convert str to list based on delimiter
|
||||
# start at the base of the dict...
|
||||
value = nbdevice
|
||||
# ... and step through the dict till we find the needed value
|
||||
for item in field_list:
|
||||
value = value[item] if value else None
|
||||
# Check if the result is usable and expected
|
||||
# We want to apply any int or float 0 values,
|
||||
# even if python thinks those are empty.
|
||||
if (value and isinstance(value, int | float | str)) or (
|
||||
isinstance(value, int | float) and int(value) == 0
|
||||
):
|
||||
data[zbx_field] = str(value)
|
||||
elif not value:
|
||||
# empty value should just be an empty string for API compatibility
|
||||
logger.debug(
|
||||
f"Host {host}: NetBox lookup for "
|
||||
f"'{nb_field}' returned an empty value"
|
||||
)
|
||||
data[zbx_field] = ""
|
||||
else:
|
||||
# Value is not a string or numeral, probably not what the user expected.
|
||||
logger.error(
|
||||
f"Host {host}: Lookup for '{nb_field}'"
|
||||
" returned an unexpected type: it will be skipped."
|
||||
)
|
||||
logger.debug(
|
||||
f"Host {host}: Field mapping complete. "
|
||||
f"Mapped {len(list(filter(None, data.values())))} field(s)"
|
||||
)
|
||||
return data
|
||||
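A small, hedged example of calling field_mapper; a plain nested dict stands in for the pynetbox record here, and the serial/model values are made up.
from logging import getLogger
from modules.tools import field_mapper

# A plain nested dict stands in for the pynetbox device record in this sketch.
fake_device = {"serial": "FDO1234X0AB", "device_type": {"model": "C9300-24T"}}
mapper = {"serial": "serialno_a", "device_type/model": "model"}
print(field_mapper("switch-01", mapper, fake_device, getLogger(__name__)))
# -> {'serialno_a': 'FDO1234X0AB', 'model': 'C9300-24T'}; empty lookups become ""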
|
||||
|
||||
def remove_duplicates(input_list, sortkey=None):
|
||||
"""
|
||||
Removes duplicate entries from a list and sorts the list
|
||||
"""
|
||||
output_list = []
|
||||
if isinstance(input_list, list):
|
||||
output_list = [dict(t) for t in {tuple(d.items()) for d in input_list}]
|
||||
if sortkey and isinstance(sortkey, str):
|
||||
output_list.sort(key=lambda x: x[sortkey])
|
||||
return output_list
|
||||
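A quick, hedged usage sketch of remove_duplicates with invented tag data.
from modules.tools import remove_duplicates

tags = [{"tag": "site", "value": "AMS"},          # invented tag data
        {"tag": "site", "value": "AMS"},          # exact duplicate, will be dropped
        {"tag": "role", "value": "router"}]
print(remove_duplicates(tags, sortkey="tag"))
# -> [{'tag': 'role', 'value': 'router'}, {'tag': 'site', 'value': 'AMS'}]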
|
||||
|
||||
def verify_hg_format(hg_format, device_cfs=None, vm_cfs=None, hg_type="dev", logger=None):
|
||||
"""
|
||||
Verifies hostgroup field format
|
||||
"""
|
||||
if not device_cfs:
|
||||
device_cfs = []
|
||||
if not vm_cfs:
|
||||
vm_cfs = []
|
||||
allowed_objects = {"dev": ["location",
|
||||
"rack",
|
||||
"role",
|
||||
"manufacturer",
|
||||
"region",
|
||||
"site",
|
||||
"site_group",
|
||||
"tenant",
|
||||
"tenant_group",
|
||||
"platform",
|
||||
"cluster"]
|
||||
,"vm": ["cluster_type",
|
||||
"role",
|
||||
"manufacturer",
|
||||
"region",
|
||||
"site",
|
||||
"site_group",
|
||||
"tenant",
|
||||
"tenant_group",
|
||||
"cluster",
|
||||
"device",
|
||||
"platform"]
|
||||
,"cfs": {"dev": [], "vm": []}
|
||||
}
|
||||
for cf in device_cfs:
|
||||
allowed_objects['cfs']['dev'].append(cf.name)
|
||||
for cf in vm_cfs:
|
||||
allowed_objects['cfs']['vm'].append(cf.name)
|
||||
hg_objects = []
|
||||
if isinstance(hg_format, list):
|
||||
for f in hg_format:
|
||||
hg_objects = hg_objects + f.split("/")
|
||||
else:
|
||||
hg_objects = hg_format.split("/")
|
||||
hg_objects = sorted(set(hg_objects))
|
||||
for hg_object in hg_objects:
|
||||
if (hg_object not in allowed_objects[hg_type] and
|
||||
hg_object not in allowed_objects['cfs'][hg_type]):
|
||||
e = (
|
||||
f"Hostgroup item {hg_object} is not valid. Make sure you"
|
||||
" use valid items and separate them with '/'."
|
||||
)
|
||||
logger.error(e)
|
||||
raise HostgroupError(e)
|
||||
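A hedged usage sketch of verify_hg_format; the format strings are examples, not a recommendation.
from logging import getLogger
from modules.tools import verify_hg_format

logger = getLogger(__name__)
# A valid device format passes silently...
verify_hg_format("site/manufacturer/role", hg_type="dev", logger=logger)
# ...while an unknown item such as "colour" would raise HostgroupError:
# verify_hg_format("site/colour", hg_type="dev", logger=logger)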
|
||||
|
||||
def sanatize_log_output(data):
|
||||
"""
|
||||
Used for the update function to Zabbix, which
|
||||
shows the data that it is using to update the host.
|
||||
Removes any sensitive data from the input.
|
||||
"""
|
||||
if not isinstance(data, dict):
|
||||
return data
|
||||
sanitized_data = data.copy()
|
||||
# Check if there are any sensitive macros defined in the data
|
||||
if "macros" in data:
|
||||
for macro in sanitized_data["macros"]:
|
||||
# Check if macro is secret type
|
||||
if not macro["type"] == str(1):
|
||||
continue
|
||||
macro["value"] = "********"
|
||||
# Check for interface data
|
||||
if "interfaceid" in data:
|
||||
# Interface ID is a value which is most likely not helpful
|
||||
# in logging output or for troubleshooting.
|
||||
del sanitized_data["interfaceid"]
|
||||
# InterfaceID also hints that this is an interface update.
|
||||
# A check is required in case no macros are used for SNMP security parameters.
|
||||
if not "details" in data:
|
||||
return sanitized_data
|
||||
for key, detail in sanitized_data["details"].items():
|
||||
# If the detail is a secret, we don't want to log it.
|
||||
if key in ("authpassphrase", "privpassphrase", "securityname", "community"):
|
||||
# Check if a macro is used.
|
||||
# If so then logging the output is not a security issue.
|
||||
if detail.startswith("{$") and detail.endswith("}"):
|
||||
continue
|
||||
# A macro is not used, so we sanitize the value.
|
||||
sanitized_data["details"][key] = "********"
|
||||
return sanitized_data
|
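A hedged sketch of what sanatize_log_output does to a host update payload; the macro and SNMP values are invented.
from modules.tools import sanatize_log_output

update = {                                             # invented update payload
    "macros": [{"macro": "{$SNMP_SEC}", "type": "1", "value": "s3cr3t"}],
    "interfaceid": "42",
    "details": {"version": "2", "community": "public"},
}
print(sanatize_log_output(update))
# Secret macro values and the plain-text community string are masked with
# "********", and the unhelpful interfaceid key is dropped from the logged copy.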
modules/usermacros.py (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
#!/usr/bin/env python3
|
||||
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, logging-fstring-interpolation
|
||||
"""
|
||||
All of the Zabbix Usermacro related configuration
|
||||
"""
|
||||
from logging import getLogger
|
||||
from re import match
|
||||
|
||||
from modules.tools import field_mapper
|
||||
|
||||
|
||||
class ZabbixUsermacros:
|
||||
"""Class that represents Zabbix usermacros."""
|
||||
|
||||
def __init__(self, nb, usermacro_map, usermacro_sync, logger=None, host=None):
|
||||
self.nb = nb
|
||||
self.name = host if host else nb.name
|
||||
self.usermacro_map = usermacro_map
|
||||
self.logger = logger if logger else getLogger(__name__)
|
||||
self.usermacros = {}
|
||||
self.usermacro_sync = usermacro_sync
|
||||
self.sync = False
|
||||
self.force_sync = False
|
||||
self._set_config()
|
||||
|
||||
def __repr__(self):
|
||||
return self.name
|
||||
|
||||
def __str__(self):
|
||||
return self.__repr__()
|
||||
|
||||
def _set_config(self):
|
||||
"""
|
||||
Setup class
|
||||
"""
|
||||
if str(self.usermacro_sync).lower() == "full":
|
||||
self.sync = True
|
||||
self.force_sync = True
|
||||
elif self.usermacro_sync:
|
||||
self.sync = True
|
||||
return True
|
||||
|
||||
def validate_macro(self, macro_name):
|
||||
"""
|
||||
Validates usermacro name
|
||||
"""
|
||||
pattern = r"\{\$[A-Z0-9\._]*(\:.*)?\}"
|
||||
return match(pattern, macro_name)
|
||||
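To illustrate the pattern used by validate_macro above, a small hedged sketch using the same regular expression directly; the macro names are examples only.
from re import match

pattern = r"\{\$[A-Z0-9\._]*(\:.*)?\}"                    # same pattern as validate_macro
print(bool(match(pattern, "{$SNMP_COMMUNITY}")))          # True
print(bool(match(pattern, '{$IF.ERRORS:"eth0"}')))        # True, macro with context
print(bool(match(pattern, "snmp_community")))             # False, missing {$...} wrapper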
|
||||
def render_macro(self, macro_name, macro_properties):
|
||||
"""
|
||||
Renders a full usermacro from partial input
|
||||
"""
|
||||
macro = {}
|
||||
macrotypes = {"text": 0, "secret": 1, "vault": 2}
|
||||
if self.validate_macro(macro_name):
|
||||
macro["macro"] = str(macro_name)
|
||||
if isinstance(macro_properties, dict):
|
||||
if not "value" in macro_properties:
|
||||
self.logger.warning(f"Host {self.name}: Usermacro {macro_name} has "
|
||||
"no value in Netbox, skipping.")
|
||||
return False
|
||||
macro["value"] = macro_properties["value"]
|
||||
|
||||
if (
|
||||
"type" in macro_properties
|
||||
and macro_properties["type"].lower() in macrotypes
|
||||
):
|
||||
macro["type"] = str(macrotypes[macro_properties["type"]])
|
||||
else:
|
||||
macro["type"] = str(0)
|
||||
|
||||
if "description" in macro_properties and isinstance(
|
||||
macro_properties["description"], str
|
||||
):
|
||||
macro["description"] = macro_properties["description"]
|
||||
else:
|
||||
macro["description"] = ""
|
||||
|
||||
elif isinstance(macro_properties, str) and macro_properties:
|
||||
macro["value"] = macro_properties
|
||||
macro["type"] = str(0)
|
||||
macro["description"] = ""
|
||||
|
||||
else:
|
||||
self.logger.warning(f"Host {self.name}: Usermacro {macro_name} "
|
||||
"has no value, skipping.")
|
||||
return False
|
||||
else:
|
||||
self.logger.error(
|
||||
f"Host {self.name}: Usermacro {macro_name} is not a valid usermacro name, skipping."
|
||||
)
|
||||
return False
|
||||
return macro
|
||||
|
||||
def generate(self):
|
||||
"""
|
||||
Generate full set of Usermacros
|
||||
"""
|
||||
macros = []
|
||||
# Parse the field mapper for usermacros
|
||||
if self.usermacro_map:
|
||||
self.logger.debug(f"Host {self.nb.name}: Starting usermacro mapper")
|
||||
field_macros = field_mapper(
|
||||
self.nb.name, self.usermacro_map, self.nb, self.logger
|
||||
)
|
||||
for macro, value in field_macros.items():
|
||||
m = self.render_macro(macro, value)
|
||||
if m:
|
||||
macros.append(m)
|
||||
# Parse NetBox config context for usermacros
|
||||
if (
|
||||
"zabbix" in self.nb.config_context
|
||||
and "usermacros" in self.nb.config_context["zabbix"]
|
||||
):
|
||||
for macro, properties in self.nb.config_context["zabbix"][
|
||||
"usermacros"
|
||||
].items():
|
||||
m = self.render_macro(macro, properties)
|
||||
if m:
|
||||
macros.append(m)
|
||||
return macros
|
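A hedged sketch of the config-context structure that this generate() reads; only the "zabbix"/"usermacros" keys come from the code, the macro names and values are made up.
# Hypothetical NetBox config context read by generate(); names/values are invented.
example_config_context = {
    "zabbix": {
        "usermacros": {
            "{$SNMP_COMMUNITY}": {"value": "public", "type": "secret"},
            "{$LOCATION}": "datacenter-1",
        }
    }
}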
modules/virtual_machine.py (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
# pylint: disable=duplicate-code
|
||||
"""Module that hosts all functions for virtual machine processing"""
|
||||
from modules.device import PhysicalDevice
|
||||
from modules.exceptions import InterfaceConfigError, SyncInventoryError, TemplateError
|
||||
from modules.hostgroups import Hostgroup
|
||||
from modules.interface import ZabbixInterface
|
||||
from modules.config import load_config
|
||||
# Load config
|
||||
config = load_config()
|
||||
|
||||
|
||||
class VirtualMachine(PhysicalDevice):
|
||||
"""Model for virtual machines"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.hostgroup = None
|
||||
self.zbx_template_names = None
|
||||
|
||||
def _inventory_map(self):
|
||||
"""use VM inventory maps"""
|
||||
return config["vm_inventory_map"]
|
||||
|
||||
def _usermacro_map(self):
|
||||
"""use VM usermacro maps"""
|
||||
return config["vm_usermacro_map"]
|
||||
|
||||
def _tag_map(self):
|
||||
"""use VM tag maps"""
|
||||
return config["vm_tag_map"]
|
||||
|
||||
def set_hostgroup(self, hg_format, nb_site_groups, nb_regions):
|
||||
"""Set the hostgroup for this device"""
|
||||
# Create new Hostgroup instance
|
||||
hg = Hostgroup(
|
||||
"vm",
|
||||
self.nb,
|
||||
self.nb_api_version,
|
||||
logger=self.logger,
|
||||
nested_sitegroup_flag=config["traverse_site_groups"],
|
||||
nested_region_flag=config["traverse_regions"],
|
||||
nb_groups=nb_site_groups,
|
||||
nb_regions=nb_regions,
|
||||
)
|
||||
# Generate hostgroup based on hostgroup format
|
||||
if isinstance(hg_format, list):
|
||||
self.hostgroups = [hg.generate(f) for f in hg_format]
|
||||
else:
|
||||
self.hostgroups.append(hg.generate(hg_format))
|
||||
|
||||
def set_vm_template(self):
|
||||
"""Set Template for VMs. Overwrites default class
|
||||
to skip a lookup of custom fields."""
|
||||
# Gather templates ONLY from the device specific context
|
||||
try:
|
||||
self.zbx_template_names = self.get_templates_context()
|
||||
except TemplateError as e:
|
||||
self.logger.warning(e)
|
||||
return True
|
||||
|
||||
def setInterfaceDetails(self): # pylint: disable=invalid-name
|
||||
"""
|
||||
Overrides the device function to select an agent interface type by default.
|
||||
Agent type interfaces are more likely to be used with VMs than SNMP.
|
||||
"""
|
||||
try:
|
||||
# Initiate interface class
|
||||
interface = ZabbixInterface(self.nb.config_context, self.ip)
|
||||
# Check if NetBox has device context.
|
||||
# If not, fall back to the old config.
|
||||
if interface.get_context():
|
||||
# If the device is SNMP type, add additional information.
|
||||
if interface.interface["type"] == 2:
|
||||
interface.set_snmp()
|
||||
else:
|
||||
interface.set_default_agent()
|
||||
return [interface.interface]
|
||||
except InterfaceConfigError as e:
|
||||
message = f"{self.name}: {e}"
|
||||
self.logger.warning(message)
|
||||
raise SyncInventoryError(message) from e
|
File diff suppressed because it is too large
@@ -1,2 +1,2 @@
|
||||
pynetbox
|
||||
pyzabbix
|
||||
pynetbox==7.4.1
|
||||
zabbix-utils==2.0.2
|
||||
|
tests/__init__.py (new file, 0 lines)
tests/test_configuration_parsing.py (new file, 139 lines)
@@ -0,0 +1,139 @@
|
||||
"""Tests for configuration parsing in the modules.config module."""
|
||||
from unittest.mock import patch, MagicMock
|
||||
import os
|
||||
from modules.config import load_config, DEFAULT_CONFIG, load_config_file, load_env_variable
|
||||
|
||||
|
||||
def test_load_config_defaults():
|
||||
"""Test that load_config returns default values when no config file or env vars are present"""
|
||||
with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \
|
||||
patch('modules.config.load_env_variable', return_value=None):
|
||||
config = load_config()
|
||||
assert config == DEFAULT_CONFIG
|
||||
assert config["templates_config_context"] is False
|
||||
assert config["create_hostgroups"] is True
|
||||
|
||||
|
||||
def test_load_config_file():
|
||||
"""Test that load_config properly loads values from config file"""
|
||||
mock_config = DEFAULT_CONFIG.copy()
|
||||
mock_config["templates_config_context"] = True
|
||||
mock_config["sync_vms"] = True
|
||||
|
||||
with patch('modules.config.load_config_file', return_value=mock_config), \
|
||||
patch('modules.config.load_env_variable', return_value=None):
|
||||
config = load_config()
|
||||
assert config["templates_config_context"] is True
|
||||
assert config["sync_vms"] is True
|
||||
# Unchanged values should remain as defaults
|
||||
assert config["create_journal"] is False
|
||||
|
||||
|
||||
def test_load_env_variables():
|
||||
"""Test that load_config properly loads values from environment variables"""
|
||||
# Mock env variable loading to return values for specific keys
|
||||
def mock_load_env(key):
|
||||
if key == "sync_vms":
|
||||
return True
|
||||
if key == "create_journal":
|
||||
return True
|
||||
return None
|
||||
|
||||
with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \
|
||||
patch('modules.config.load_env_variable', side_effect=mock_load_env):
|
||||
config = load_config()
|
||||
assert config["sync_vms"] is True
|
||||
assert config["create_journal"] is True
|
||||
# Unchanged values should remain as defaults
|
||||
assert config["templates_config_context"] is False
|
||||
|
||||
|
||||
def test_env_vars_override_config_file():
|
||||
"""Test that environment variables override values from config file"""
|
||||
mock_config = DEFAULT_CONFIG.copy()
|
||||
mock_config["templates_config_context"] = True
|
||||
mock_config["sync_vms"] = False
|
||||
|
||||
# Mock env variable that will override the config file value
|
||||
def mock_load_env(key):
|
||||
if key == "sync_vms":
|
||||
return True
|
||||
return None
|
||||
|
||||
with patch('modules.config.load_config_file', return_value=mock_config), \
|
||||
patch('modules.config.load_env_variable', side_effect=mock_load_env):
|
||||
config = load_config()
|
||||
# This should be overridden by the env var
|
||||
assert config["sync_vms"] is True
|
||||
# This should remain from the config file
|
||||
assert config["templates_config_context"] is True
|
||||
|
||||
|
||||
def test_load_config_file_function():
|
||||
"""Test the load_config_file function directly"""
|
||||
# Test when the file exists
|
||||
with patch('pathlib.Path.exists', return_value=True), \
|
||||
patch('importlib.util.spec_from_file_location') as mock_spec:
|
||||
# Setup the mock module with attributes
|
||||
mock_module = MagicMock()
|
||||
mock_module.templates_config_context = True
|
||||
mock_module.sync_vms = True
|
||||
|
||||
# Setup the mock spec
|
||||
mock_spec_instance = MagicMock()
|
||||
mock_spec.return_value = mock_spec_instance
|
||||
mock_spec_instance.loader.exec_module = lambda x: None
|
||||
|
||||
# Patch module_from_spec to return our mock module
|
||||
with patch('importlib.util.module_from_spec', return_value=mock_module):
|
||||
config = load_config_file(DEFAULT_CONFIG.copy())
|
||||
assert config["templates_config_context"] is True
|
||||
assert config["sync_vms"] is True
|
||||
|
||||
|
||||
def test_load_config_file_not_found():
|
||||
"""Test load_config_file when the config file doesn't exist"""
|
||||
with patch('pathlib.Path.exists', return_value=False):
|
||||
result = load_config_file(DEFAULT_CONFIG.copy())
|
||||
# Should return the defaults it was given, equal to DEFAULT_CONFIG
|
||||
assert result == DEFAULT_CONFIG
|
||||
|
||||
|
||||
def test_load_env_variable_function():
|
||||
"""Test the load_env_variable function directly"""
|
||||
# Create a real environment variable for testing with correct prefix and uppercase
|
||||
test_var = "NBZX_TEMPLATES_CONFIG_CONTEXT"
|
||||
original_env = os.environ.get(test_var, None)
|
||||
try:
|
||||
# Set the environment variable with the proper prefix and case
|
||||
os.environ[test_var] = "True"
|
||||
|
||||
# Test that it's properly read (using lowercase in the function call)
|
||||
value = load_env_variable("templates_config_context")
|
||||
assert value == "True"
|
||||
|
||||
# Test when the environment variable doesn't exist
|
||||
value = load_env_variable("nonexistent_variable")
|
||||
assert value is None
|
||||
finally:
|
||||
# Clean up - restore original environment
|
||||
if original_env is not None:
|
||||
os.environ[test_var] = original_env
|
||||
else:
|
||||
os.environ.pop(test_var, None)
|
||||
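A hedged sketch of the environment-variable convention exercised by this test; the NBZX_ prefix and upper-casing mirror the test above, and "sync_vms" is just one example key seen in DEFAULT_CONFIG earlier in these tests.
import os
from modules.config import load_env_variable

os.environ["NBZX_SYNC_VMS"] = "true"   # example override of the sync_vms setting
print(load_env_variable("sync_vms"))   # -> "true" (raw string; coercion is not shown here)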
|
||||
|
||||
def test_load_config_file_exception_handling():
|
||||
"""Test that load_config_file handles exceptions gracefully"""
|
||||
# This test requires modifying the load_config_file function to handle exceptions
|
||||
# For now, we're just checking that an exception is raised
|
||||
with patch('pathlib.Path.exists', return_value=True), \
|
||||
patch('importlib.util.spec_from_file_location', side_effect=Exception("Import error")):
|
||||
# Since the current implementation doesn't handle exceptions, we should
|
||||
# expect an exception to be raised
|
||||
try:
|
||||
load_config_file(DEFAULT_CONFIG.copy())
|
||||
assert False, "An exception should have been raised"
|
||||
except Exception: # pylint: disable=broad-except
|
||||
# This is expected
|
||||
pass
|
tests/test_device_deletion.py (new file, 166 lines)
@@ -0,0 +1,166 @@
|
||||
"""Tests for device deletion functionality in the PhysicalDevice class."""
|
||||
import unittest
|
||||
from unittest.mock import MagicMock, patch
|
||||
from zabbix_utils import APIRequestError
|
||||
from modules.device import PhysicalDevice
|
||||
from modules.exceptions import SyncExternalError
|
||||
|
||||
|
||||
class TestDeviceDeletion(unittest.TestCase):
|
||||
"""Test class for device deletion functionality."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
# Create mock NetBox device
|
||||
self.mock_nb_device = MagicMock()
|
||||
self.mock_nb_device.id = 123
|
||||
self.mock_nb_device.name = "test-device"
|
||||
self.mock_nb_device.status.label = "Decommissioning"
|
||||
self.mock_nb_device.custom_fields = {"zabbix_hostid": "456"}
|
||||
self.mock_nb_device.config_context = {}
|
||||
|
||||
# Set up a primary IP
|
||||
primary_ip = MagicMock()
|
||||
primary_ip.address = "192.168.1.1/24"
|
||||
self.mock_nb_device.primary_ip = primary_ip
|
||||
|
||||
# Create mock Zabbix API
|
||||
self.mock_zabbix = MagicMock()
|
||||
self.mock_zabbix.version = "6.0"
|
||||
|
||||
# Set up mock host.get response
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
|
||||
# Mock NetBox journal class
|
||||
self.mock_nb_journal = MagicMock()
|
||||
|
||||
# Create logger mock
|
||||
self.mock_logger = MagicMock()
|
||||
|
||||
# Create PhysicalDevice instance with mocks
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
self.device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
journal=True,
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
def test_cleanup_successful_deletion(self):
|
||||
"""Test successful device deletion from Zabbix."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
self.mock_zabbix.host.delete.return_value = {"hostids": ["456"]}
|
||||
|
||||
# Execute
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify
|
||||
self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[])
|
||||
self.mock_zabbix.host.delete.assert_called_once_with('456')
|
||||
self.mock_nb_device.save.assert_called_once()
|
||||
self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"])
|
||||
self.mock_logger.info.assert_called_with(f"Host {self.device.name}: "
|
||||
"Deleted host from Zabbix.")
|
||||
|
||||
def test_cleanup_device_already_deleted(self):
|
||||
"""Test cleanup when device is already deleted from Zabbix."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [] # Empty list means host not found
|
||||
|
||||
# Execute
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify
|
||||
self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[])
|
||||
self.mock_zabbix.host.delete.assert_not_called()
|
||||
self.mock_nb_device.save.assert_called_once()
|
||||
self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"])
|
||||
self.mock_logger.info.assert_called_with(
|
||||
f"Host {self.device.name}: was already deleted from Zabbix. Removed link in NetBox.")
|
||||
|
||||
def test_cleanup_api_error(self):
|
||||
"""Test cleanup when Zabbix API returns an error."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
self.mock_zabbix.host.delete.side_effect = APIRequestError("API Error")
|
||||
|
||||
# Execute and verify
|
||||
with self.assertRaises(SyncExternalError):
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify correct calls were made
|
||||
self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[])
|
||||
self.mock_zabbix.host.delete.assert_called_once_with('456')
|
||||
self.mock_nb_device.save.assert_not_called()
|
||||
self.mock_logger.error.assert_called()
|
||||
|
||||
def test_zeroize_cf(self):
|
||||
"""Test _zeroize_cf method that clears the custom field."""
|
||||
# Execute
|
||||
self.device._zeroize_cf() # pylint: disable=protected-access
|
||||
|
||||
# Verify
|
||||
self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"])
|
||||
self.mock_nb_device.save.assert_called_once()
|
||||
|
||||
def test_create_journal_entry(self):
|
||||
"""Test create_journal_entry method."""
|
||||
# Setup
|
||||
test_message = "Test journal entry"
|
||||
|
||||
# Execute
|
||||
result = self.device.create_journal_entry("info", test_message)
|
||||
|
||||
# Verify
|
||||
self.assertTrue(result)
|
||||
self.mock_nb_journal.create.assert_called_once()
|
||||
journal_entry = self.mock_nb_journal.create.call_args[0][0]
|
||||
self.assertEqual(journal_entry["assigned_object_type"], "dcim.device")
|
||||
self.assertEqual(journal_entry["assigned_object_id"], 123)
|
||||
self.assertEqual(journal_entry["kind"], "info")
|
||||
self.assertEqual(journal_entry["comments"], test_message)
|
||||
|
||||
def test_create_journal_entry_invalid_severity(self):
|
||||
"""Test create_journal_entry with invalid severity."""
|
||||
# Execute
|
||||
result = self.device.create_journal_entry("invalid", "Test message")
|
||||
|
||||
# Verify
|
||||
self.assertFalse(result)
|
||||
self.mock_nb_journal.create.assert_not_called()
|
||||
self.mock_logger.warning.assert_called()
|
||||
|
||||
def test_create_journal_entry_when_disabled(self):
|
||||
"""Test create_journal_entry when journaling is disabled."""
|
||||
# Setup - create device with journal=False
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
journal=False, # Disable journaling
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Execute
|
||||
result = device.create_journal_entry("info", "Test message")
|
||||
|
||||
# Verify
|
||||
self.assertFalse(result)
|
||||
self.mock_nb_journal.create.assert_not_called()
|
||||
|
||||
def test_cleanup_updates_journal(self):
|
||||
"""Test that cleanup method creates a journal entry."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
|
||||
# Execute
|
||||
with patch.object(self.device, 'create_journal_entry') as mock_journal_entry:
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify
|
||||
mock_journal_entry.assert_called_once_with("warning", "Deleted host from Zabbix")
|
tests/test_hostgroups.py (new file, 340 lines)
@@ -0,0 +1,340 @@
|
||||
"""Tests for the Hostgroup class in the hostgroups module."""
|
||||
import unittest
|
||||
from unittest.mock import MagicMock, patch, call
|
||||
from modules.hostgroups import Hostgroup
|
||||
from modules.exceptions import HostgroupError
|
||||
|
||||
|
||||
class TestHostgroups(unittest.TestCase):
|
||||
"""Test class for Hostgroup functionality."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
# Create mock logger
|
||||
self.mock_logger = MagicMock()
|
||||
|
||||
# *** Mock NetBox Device setup ***
|
||||
# Create mock device with all properties
|
||||
self.mock_device = MagicMock()
|
||||
self.mock_device.name = "test-device"
|
||||
|
||||
# Set up site information
|
||||
site = MagicMock()
|
||||
site.name = "TestSite"
|
||||
|
||||
# Set up region information
|
||||
region = MagicMock()
|
||||
region.name = "TestRegion"
|
||||
# Ensure region string representation returns the name
|
||||
region.__str__.return_value = "TestRegion"
|
||||
site.region = region
|
||||
|
||||
# Set up site group information
|
||||
site_group = MagicMock()
|
||||
site_group.name = "TestSiteGroup"
|
||||
# Ensure site group string representation returns the name
|
||||
site_group.__str__.return_value = "TestSiteGroup"
|
||||
site.group = site_group
|
||||
|
||||
self.mock_device.site = site
|
||||
|
||||
# Set up role information (varies based on NetBox version)
|
||||
self.mock_device_role = MagicMock()
|
||||
self.mock_device_role.name = "TestRole"
|
||||
# Ensure string representation returns the name
|
||||
self.mock_device_role.__str__.return_value = "TestRole"
|
||||
self.mock_device.device_role = self.mock_device_role
|
||||
self.mock_device.role = self.mock_device_role
|
||||
|
||||
# Set up tenant information
|
||||
tenant = MagicMock()
|
||||
tenant.name = "TestTenant"
|
||||
# Ensure tenant string representation returns the name
|
||||
tenant.__str__.return_value = "TestTenant"
|
||||
tenant_group = MagicMock()
|
||||
tenant_group.name = "TestTenantGroup"
|
||||
# Ensure tenant group string representation returns the name
|
||||
tenant_group.__str__.return_value = "TestTenantGroup"
|
||||
tenant.group = tenant_group
|
||||
self.mock_device.tenant = tenant
|
||||
|
||||
# Set up platform information
|
||||
platform = MagicMock()
|
||||
platform.name = "TestPlatform"
|
||||
self.mock_device.platform = platform
|
||||
|
||||
# Device-specific properties
|
||||
device_type = MagicMock()
|
||||
manufacturer = MagicMock()
|
||||
manufacturer.name = "TestManufacturer"
|
||||
device_type.manufacturer = manufacturer
|
||||
self.mock_device.device_type = device_type
|
||||
|
||||
location = MagicMock()
|
||||
location.name = "TestLocation"
|
||||
# Ensure location string representation returns the name
|
||||
location.__str__.return_value = "TestLocation"
|
||||
self.mock_device.location = location
|
||||
|
||||
# Custom fields
|
||||
self.mock_device.custom_fields = {"test_cf": "TestCF"}
|
||||
|
||||
# *** Mock NetBox VM setup ***
|
||||
# Create mock VM with all properties
|
||||
self.mock_vm = MagicMock()
|
||||
self.mock_vm.name = "test-vm"
|
||||
|
||||
# Reuse site from device
|
||||
self.mock_vm.site = site
|
||||
|
||||
# Set up role for VM
|
||||
self.mock_vm.role = self.mock_device_role
|
||||
|
||||
# Set up tenant for VM (same as device)
|
||||
self.mock_vm.tenant = tenant
|
||||
|
||||
# Set up platform for VM (same as device)
|
||||
self.mock_vm.platform = platform
|
||||
|
||||
# VM-specific properties
|
||||
cluster = MagicMock()
|
||||
cluster.name = "TestCluster"
|
||||
cluster_type = MagicMock()
|
||||
cluster_type.name = "TestClusterType"
|
||||
cluster.type = cluster_type
|
||||
self.mock_vm.cluster = cluster
|
||||
|
||||
# Custom fields
|
||||
self.mock_vm.custom_fields = {"test_cf": "TestCF"}
|
||||
|
||||
# Mock data for nesting tests
|
||||
self.mock_regions_data = [
|
||||
{"name": "ParentRegion", "parent": None, "_depth": 0},
|
||||
{"name": "TestRegion", "parent": "ParentRegion", "_depth": 1}
|
||||
]
|
||||
|
||||
self.mock_groups_data = [
|
||||
{"name": "ParentSiteGroup", "parent": None, "_depth": 0},
|
||||
{"name": "TestSiteGroup", "parent": "ParentSiteGroup", "_depth": 1}
|
||||
]
|
||||
|
||||
def test_device_hostgroup_creation(self):
|
||||
"""Test basic device hostgroup creation."""
|
||||
hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
# Test the string representation
|
||||
self.assertEqual(str(hostgroup), "Hostgroup for dev test-device")
|
||||
|
||||
# Check format options were set correctly
|
||||
self.assertEqual(hostgroup.format_options["site"], "TestSite")
|
||||
self.assertEqual(hostgroup.format_options["region"], "TestRegion")
|
||||
self.assertEqual(hostgroup.format_options["site_group"], "TestSiteGroup")
|
||||
self.assertEqual(hostgroup.format_options["role"], "TestRole")
|
||||
self.assertEqual(hostgroup.format_options["tenant"], "TestTenant")
|
||||
self.assertEqual(hostgroup.format_options["tenant_group"], "TestTenantGroup")
|
||||
self.assertEqual(hostgroup.format_options["platform"], "TestPlatform")
|
||||
self.assertEqual(hostgroup.format_options["manufacturer"], "TestManufacturer")
|
||||
self.assertEqual(hostgroup.format_options["location"], "TestLocation")
|
||||
|
||||
def test_vm_hostgroup_creation(self):
|
||||
"""Test basic VM hostgroup creation."""
|
||||
hostgroup = Hostgroup("vm", self.mock_vm, "4.0", self.mock_logger)
|
||||
|
||||
# Test the string representation
|
||||
self.assertEqual(str(hostgroup), "Hostgroup for vm test-vm")
|
||||
|
||||
# Check format options were set correctly
|
||||
self.assertEqual(hostgroup.format_options["site"], "TestSite")
|
||||
self.assertEqual(hostgroup.format_options["region"], "TestRegion")
|
||||
self.assertEqual(hostgroup.format_options["site_group"], "TestSiteGroup")
|
||||
self.assertEqual(hostgroup.format_options["role"], "TestRole")
|
||||
self.assertEqual(hostgroup.format_options["tenant"], "TestTenant")
|
||||
self.assertEqual(hostgroup.format_options["tenant_group"], "TestTenantGroup")
|
||||
self.assertEqual(hostgroup.format_options["platform"], "TestPlatform")
|
||||
self.assertEqual(hostgroup.format_options["cluster"], "TestCluster")
|
||||
self.assertEqual(hostgroup.format_options["cluster_type"], "TestClusterType")
|
||||
|
||||
def test_invalid_object_type(self):
|
||||
"""Test that an invalid object type raises an exception."""
|
||||
with self.assertRaises(HostgroupError):
|
||||
Hostgroup("invalid", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
def test_device_hostgroup_formats(self):
|
||||
"""Test different hostgroup formats for devices."""
|
||||
hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
# Default format: site/manufacturer/role
|
||||
default_result = hostgroup.generate()
|
||||
self.assertEqual(default_result, "TestSite/TestManufacturer/TestRole")
|
||||
|
||||
# Custom format: site/region
|
||||
custom_result = hostgroup.generate("site/region")
|
||||
self.assertEqual(custom_result, "TestSite/TestRegion")
|
||||
|
||||
# Custom format: site/tenant/platform/location
|
||||
complex_result = hostgroup.generate("site/tenant/platform/location")
|
||||
self.assertEqual(complex_result, "TestSite/TestTenant/TestPlatform/TestLocation")
|
||||
|
||||
def test_vm_hostgroup_formats(self):
|
||||
"""Test different hostgroup formats for VMs."""
|
||||
hostgroup = Hostgroup("vm", self.mock_vm, "4.0", self.mock_logger)
|
||||
|
||||
# Default format: cluster/role
|
||||
default_result = hostgroup.generate()
|
||||
self.assertEqual(default_result, "TestCluster/TestRole")
|
||||
|
||||
# Custom format: site/tenant
|
||||
custom_result = hostgroup.generate("site/tenant")
|
||||
self.assertEqual(custom_result, "TestSite/TestTenant")
|
||||
|
||||
# Custom format: cluster/cluster_type/platform
|
||||
complex_result = hostgroup.generate("cluster/cluster_type/platform")
|
||||
self.assertEqual(complex_result, "TestCluster/TestClusterType/TestPlatform")
|
||||
|
||||
def test_device_netbox_version_differences(self):
|
||||
"""Test hostgroup generation with different NetBox versions."""
|
||||
# NetBox v2.x
|
||||
hostgroup_v2 = Hostgroup("dev", self.mock_device, "2.11", self.mock_logger)
|
||||
self.assertEqual(hostgroup_v2.format_options["role"], "TestRole")
|
||||
|
||||
# NetBox v3.x
|
||||
hostgroup_v3 = Hostgroup("dev", self.mock_device, "3.5", self.mock_logger)
|
||||
self.assertEqual(hostgroup_v3.format_options["role"], "TestRole")
|
||||
|
||||
# NetBox v4.x (already tested in other methods)
|
||||
|
||||
def test_custom_field_lookup(self):
|
||||
"""Test custom field lookup functionality."""
|
||||
hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
# Test custom field exists and is populated
|
||||
cf_result = hostgroup.custom_field_lookup("test_cf")
|
||||
self.assertTrue(cf_result["result"])
|
||||
self.assertEqual(cf_result["cf"], "TestCF")
|
||||
|
||||
# Test custom field doesn't exist
|
||||
cf_result = hostgroup.custom_field_lookup("nonexistent_cf")
|
||||
self.assertFalse(cf_result["result"])
|
||||
self.assertIsNone(cf_result["cf"])
|
||||
|
||||
def test_hostgroup_with_custom_field(self):
|
||||
"""Test hostgroup generation including a custom field."""
|
||||
hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
# Generate with custom field included
|
||||
result = hostgroup.generate("site/test_cf/role")
|
||||
self.assertEqual(result, "TestSite/TestCF/TestRole")
|
||||
|
||||
def test_missing_hostgroup_format_item(self):
|
||||
"""Test handling of missing hostgroup format items."""
|
||||
# Create a device with minimal attributes
|
||||
minimal_device = MagicMock()
|
||||
minimal_device.name = "minimal-device"
|
||||
minimal_device.site = None
|
||||
minimal_device.tenant = None
|
||||
minimal_device.platform = None
|
||||
minimal_device.custom_fields = {}
|
||||
|
||||
# Create role
|
||||
role = MagicMock()
|
||||
role.name = "MinimalRole"
|
||||
minimal_device.role = role
|
||||
|
||||
# Create device_type with manufacturer
|
||||
device_type = MagicMock()
|
||||
manufacturer = MagicMock()
|
||||
manufacturer.name = "MinimalManufacturer"
|
||||
device_type.manufacturer = manufacturer
|
||||
minimal_device.device_type = device_type
|
||||
|
||||
# Create hostgroup
|
||||
hostgroup = Hostgroup("dev", minimal_device, "4.0", self.mock_logger)
|
||||
|
||||
# Generate with default format
|
||||
result = hostgroup.generate()
|
||||
# Site is missing, so only manufacturer and role should be included
|
||||
self.assertEqual(result, "MinimalManufacturer/MinimalRole")
|
||||
|
||||
# Test with invalid format
|
||||
with self.assertRaises(HostgroupError):
|
||||
hostgroup.generate("site/nonexistent/role")
|
||||
|
||||
def test_hostgroup_missing_required_attributes(self):
|
||||
"""Test handling when no valid hostgroup can be generated."""
|
||||
# Create a VM with minimal attributes that won't satisfy any format
|
||||
minimal_vm = MagicMock()
|
||||
minimal_vm.name = "minimal-vm"
|
||||
minimal_vm.site = None
|
||||
minimal_vm.tenant = None
|
||||
minimal_vm.platform = None
|
||||
minimal_vm.role = None
|
||||
minimal_vm.cluster = None
|
||||
minimal_vm.custom_fields = {}
|
||||
|
||||
hostgroup = Hostgroup("vm", minimal_vm, "4.0", self.mock_logger)
|
||||
|
||||
# With default format of cluster/role, both are None, so should raise an error
|
||||
with self.assertRaises(HostgroupError):
|
||||
hostgroup.generate()
|
||||
|
||||
def test_nested_region_hostgroups(self):
|
||||
"""Test hostgroup generation with nested regions."""
|
||||
# Mock the build_path function to return a predictable result
|
||||
with patch('modules.hostgroups.build_path') as mock_build_path:
|
||||
# Configure the mock to return a list of regions in the path
|
||||
mock_build_path.return_value = ["ParentRegion", "TestRegion"]
|
||||
|
||||
# Create hostgroup with nested regions enabled
|
||||
hostgroup = Hostgroup(
|
||||
"dev",
|
||||
self.mock_device,
|
||||
"4.0",
|
||||
self.mock_logger,
|
||||
nested_region_flag=True,
|
||||
nb_regions=self.mock_regions_data
|
||||
)
|
||||
|
||||
# Generate hostgroup with region
|
||||
result = hostgroup.generate("site/region/role")
|
||||
# Should include the parent region
|
||||
self.assertEqual(result, "TestSite/ParentRegion/TestRegion/TestRole")
|
||||
|
||||
def test_nested_sitegroup_hostgroups(self):
|
||||
"""Test hostgroup generation with nested site groups."""
|
||||
# Mock the build_path function to return a predictable result
|
||||
with patch('modules.hostgroups.build_path') as mock_build_path:
|
||||
# Configure the mock to return a list of site groups in the path
|
||||
mock_build_path.return_value = ["ParentSiteGroup", "TestSiteGroup"]
|
||||
|
||||
# Create hostgroup with nested site groups enabled
|
||||
hostgroup = Hostgroup(
|
||||
"dev",
|
||||
self.mock_device,
|
||||
"4.0",
|
||||
self.mock_logger,
|
||||
nested_sitegroup_flag=True,
|
||||
nb_groups=self.mock_groups_data
|
||||
)
|
||||
|
||||
# Generate hostgroup with site_group
|
||||
result = hostgroup.generate("site/site_group/role")
|
||||
# Should include the parent site group
|
||||
self.assertEqual(result, "TestSite/ParentSiteGroup/TestSiteGroup/TestRole")
|
||||
|
||||
|
||||
def test_list_formatoptions(self):
|
||||
"""Test the list_formatoptions method for debugging."""
|
||||
hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)
|
||||
|
||||
# Patch sys.stdout to capture print output
|
||||
with patch('sys.stdout') as mock_stdout:
|
||||
hostgroup.list_formatoptions()
|
||||
|
||||
# Check that print was called with expected output
|
||||
calls = [call.write(f"The following options are available for host test-device"),
|
||||
call.write('\n')]
|
||||
mock_stdout.assert_has_calls(calls, any_order=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
tests/test_interface.py (new file, 247 lines)
@@ -0,0 +1,247 @@
|
||||
"""Tests for the ZabbixInterface class in the interface module."""
|
||||
import unittest
|
||||
from modules.interface import ZabbixInterface
|
||||
from modules.exceptions import InterfaceConfigError
|
||||
|
||||
|
||||
class TestZabbixInterface(unittest.TestCase):
|
||||
"""Test class for ZabbixInterface functionality."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
self.test_ip = "192.168.1.1"
|
||||
self.empty_context = {}
|
||||
self.default_interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
|
||||
# Create some test contexts for different scenarios
|
||||
self.snmpv2_context = {
|
||||
"zabbix": {
|
||||
"interface_type": 2,
|
||||
"interface_port": "161",
|
||||
"snmp": {
|
||||
"version": 2,
|
||||
"community": "public",
|
||||
"bulk": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.snmpv3_context = {
|
||||
"zabbix": {
|
||||
"interface_type": 2,
|
||||
"snmp": {
|
||||
"version": 3,
|
||||
"securityname": "snmpuser",
|
||||
"securitylevel": "authPriv",
|
||||
"authprotocol": "SHA",
|
||||
"authpassphrase": "authpass123",
|
||||
"privprotocol": "AES",
|
||||
"privpassphrase": "privpass123",
|
||||
"contextname": "context1"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.agent_context = {
|
||||
"zabbix": {
|
||||
"interface_type": 1,
|
||||
"interface_port": "10050"
|
||||
}
|
||||
}
|
||||
|
||||
def test_init(self):
|
||||
"""Test initialization of ZabbixInterface."""
|
||||
interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
|
||||
# Check basic properties
|
||||
self.assertEqual(interface.ip, self.test_ip)
|
||||
self.assertEqual(interface.context, self.empty_context)
|
||||
self.assertEqual(interface.interface["ip"], self.test_ip)
|
||||
self.assertEqual(interface.interface["main"], "1")
|
||||
self.assertEqual(interface.interface["useip"], "1")
|
||||
self.assertEqual(interface.interface["dns"], "")
|
||||
|
||||
def test_get_context_empty(self):
|
||||
"""Test get_context with empty context."""
|
||||
interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
result = interface.get_context()
|
||||
self.assertFalse(result)
|
||||
|
||||
def test_get_context_with_interface_type(self):
|
||||
"""Test get_context with interface_type but no port."""
|
||||
context = {"zabbix": {"interface_type": 2}}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
|
||||
# Should set type and default port
|
||||
result = interface.get_context()
|
||||
self.assertTrue(result)
|
||||
self.assertEqual(interface.interface["type"], 2)
|
||||
self.assertEqual(interface.interface["port"], "161") # Default port for SNMP
|
||||
|
||||
def test_get_context_with_interface_type_and_port(self):
|
||||
"""Test get_context with both interface_type and port."""
|
||||
context = {"zabbix": {"interface_type": 1, "interface_port": "12345"}}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
|
||||
# Should set type and specified port
|
||||
result = interface.get_context()
|
||||
self.assertTrue(result)
|
||||
self.assertEqual(interface.interface["type"], 1)
|
||||
self.assertEqual(interface.interface["port"], "12345")
|
||||
|
||||
def test_set_default_port(self):
|
||||
"""Test _set_default_port for different interface types."""
|
||||
interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
|
||||
# Test for agent type (1)
|
||||
interface.interface["type"] = 1
|
||||
interface._set_default_port() # pylint: disable=protected-access
|
||||
self.assertEqual(interface.interface["port"], "10050")
|
||||
|
||||
# Test for SNMP type (2)
|
||||
interface.interface["type"] = 2
|
||||
interface._set_default_port() # pylint: disable=protected-access
|
||||
self.assertEqual(interface.interface["port"], "161")
|
||||
|
||||
# Test for IPMI type (3)
|
||||
interface.interface["type"] = 3
|
||||
interface._set_default_port() # pylint: disable=protected-access
|
||||
self.assertEqual(interface.interface["port"], "623")
|
||||
|
||||
# Test for JMX type (4)
|
||||
interface.interface["type"] = 4
|
||||
interface._set_default_port() # pylint: disable=protected-access
|
||||
self.assertEqual(interface.interface["port"], "12345")
|
||||
|
||||
# Test for unsupported type
|
||||
interface.interface["type"] = 99
|
||||
result = interface._set_default_port() # pylint: disable=protected-access
|
||||
self.assertFalse(result)
|
||||
|
||||
def test_set_snmp_v2(self):
|
||||
"""Test set_snmp with SNMPv2 configuration."""
|
||||
interface = ZabbixInterface(self.snmpv2_context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp
|
||||
interface.set_snmp()
|
||||
|
||||
# Check SNMP details
|
||||
self.assertEqual(interface.interface["details"]["version"], "2")
|
||||
self.assertEqual(interface.interface["details"]["community"], "public")
|
||||
self.assertEqual(interface.interface["details"]["bulk"], "1")
|
||||
|
||||
def test_set_snmp_v3(self):
|
||||
"""Test set_snmp with SNMPv3 configuration."""
|
||||
interface = ZabbixInterface(self.snmpv3_context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp
|
||||
interface.set_snmp()
|
||||
|
||||
# Check SNMP details
|
||||
self.assertEqual(interface.interface["details"]["version"], "3")
|
||||
self.assertEqual(interface.interface["details"]["securityname"], "snmpuser")
|
||||
self.assertEqual(interface.interface["details"]["securitylevel"], "authPriv")
|
||||
self.assertEqual(interface.interface["details"]["authprotocol"], "SHA")
|
||||
self.assertEqual(interface.interface["details"]["authpassphrase"], "authpass123")
|
||||
self.assertEqual(interface.interface["details"]["privprotocol"], "AES")
|
||||
self.assertEqual(interface.interface["details"]["privpassphrase"], "privpass123")
|
||||
self.assertEqual(interface.interface["details"]["contextname"], "context1")
|
||||
|
||||
def test_set_snmp_no_snmp_config(self):
|
||||
"""Test set_snmp with missing SNMP configuration."""
|
||||
# Create context with interface type but no SNMP config
|
||||
context = {"zabbix": {"interface_type": 2}}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp - should raise exception
|
||||
with self.assertRaises(InterfaceConfigError):
|
||||
interface.set_snmp()
|
||||
|
||||
def test_set_snmp_unsupported_version(self):
|
||||
"""Test set_snmp with unsupported SNMP version."""
|
||||
# Create context with invalid SNMP version
|
||||
context = {
|
||||
"zabbix": {
|
||||
"interface_type": 2,
|
||||
"snmp": {
|
||||
"version": 4 # Invalid version
|
||||
}
|
||||
}
|
||||
}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp - should raise exception
|
||||
with self.assertRaises(InterfaceConfigError):
|
||||
interface.set_snmp()
|
||||
|
||||
def test_set_snmp_no_version(self):
|
||||
"""Test set_snmp with missing SNMP version."""
|
||||
# Create context without SNMP version
|
||||
context = {
|
||||
"zabbix": {
|
||||
"interface_type": 2,
|
||||
"snmp": {
|
||||
"community": "public" # No version specified
|
||||
}
|
||||
}
|
||||
}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp - should raise exception
|
||||
with self.assertRaises(InterfaceConfigError):
|
||||
interface.set_snmp()
|
||||
|
||||
def test_set_snmp_non_snmp_interface(self):
|
||||
"""Test set_snmp with non-SNMP interface type."""
|
||||
interface = ZabbixInterface(self.agent_context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp - should raise exception
|
||||
with self.assertRaises(InterfaceConfigError):
|
||||
interface.set_snmp()
|
||||
|
||||
def test_set_default_snmp(self):
|
||||
"""Test set_default_snmp method."""
|
||||
interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
interface.set_default_snmp()
|
||||
|
||||
# Check interface properties
|
||||
self.assertEqual(interface.interface["type"], "2")
|
||||
self.assertEqual(interface.interface["port"], "161")
|
||||
self.assertEqual(interface.interface["details"]["version"], "2")
|
||||
self.assertEqual(interface.interface["details"]["community"], "{$SNMP_COMMUNITY}")
|
||||
self.assertEqual(interface.interface["details"]["bulk"], "1")
|
||||
|
||||
def test_set_default_agent(self):
|
||||
"""Test set_default_agent method."""
|
||||
interface = ZabbixInterface(self.empty_context, self.test_ip)
|
||||
interface.set_default_agent()
|
||||
|
||||
# Check interface properties
|
||||
self.assertEqual(interface.interface["type"], "1")
|
||||
self.assertEqual(interface.interface["port"], "10050")
|
||||
|
||||
def test_snmpv2_no_community(self):
|
||||
"""Test SNMPv2 with no community string specified."""
|
||||
# Create context with SNMPv2 but no community
|
||||
context = {
|
||||
"zabbix": {
|
||||
"interface_type": 2,
|
||||
"snmp": {
|
||||
"version": 2
|
||||
}
|
||||
}
|
||||
}
|
||||
interface = ZabbixInterface(context, self.test_ip)
|
||||
interface.get_context() # Set the interface type
|
||||
|
||||
# Call set_snmp
|
||||
interface.set_snmp()
|
||||
|
||||
# Should use default community string
|
||||
self.assertEqual(interface.interface["details"]["community"], "{$SNMP_COMMUNITY}")
|
tests/test_physical_device.py (new file, 429 lines)
@@ -0,0 +1,429 @@
|
||||
"""Tests for the PhysicalDevice class in the device module."""
|
||||
import unittest
|
||||
from unittest.mock import MagicMock, patch
|
||||
from modules.device import PhysicalDevice
|
||||
from modules.exceptions import TemplateError, SyncInventoryError
|
||||
|
||||
|
||||
class TestPhysicalDevice(unittest.TestCase):
|
||||
"""Test class for PhysicalDevice functionality."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
# Create mock NetBox device
|
||||
self.mock_nb_device = MagicMock()
|
||||
self.mock_nb_device.id = 123
|
||||
self.mock_nb_device.name = "test-device"
|
||||
self.mock_nb_device.status.label = "Active"
|
||||
self.mock_nb_device.custom_fields = {"zabbix_hostid": None}
|
||||
self.mock_nb_device.config_context = {}
|
||||
|
||||
# Set up a primary IP
|
||||
primary_ip = MagicMock()
|
||||
primary_ip.address = "192.168.1.1/24"
|
||||
self.mock_nb_device.primary_ip = primary_ip
|
||||
|
||||
# Create mock Zabbix API
|
||||
self.mock_zabbix = MagicMock()
|
||||
self.mock_zabbix.version = "6.0"
|
||||
|
||||
# Mock NetBox journal class
|
||||
self.mock_nb_journal = MagicMock()
|
||||
|
||||
# Create logger mock
|
||||
self.mock_logger = MagicMock()
|
||||
|
||||
# Create PhysicalDevice instance with mocks
|
||||
with patch('modules.device.config',
|
||||
{"device_cf": "zabbix_hostid",
|
||||
"template_cf": "zabbix_template",
|
||||
"templates_config_context": False,
|
||||
"templates_config_context_overrule": False,
|
||||
"traverse_regions": False,
|
||||
"traverse_site_groups": False,
|
||||
"inventory_mode": "disabled",
|
||||
"inventory_sync": False,
|
||||
"device_inventory_map": {}
|
||||
}):
|
||||
self.device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
journal=True,
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
def test_init(self):
|
||||
"""Test the initialization of the PhysicalDevice class."""
|
||||
# Check that basic properties are set correctly
|
||||
self.assertEqual(self.device.name, "test-device")
|
||||
self.assertEqual(self.device.id, 123)
|
||||
self.assertEqual(self.device.status, "Active")
|
||||
self.assertEqual(self.device.ip, "192.168.1.1")
|
||||
self.assertEqual(self.device.cidr, "192.168.1.1/24")
|
||||
|
||||
def test_init_no_primary_ip(self):
|
||||
"""Test initialization when device has no primary IP."""
|
||||
# Set primary_ip to None
|
||||
self.mock_nb_device.primary_ip = None
|
||||
|
||||
# Creating device should raise SyncInventoryError
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
with self.assertRaises(SyncInventoryError):
|
||||
PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
def test_set_basics_with_special_characters(self):
|
||||
"""Test _setBasics when device name contains special characters."""
|
||||
# Set name with special characters that
|
||||
# will actually trigger the special character detection
|
||||
self.mock_nb_device.name = "test-devïce"
|
||||
|
||||
# We need to patch the search function to simulate finding special characters
|
||||
with patch('modules.device.search') as mock_search, \
|
||||
patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
# Make the search function return True to simulate special characters
|
||||
mock_search.return_value = True
|
||||
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# With the mocked search function, the name should be changed to NETBOX_ID format
|
||||
self.assertEqual(device.name, f"NETBOX_ID{self.mock_nb_device.id}")
|
||||
# And visible_name should be set to the original name
|
||||
self.assertEqual(device.visible_name, "test-devïce")
|
||||
# use_visible_name flag should be set
|
||||
self.assertTrue(device.use_visible_name)
|
||||
|
||||
def test_get_templates_context(self):
|
||||
"""Test get_templates_context with valid config."""
|
||||
# Set up config_context with valid template data
|
||||
self.mock_nb_device.config_context = {
|
||||
"zabbix": {
|
||||
"templates": ["Template1", "Template2"]
|
||||
}
|
||||
}
|
||||
|
||||
# Create device with the updated mock
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Test that templates are returned correctly
|
||||
templates = device.get_templates_context()
|
||||
self.assertEqual(templates, ["Template1", "Template2"])
|
||||
|
||||
def test_get_templates_context_with_string(self):
|
||||
"""Test get_templates_context with a string instead of list."""
|
||||
# Set up config_context with a string template
|
||||
self.mock_nb_device.config_context = {
|
||||
"zabbix": {
|
||||
"templates": "Template1"
|
||||
}
|
||||
}
|
||||
|
||||
# Create device with the updated mock
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Test that template is wrapped in a list
|
||||
templates = device.get_templates_context()
|
||||
self.assertEqual(templates, ["Template1"])
|
||||
|
||||
def test_get_templates_context_no_zabbix_key(self):
|
||||
"""Test get_templates_context when zabbix key is missing."""
|
||||
# Set up config_context without zabbix key
|
||||
self.mock_nb_device.config_context = {}
|
||||
|
||||
# Create device with the updated mock
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Test that TemplateError is raised
|
||||
with self.assertRaises(TemplateError):
|
||||
device.get_templates_context()
|
||||
|
||||
def test_get_templates_context_no_templates_key(self):
|
||||
"""Test get_templates_context when templates key is missing."""
|
||||
# Set up config_context without templates key
|
||||
self.mock_nb_device.config_context = {"zabbix": {}}
|
||||
|
||||
# Create device with the updated mock
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Test that TemplateError is raised
|
||||
with self.assertRaises(TemplateError):
|
||||
device.get_templates_context()
|
||||
|
||||
    def test_set_template_with_config_context(self):
        """Test set_template with templates_config_context=True."""
        # Set up config_context with templates
        self.mock_nb_device.config_context = {
            "zabbix": {
                "templates": ["Template1"]
            }
        }

        # Mock get_templates_context to return expected templates
        with patch.object(PhysicalDevice, 'get_templates_context', return_value=["Template1"]):
            with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
                device = PhysicalDevice(
                    self.mock_nb_device,
                    self.mock_zabbix,
                    self.mock_nb_journal,
                    "3.0",
                    logger=self.mock_logger
                )

                # Call set_template with prefer_config_context=True
                result = device.set_template(prefer_config_context=True, overrule_custom=False)

        # Check result and template names
        self.assertTrue(result)
        self.assertEqual(device.zbx_template_names, ["Template1"])

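The test above only exercises the config-context branch of set_template(): get_templates_context() is patched, and the test verifies that its return value ends up in zbx_template_names. A hedged sketch of that branch (editor's names; the device-type / custom-field path is not covered here and is deliberately left out):

def sketch_set_template(device, prefer_config_context):
    # Only the branch pinned down by the test above is sketched here.
    if prefer_config_context:
        device.zbx_template_names = device.get_templates_context()
        return True
    # The fallback to templates from the device type / custom field is assumed
    # to live here in the real implementation.
    return False
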
    def test_set_inventory_disabled_mode(self):
        """Test set_inventory with inventory_mode=disabled."""
        # Configure with disabled inventory mode
        config_patch = {
            "device_cf": "zabbix_hostid",
            "inventory_mode": "disabled",
            "inventory_sync": False
        }

        with patch('modules.device.config', config_patch):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Call set_inventory with the config patch still active
        with patch('modules.device.config', config_patch):
            result = device.set_inventory({})

        # Check result
        self.assertTrue(result)
        # Default value for disabled inventory
        self.assertEqual(device.inventory_mode, -1)

    def test_set_inventory_manual_mode(self):
        """Test set_inventory with inventory_mode=manual."""
        # Configure with manual inventory mode
        config_patch = {
            "device_cf": "zabbix_hostid",
            "inventory_mode": "manual",
            "inventory_sync": False
        }

        with patch('modules.device.config', config_patch):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Call set_inventory with the config patch still active
        with patch('modules.device.config', config_patch):
            result = device.set_inventory({})

        # Check result
        self.assertTrue(result)
        self.assertEqual(device.inventory_mode, 0)  # Manual mode

    def test_set_inventory_automatic_mode(self):
        """Test set_inventory with inventory_mode=automatic."""
        # Configure with automatic inventory mode
        config_patch = {
            "device_cf": "zabbix_hostid",
            "inventory_mode": "automatic",
            "inventory_sync": False
        }

        with patch('modules.device.config', config_patch):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Call set_inventory with the config patch still active
        with patch('modules.device.config', config_patch):
            result = device.set_inventory({})

        # Check result
        self.assertTrue(result)
        self.assertEqual(device.inventory_mode, 1)  # Automatic mode

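Together, the three inventory-mode tests above fix the mapping from the inventory_mode setting to the integer Zabbix uses for host.inventory_mode. A small illustrative helper (editor's names, not the module's code):

# Mapping implied by the assertions above: Zabbix expects -1/0/1 for
# disabled/manual/automatic inventory.
INVENTORY_MODES = {"disabled": -1, "manual": 0, "automatic": 1}

def resolve_inventory_mode(mode_name):
    # How unknown values are handled is left to the real implementation.
    return INVENTORY_MODES.get(str(mode_name).lower())
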
    def test_set_inventory_with_inventory_sync(self):
        """Test set_inventory with inventory_sync=True."""
        # Configure with inventory sync enabled
        config_patch = {
            "device_cf": "zabbix_hostid",
            "inventory_mode": "manual",
            "inventory_sync": True,
            "device_inventory_map": {
                "name": "name",
                "serial": "serialno_a"
            }
        }

        with patch('modules.device.config', config_patch):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Create mock device data with the required attributes
        mock_device_data = {
            "name": "test-device",
            "serial": "ABC123"
        }

        # Call set_inventory with the config patch still active
        with patch('modules.device.config', config_patch):
            result = device.set_inventory(mock_device_data)

        # Check result
        self.assertTrue(result)
        self.assertEqual(device.inventory_mode, 0)  # Manual mode
        self.assertEqual(device.inventory, {
            "name": "test-device",
            "serialno_a": "ABC123"
        })

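When inventory_sync is enabled, device_inventory_map is read as NetBox field -> Zabbix inventory field; the test expects "name" to map to "name" and "serial" to "serialno_a". A hedged sketch of that translation step (editor's helper, not the module's code):

def build_inventory(nb_data, inventory_map):
    # Copy each mapped NetBox field into the Zabbix inventory dict.
    inventory = {}
    for nb_field, zbx_field in inventory_map.items():
        if nb_field in nb_data:
            inventory[zbx_field] = nb_data[nb_field]
    return inventory

# build_inventory({"name": "test-device", "serial": "ABC123"},
#                 {"name": "name", "serial": "serialno_a"})
# == {"name": "test-device", "serialno_a": "ABC123"}
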
    def test_iscluster_true(self):
        """Test isCluster when the device is part of a cluster."""
        # Set up virtual_chassis
        self.mock_nb_device.virtual_chassis = MagicMock()

        # Create device with the updated mock
        with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Check isCluster result
        self.assertTrue(device.isCluster())

    def test_is_cluster_false(self):
        """Test isCluster when the device is not part of a cluster."""
        # Set virtual_chassis to None
        self.mock_nb_device.virtual_chassis = None

        # Create device with the updated mock
        with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Check isCluster result
        self.assertFalse(device.isCluster())

    def test_promote_master_device_primary(self):
        """Test promoteMasterDevice when the device is primary in the cluster."""
        # Set up virtual chassis with a master device
        mock_vc = MagicMock()
        mock_vc.name = "virtual-chassis-1"
        mock_master = MagicMock()
        mock_master.id = self.mock_nb_device.id  # Master ID matches the current device
        mock_vc.master = mock_master
        self.mock_nb_device.virtual_chassis = mock_vc

        # Create device with the updated mock
        device = PhysicalDevice(
            self.mock_nb_device,
            self.mock_zabbix,
            self.mock_nb_journal,
            "3.0",
            logger=self.mock_logger
        )

        # Call promoteMasterDevice and check the result
        result = device.promoteMasterDevice()

        # Should return True for the primary device
        self.assertTrue(result)
        # Device name should be updated to the virtual chassis name
        self.assertEqual(device.name, "virtual-chassis-1")

    def test_promote_master_device_secondary(self):
        """Test promoteMasterDevice when the device is secondary in the cluster."""
        # Set up virtual chassis with a different master device
        mock_vc = MagicMock()
        mock_vc.name = "virtual-chassis-1"
        mock_master = MagicMock()
        mock_master.id = self.mock_nb_device.id + 1  # Different ID than the current device
        mock_vc.master = mock_master
        self.mock_nb_device.virtual_chassis = mock_vc

        # Create device with the updated mock
        device = PhysicalDevice(
            self.mock_nb_device,
            self.mock_zabbix,
            self.mock_nb_journal,
            "3.0",
            logger=self.mock_logger
        )

        # Call promoteMasterDevice and check the result
        result = device.promoteMasterDevice()

        # Should return False for a secondary device
        self.assertFalse(result)
        # Device name should not be modified
        self.assertEqual(device.name, "test-device")

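The isCluster() and promoteMasterDevice() tests above describe the virtual-chassis handling: a device counts as a cluster member when virtual_chassis is set, and only the chassis master is promoted, taking over the chassis name while members keep their own name. An illustrative sketch (editor's names, not the module's code):

def sketch_is_cluster(nb_device):
    # A device belongs to a cluster when NetBox assigns it a virtual chassis.
    return nb_device.virtual_chassis is not None

def sketch_promote_master(nb_device, current_name):
    vc = nb_device.virtual_chassis
    if vc and vc.master and vc.master.id == nb_device.id:
        return True, vc.name      # master: adopt the virtual-chassis name
    return False, current_name    # member: keep the device's own name
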
tests/test_tools.py (new file, 62 lines)
@@ -0,0 +1,62 @@
from modules.tools import sanatize_log_output


def test_sanatize_log_output_secrets():
    data = {
        "macros": [
            {"macro": "{$SECRET}", "type": "1", "value": "supersecret"},
            {"macro": "{$PLAIN}", "type": "0", "value": "notsecret"},
        ]
    }
    sanitized = sanatize_log_output(data)
    assert sanitized["macros"][0]["value"] == "********"
    assert sanitized["macros"][1]["value"] == "notsecret"

def test_sanatize_log_output_interface_secrets():
    data = {
        "interfaceid": 123,
        "details": {
            "authpassphrase": "supersecret",
            "privpassphrase": "anothersecret",
            "securityname": "sensitiveuser",
            "community": "public",
            "other": "normalvalue"
        }
    }
    sanitized = sanatize_log_output(data)
    # Sensitive fields (including the SNMP community) should be sanitized
    assert sanitized["details"]["authpassphrase"] == "********"
    assert sanitized["details"]["privpassphrase"] == "********"
    assert sanitized["details"]["securityname"] == "********"
    assert sanitized["details"]["community"] == "********"
    # Non-sensitive fields should remain
    assert sanitized["details"]["other"] == "normalvalue"
    # interfaceid should be removed
    assert "interfaceid" not in sanitized

def test_sanatize_log_output_interface_macros():
    data = {
        "interfaceid": 123,
        "details": {
            "authpassphrase": "{$SECRET_MACRO}",
            "privpassphrase": "{$SECRET_MACRO}",
            "securityname": "{$USER_MACRO}",
            "community": "{$SNMP_COMMUNITY}",
        }
    }
    sanitized = sanatize_log_output(data)
    # Macro values should not be sanitized
    assert sanitized["details"]["authpassphrase"] == "{$SECRET_MACRO}"
    assert sanitized["details"]["privpassphrase"] == "{$SECRET_MACRO}"
    assert sanitized["details"]["securityname"] == "{$USER_MACRO}"
    assert sanitized["details"]["community"] == "{$SNMP_COMMUNITY}"
    assert "interfaceid" not in sanitized

def test_sanatize_log_output_plain_data():
    data = {"foo": "bar", "baz": 123}
    sanitized = sanatize_log_output(data)
    assert sanitized == data


def test_sanatize_log_output_non_dict():
    data = [1, 2, 3]
    sanitized = sanatize_log_output(data)
    assert sanitized == data

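Read together, these tests specify sanatize_log_output(): secret-type macro values (type "1") are masked, SNMP credentials in interface details are masked unless the value is itself a {$...} user macro reference, interfaceid is dropped, and non-dict input passes through untouched. A hedged sketch consistent with those assertions (the real modules.tools.sanatize_log_output may differ in detail):

SENSITIVE_DETAIL_KEYS = {"authpassphrase", "privpassphrase", "securityname", "community"}

def sketch_sanitize(data):
    if not isinstance(data, dict):
        return data
    clean = dict(data)
    # Mask secret macros (Zabbix macro type "1"), keep plain-text macros.
    if "macros" in clean:
        clean["macros"] = [
            {**m, "value": "********"} if m.get("type") == "1" else m
            for m in clean["macros"]
        ]
    # Mask SNMP credentials, but keep {$...} macro references readable.
    if "details" in clean:
        clean["details"] = {
            k: ("********"
                if k in SENSITIVE_DETAIL_KEYS and not str(v).startswith("{$")
                else v)
            for k, v in clean["details"].items()
        }
    # Interface IDs are dropped from the logged payload.
    clean.pop("interfaceid", None)
    return clean
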
tests/test_usermacros.py (new file, 125 lines)
@@ -0,0 +1,125 @@
import unittest
from unittest.mock import MagicMock, patch
from modules.device import PhysicalDevice
from modules.usermacros import ZabbixUsermacros


class DummyNB:
    def __init__(self, name="dummy", config_context=None, **kwargs):
        self.name = name
        self.config_context = config_context or {}
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __getitem__(self, key):
        # Allow dict-style access for test compatibility
        if hasattr(self, key):
            return getattr(self, key)
        if key in self.config_context:
            return self.config_context[key]
        raise KeyError(key)

class TestUsermacroSync(unittest.TestCase):
    def setUp(self):
        self.nb = DummyNB(serial="1234")
        self.logger = MagicMock()
        self.usermacro_map = {"serial": "{$HW_SERIAL}"}

    @patch("modules.device.config", {"usermacro_sync": False})
    def test_usermacro_sync_false(self):
        device = PhysicalDevice.__new__(PhysicalDevice)
        device.nb = self.nb
        device.logger = self.logger
        device.name = "dummy"
        device._usermacro_map = MagicMock(return_value=self.usermacro_map)
        # Call set_usermacros
        result = device.set_usermacros()
        self.assertEqual(device.usermacros, [])
        self.assertTrue(result is True or result is None)

    @patch("modules.device.config", {"usermacro_sync": True})
    def test_usermacro_sync_true(self):
        device = PhysicalDevice.__new__(PhysicalDevice)
        device.nb = self.nb
        device.logger = self.logger
        device.name = "dummy"
        device._usermacro_map = MagicMock(return_value=self.usermacro_map)
        result = device.set_usermacros()
        self.assertIsInstance(device.usermacros, list)
        self.assertGreater(len(device.usermacros), 0)

    @patch("modules.device.config", {"usermacro_sync": "full"})
    def test_usermacro_sync_full(self):
        device = PhysicalDevice.__new__(PhysicalDevice)
        device.nb = self.nb
        device.logger = self.logger
        device.name = "dummy"
        device._usermacro_map = MagicMock(return_value=self.usermacro_map)
        result = device.set_usermacros()
        self.assertIsInstance(device.usermacros, list)
        self.assertGreater(len(device.usermacros), 0)

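These three tests only distinguish "off" from "on": with usermacro_sync set to False the device ends up with an empty usermacros list, while both True and "full" produce macros from the configured map (the difference between True and "full" is not visible from these assertions). A hedged sketch of that gate, using the same objects the tests construct:

def sketch_set_usermacros(device, usermacro_sync):
    """Gate implied by the three tests above; illustrative only."""
    if not usermacro_sync:
        # usermacro_sync: False -> nothing is synced.
        device.usermacros = []
        return True
    # usermacro_sync: true or "full" -> render macros from the usermacro map
    # (NetBox field -> {$MACRO}); the real code also considers config context.
    macro_map = device._usermacro_map()
    device.usermacros = [
        {"macro": macro, "value": getattr(device.nb, field, "")}
        for field, macro in macro_map.items()
    ]
    return True
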
class TestZabbixUsermacros(unittest.TestCase):
    def setUp(self):
        self.nb = DummyNB()
        self.logger = MagicMock()

    def test_validate_macro_valid(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        self.assertTrue(macros.validate_macro("{$TEST_MACRO}"))
        self.assertTrue(macros.validate_macro("{$A1_2.3}"))
        self.assertTrue(macros.validate_macro("{$FOO:bar}"))

    def test_validate_macro_invalid(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        self.assertFalse(macros.validate_macro("$TEST_MACRO"))
        self.assertFalse(macros.validate_macro("{TEST_MACRO}"))
        self.assertFalse(macros.validate_macro("{$test}"))  # lower-case not allowed
        self.assertFalse(macros.validate_macro(""))

    def test_render_macro_dict(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        macro = macros.render_macro("{$FOO}", {"value": "bar", "type": "secret", "description": "desc"})
        self.assertEqual(macro["macro"], "{$FOO}")
        self.assertEqual(macro["value"], "bar")
        self.assertEqual(macro["type"], "1")
        self.assertEqual(macro["description"], "desc")

    def test_render_macro_dict_missing_value(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        result = macros.render_macro("{$FOO}", {"type": "text"})
        self.assertFalse(result)
        self.logger.warning.assert_called()

    def test_render_macro_str(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        macro = macros.render_macro("{$FOO}", "bar")
        self.assertEqual(macro["macro"], "{$FOO}")
        self.assertEqual(macro["value"], "bar")
        self.assertEqual(macro["type"], "0")
        self.assertEqual(macro["description"], "")

    def test_render_macro_invalid_name(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        result = macros.render_macro("FOO", "bar")
        self.assertFalse(result)
        self.logger.error.assert_called()

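The render_macro tests fix the shape of a rendered user macro: a dict with macro, value, type and description keys, where type "1" marks a secret value and "0" plain text, and invalid names or missing values are rejected (with a log message). A minimal sketch of that shape, inferred from the assertions (not the module's code):

def sketch_render_macro(name, spec):
    macro_types = {"text": "0", "secret": "1"}   # mapping implied by the tests
    if not name.startswith("{$"):                # stands in for validate_macro()
        return False
    if isinstance(spec, str):
        return {"macro": name, "value": spec, "type": "0", "description": ""}
    if "value" not in spec:
        return False
    return {
        "macro": name,
        "value": spec["value"],
        "type": macro_types.get(spec.get("type", "text"), "0"),
        "description": spec.get("description", ""),
    }
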
    def test_generate_from_map(self):
        nb = DummyNB(memory="bar", role="baz")
        usermacro_map = {"memory": "{$FOO}", "role": "{$BAR}"}
        macros = ZabbixUsermacros(nb, usermacro_map, True, logger=self.logger)
        result = macros.generate()
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]["macro"], "{$FOO}")
        self.assertEqual(result[1]["macro"], "{$BAR}")

    def test_generate_from_config_context(self):
        config_context = {"zabbix": {"usermacros": {"{$FOO}": {"value": "bar"}}}}
        nb = DummyNB(config_context=config_context)
        macros = ZabbixUsermacros(nb, {}, True, logger=self.logger)
        result = macros.generate()
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]["macro"], "{$FOO}")


if __name__ == "__main__":
    unittest.main()