Mirror of https://github.com/TheNetworkGuy/netbox-zabbix-sync.git (synced 2026-01-07 04:27:31 -06:00)

Compare commits: 0.9...fd66a4c943 (318 commits)
.devcontainer/devcontainer.json (new file, 22 lines)
@@ -0,0 +1,22 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
{
	"name": "Python 3",
	// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
	"image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye",

	// Features to add to the dev container. More info: https://containers.dev/features.
	// "features": {},

	// Use 'forwardPorts' to make a list of ports inside the container available locally.
	// "forwardPorts": [],

	// Use 'postCreateCommand' to run commands after the container is created.
	"postCreateCommand": "pip3 install --user -r requirements.txt && pip3 install --user pylint pytest coverage pytest-cov"

	// Configure tool-specific properties.
	// "customizations": {},

	// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
	// "remoteUser": "root"
}
.github/workflows/publish-image.yml (new file, 55 lines, vendored)
@@ -0,0 +1,55 @@
|
||||
---
|
||||
name: Build and Push Docker Image
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
release:
|
||||
types: [published]
|
||||
pull_request:
|
||||
types: [opened, synchronize]
|
||||
|
||||
jobs:
|
||||
test_quality:
|
||||
uses: ./.github/workflows/quality.yml
|
||||
test_code:
|
||||
uses: ./.github/workflows/run_tests.yml
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96
|
||||
with:
|
||||
images: ghcr.io/${{ github.repository }}
|
||||
tags: |
|
||||
type=ref,event=branch
|
||||
type=ref,event=pr
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991
|
||||
with:
|
||||
context: .
|
||||
file: ./Dockerfile
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
annotations: |
|
||||
index:org.opencontainers.image.description=Python script to synchronise NetBox devices to Zabbix.
|
||||
.github/workflows/quality.yml (new file, 27 lines, vendored)
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: Pylint Quality control
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
python_quality_testing:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.12","3.13"]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install pylint
|
||||
pip install -r requirements.txt
|
||||
- name: Analysing the code with pylint
|
||||
run: |
|
||||
pylint --module-naming-style=any modules/* netbox_zabbix_sync.py
|
||||
.github/workflows/run_tests.yml (new file, 32 lines, vendored)
@@ -0,0 +1,32 @@
|
||||
---
|
||||
name: Pytest code testing
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
test_code:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: 3.12
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install pytest pytest-mock coverage pytest-cov
|
||||
pip install -r requirements.txt
|
||||
- name: Testing the code with PyTest
|
||||
run: |
|
||||
cp config.py.example config.py
|
||||
pytest tests
|
||||
- name: Run tests with coverage
|
||||
run: |
|
||||
cp config.py.example config.py
|
||||
coverage run -m pytest tests
|
||||
- name: Check coverage percentage
|
||||
run: |
|
||||
coverage report --fail-under=70
|
||||
.gitignore (vendored)
@@ -1,5 +1,12 @@
*.log
.venv
.env
config.py
Pipfile
Pipfile.lock
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
.vscode
.flake
.coverage
Dockerfile (new file, 20 lines)
@@ -0,0 +1,20 @@
# syntax=docker/dockerfile:1
FROM python:3.12-alpine
RUN mkdir -p /opt/netbox-zabbix && chown -R 1000:1000 /opt/netbox-zabbix

RUN mkdir -p /opt/netbox-zabbix
RUN addgroup -g 1000 -S netbox-zabbix && adduser -u 1000 -S netbox-zabbix -G netbox-zabbix
RUN chown -R 1000:1000 /opt/netbox-zabbix

WORKDIR /opt/netbox-zabbix

COPY --chown=1000:1000 . /opt/netbox-zabbix

USER 1000:1000

RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi
USER root
RUN pip install -r ./requirements.txt
USER 1000:1000
ENTRYPOINT ["python"]
CMD ["/opt/netbox-zabbix/netbox_zabbix_sync.py", "-v"]
README.md
@@ -1,39 +1,104 @@
|
||||
# NetBox to Zabbix synchronization

A script to create, update and delete Zabbix hosts using NetBox device objects. Tested and compatible with all [currently supported Zabbix releases](https://www.zabbix.com/life_cycle_and_release_policy).
|
||||
## Installation via Docker
|
||||
|
||||
To pull the latest stable version to your local cache, use the following docker
|
||||
pull command:
|
||||
|
||||
```bash
docker pull ghcr.io/thenetworkguy/netbox-zabbix-sync:main
```
|
||||
|
||||
Make sure to specify the needed environment variables for the script to work
|
||||
(see [here](#set-environment-variables)) on the command line or use an
|
||||
[env file](https://docs.docker.com/reference/cli/docker/container/run/#env).
|
||||
|
||||
```bash
|
||||
docker run -d -t -i -e ZABBIX_HOST='https://zabbix.local' \
|
||||
-e ZABBIX_TOKEN='othersecrettoken' \
|
||||
-e NETBOX_HOST='https://netbox.local' \
|
||||
-e NETBOX_TOKEN='secrettoken' \
|
||||
--name netbox-zabbix-sync ghcr.io/thenetworkguy/netbox-zabbix-sync:main
|
||||
```
|
||||
|
||||
This should run a one-time sync. You can check the sync with
|
||||
`docker logs netbox-zabbix-sync`.
|
||||
|
||||
The image uses the default `config.py` for its configuration, you can use a
|
||||
volume mount in the docker run command to override with your own config file if
|
||||
needed (see [config file](#config-file)):
|
||||
|
||||
```bash
|
||||
docker run -d -t -i -v $(pwd)/config.py:/opt/netbox-zabbix/config.py ...
|
||||
```
|
||||
|
||||
## Installation from Source
|
||||
|
||||
### Cloning the repository
|
||||
```bash
git clone https://github.com/TheNetworkGuy/netbox-zabbix-sync.git
```
|
||||
### Packages
|
||||
|
||||
Make sure that you have a python environment with the following packages
|
||||
installed. You can also use the `requirements.txt` file for installation with
|
||||
pip.
|
||||
|
||||
```sh
|
||||
# Packages:
|
||||
pynetbox
|
||||
zabbix-utils
|
||||
|
||||
# Install them through requirements.txt from a venv:
|
||||
virtualenv .venv
|
||||
source .venv/bin/activate
|
||||
.venv/bin/pip --require-virtualenv install -r requirements.txt
|
||||
```
|
||||
|
||||
### Config file
|
||||
|
||||
First time user? Copy the `config.py.example` file to `config.py`. This file is
|
||||
used for modifying filters and setting variables such as custom field names.
|
||||
|
||||
```sh
|
||||
cp config.py.example config.py
|
||||
```
|
||||
|
||||
### Set environment variables
|
||||
Set the following environment variables:
|
||||
|
||||
```bash
|
||||
ZABBIX_HOST="https://zabbix.local"
|
||||
ZABBIX_USER="username"
|
||||
ZABBIX_PASS="Password"
|
||||
NETBOX_HOST="https://netbox.local"
|
||||
NETBOX_TOKEN="secrettoken"
|
||||
```
|
||||
|
||||
|
||||
Or, you can use a Zabbix API token to login instead of using a username and
|
||||
password. In that case `ZABBIX_USER` and `ZABBIX_PASS` will be ignored.
|
||||
|
||||
```bash
|
||||
ZABBIX_TOKEN=othersecrettoken
|
||||
```
|
||||
|
||||
If you are using custom SSL certificates for NetBox and/or Zabbix, you can set
|
||||
the following environment variable to the path of your CA bundle file:
|
||||
|
||||
```sh
|
||||
export REQUESTS_CA_BUNDLE=/path/to/your/ca-bundle.crt
|
||||
```
|
||||
|
||||
### NetBox custom fields
|
||||
|
||||
Use the following custom fields in NetBox (if you are using config context for
|
||||
the template information then the zabbix_template field is not required):
|
||||
|
||||
```
|
||||
* Type: Integer
|
||||
* Name: zabbix_hostid
|
||||
@@ -41,6 +106,7 @@ Use the following custom fields in Netbox (if you are using config context for t
|
||||
* Default: null
|
||||
* Object: dcim > device
|
||||
```
|
||||
|
||||
```
|
||||
* Type: Text
|
||||
* Name: zabbix_template
|
||||
@@ -48,23 +114,271 @@ Use the following custom fields in Netbox (if you are using config context for t
|
||||
* Default: null
|
||||
* Object: dcim > device_type
|
||||
```
|
||||
|
||||
You can make the `zabbix_hostid` field hidden or read-only to prevent human
|
||||
intervention.
|
||||
|
||||
This is optional, but there may be cases where you want to leave it
|
||||
read-write in the UI. For example to manually change or clear the ID and re-run a sync.
|
||||
|
||||
## Virtual Machine (VM) Syncing
|
||||
|
||||
In order to use VM syncing, make sure that the `zabbix_id` custom field is also
|
||||
present on Virtual machine objects in NetBox.
|
||||
|
||||
Use the `config.py` file and set the `sync_vms` variable to `True`.
|
||||
|
||||
You can set the `vm_hostgroup_format` variable to a customizable value for VM
|
||||
hostgroups. The default is `cluster_type/cluster/role`.
|
||||
|
||||
To enable filtering for VMs, check out the `nb_vm_filter` variable. It works
the same as the device filter (see the documentation under "Hostgroup layout").
Note that not all filtering capabilities and properties of devices are
applicable to VMs and vice versa. Check the NetBox API documentation to see
which filtering options are available for each object type.
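Putting the options above together, a minimal `config.py` sketch for VM syncing could look like this (the filter value is only an illustrative example):

```python
# Minimal VM syncing settings (illustrative sketch)
sync_vms = True                                    # enable VM syncing
vm_hostgroup_format = "cluster_type/cluster/role"  # default VM hostgroup layout
nb_vm_filter = {"tag": "zabbix"}                   # example: only sync VMs tagged "zabbix"
```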
|
||||
|
||||
## Config file
|
||||
|
||||
### Hostgroup
|
||||
|
||||
Setting the `create_hostgroups` variable to `False` requires manual hostgroup
|
||||
creation for devices in a new category. I would recommend setting this variable
|
||||
to `True` since leaving it on `False` results in a lot of manual work.
|
||||
|
||||
The format can be set with the `hostgroup_format` variable for devices and
|
||||
`vm_hostgroup_format` for virtual machines.
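For reference, the corresponding defaults shipped in `config.py.example` are:

```python
# Hostgroup generation defaults (from config.py.example)
create_hostgroups = True                          # let the script create missing hostgroups
hostgroup_format = "site/manufacturer/role"       # layout for devices
vm_hostgroup_format = "cluster_type/cluster/role" # layout for virtual machines
```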
|
||||
|
||||
Any nested parent hostgroups will also be created automatically. For instance
|
||||
the region `Berlin` with parent region `Germany` will create the hostgroup
|
||||
`Germany/Berlin`.
|
||||
|
||||
Make sure that the Zabbix user has proper permissions to create hosts. The
|
||||
hostgroups are in a nested format. This means that proper permissions only need
|
||||
to be applied to the site name hostgroup and cascaded to any child hostgroups.
|
||||
|
||||
#### Layout
|
||||
|
||||
The default hostgroup layout is "site/manufacturer/device_role". You can change
|
||||
this behaviour with the hostgroup_format variable. The following values can be
|
||||
used:
|
||||
|
||||
**Both devices and virtual machines**
|
||||
|
||||
| name | description |
|
||||
| ------------- | ------------------------------------------------------------------------------------ |
|
||||
| role | Role name of a device or VM |
|
||||
| region | The region name |
|
||||
| site | Site name |
|
||||
| site_group | Site group name |
|
||||
| tenant | Tenant name |
|
||||
| tenant_group | Tenant group name |
|
||||
| platform | Software platform of a device or VM |
|
||||
| custom fields | See the section "Layout -> Custom Fields" to use custom fields as hostgroup variable |
|
||||
|
||||
**Only for devices**
|
||||
|
||||
| name | description |
|
||||
| ------------ | ------------------------ |
|
||||
| location | The device location name |
|
||||
| manufacturer | Device manufacturer name |
|
||||
| rack | Rack |
|
||||
|
||||
**Only for VMs**
|
||||
|
||||
| name | description |
|
||||
| ------------ | --------------- |
|
||||
| cluster | VM cluster name |
|
||||
| cluster_type | VM cluster type |
|
||||
| device | parent device |
|
||||
|
||||
You can specify the value separated by a "/" like so:
|
||||
|
||||
```python
|
||||
hostgroup_format = "tenant/site/location/role"
|
||||
```
|
||||
|
||||
You can also provide a list of groups like so:
|
||||
|
||||
```python
|
||||
hostgroup_format = ["region/site_group/site", "role", "tenant_group/tenant"]
|
||||
```
|
||||
|
||||
|
||||
**Group traversal**
|
||||
|
||||
The default behaviour for `region` is to only use the directly assigned region
|
||||
in the rendered hostgroup name. However, by setting `traverse_regions` to `True`
|
||||
in `config.py` the script will render a full region path of all parent regions
|
||||
for the hostgroup name. `traverse_site_groups` controls the same behaviour for
|
||||
site_groups.
|
||||
|
||||
**Hardcoded text**
|
||||
|
||||
You can add hardcoded text in the hostgroup format by using quotes, this will
|
||||
insert the literal text:
|
||||
|
||||
```python
|
||||
hostgroup_format = "'MyDevices'/location/role"
|
||||
```
|
||||
|
||||
In this case, the prefix MyDevices will be used for all generated groups.
|
||||
|
||||
**Custom fields**
|
||||
|
||||
You can use the value of custom fields for hostgroup generation. This allows
|
||||
more freedom and even allows a full static mapping instead of a dynamic rendered
|
||||
hostgroup name.
|
||||
|
||||
For instance a custom field with the name `mycustomfieldname` and type string
|
||||
has the following values for 2 devices:
|
||||
|
||||
```
|
||||
Device A has the value Train for custom field mycustomfieldname.
|
||||
Device B has the value Bus for custom field mycustomfieldname.
|
||||
Both devices are located in the site Paris.
|
||||
```
|
||||
|
||||
With the hostgroup format `site/mycustomfieldname` the following hostgroups will
|
||||
be generated:
|
||||
|
||||
```
|
||||
Device A: Paris/Train
|
||||
Device B: Paris/Bus
|
||||
```
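In `config.py` that layout would simply be:

```python
# Use a custom field as one of the hostgroup levels
hostgroup_format = "site/mycustomfieldname"
```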
|
||||
|
||||
**Empty variables or hostgroups**
|
||||
|
||||
Should the content of a variable be empty, then the hostgroup position is
|
||||
skipped.
|
||||
|
||||
For example, consider the following scenario with 2 devices, both the same
|
||||
device type and site. One of them is linked to a tenant, the other one does not
|
||||
have a relationship with a tenant.
|
||||
|
||||
- Device_role: PDU
|
||||
- Site: HQ-AMS
|
||||
|
||||
```python
|
||||
hostgroup_format = "site/tenant/role"
|
||||
```
|
||||
|
||||
When running the script like above, the following hostgroup (HG) will be
|
||||
generated for both hosts:
|
||||
|
||||
- Device A with no relationship with a tenant: HQ-AMS/PDU
|
||||
- Device B with a relationship to tenant "Fork Industries": HQ-AMS/Fork
|
||||
Industries/PDU
|
||||
|
||||
The same logic applies to custom fields being used in the HG format:
|
||||
|
||||
```python
|
||||
hostgroup_format = "site/mycustomfieldname"
|
||||
```
|
||||
|
||||
For device A with the value "ABC123" in the custom field "mycustomfieldname" ->
|
||||
HQ-AMS/ABC123. For a device which does not have a value in the custom field
|
||||
"mycustomfieldname" -> HQ-AMS
|
||||
|
||||
Should there be a scenario where a custom field does not have a value under a
|
||||
device, and the HG format only uses this single variable, then this will result
|
||||
in an error:
|
||||
|
||||
```
|
||||
hostgroup_format = "mycustomfieldname"
|
||||
|
||||
NetBox-Zabbix-sync - ERROR - ESXI1 has no reliable hostgroup. This is most likely due to the use of custom fields that are empty.
|
||||
```
|
||||
|
||||
### Extended site properties
|
||||
|
||||
By default, NetBox will only return the following properties under the 'site' key for a device:
|
||||
|
||||
- site id
|
||||
- (api) url
|
||||
- display name
|
||||
- name
|
||||
- slug
|
||||
- description
|
||||
|
||||
However, NetBox-Zabbix-Sync allows you to extend these site properties with the full site information
|
||||
so you can use this data in inventory fields, tags and usermacros.
|
||||
|
||||
To enable this functionality, enable the following setting in your configuration file:
|
||||
|
||||
`extended_site_properties = True`
|
||||
|
||||
Keep in mind that enabling this option will increase the number of API calls to your NetBox instance,
|
||||
which might impact performance on large syncs.
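As a sketch, enabling the option and (for example) pointing inventory coordinates at the site instead of the device could look like this; see the inventory section below for the full mapping mechanism:

```python
# Query full site objects so nested site data becomes available
extended_site_properties = True

# Example only: use site geo data instead of device geo data in the inventory map
device_inventory_map = {"site/latitude": "location_lat",
                        "site/longitude": "location_lon"}
```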
|
||||
|
||||
### Device status
|
||||
|
||||
By setting a status on a NetBox device you determine how the host is added (or
|
||||
updated) in Zabbix. There are, by default, 3 options:
|
||||
|
||||
- Delete the host from Zabbix (triggered by NetBox status "Decommissioning" and
|
||||
"Inventory")
|
||||
- Create the host in Zabbix but with a disabled status (triggered by "Offline",
|
||||
"Planned", "Staged" and "Failed")
|
||||
- Create the host in Zabbix with an enabled status (For now only enabled with
|
||||
the "Active" status)
|
||||
|
||||
You can modify this behaviour by changing the following list variables in the
|
||||
script:
|
||||
|
||||
- `zabbix_device_removal`
|
||||
- `zabbix_device_disable`
|
||||
|
||||
### Zabbix Inventory
|
||||
|
||||
This script allows you to enable the inventory on managed Zabbix hosts and sync
|
||||
NetBox device properties to the specified inventory fields. To map NetBox
|
||||
information to Zabbix inventory fields, set `inventory_sync` to `True`.
|
||||
|
||||
You can set the inventory mode to "disabled", "manual" or "automatic" with the
|
||||
`inventory_mode` variable. See
|
||||
[Zabbix Manual](https://www.zabbix.com/documentation/current/en/manual/config/hosts/inventory#building-inventory)
|
||||
for more information about the modes.
|
||||
|
||||
Use the `device_inventory_map` variable to map which NetBox properties are used in
|
||||
which Zabbix Inventory fields. For nested properties, you can use the '/'
|
||||
separator. For example, the following map will assign the custom field
|
||||
'mycustomfield' to the 'alias' Zabbix inventory field:
|
||||
|
||||
For Virtual Machines, use `vm_inventory_map`.
|
||||
|
||||
```python
|
||||
inventory_sync = True
|
||||
inventory_mode = "manual"
|
||||
device_inventory_map = {"custom_fields/mycustomfield/name": "alias"}
|
||||
vm_inventory_map = {"custom_fields/mycustomfield/name": "alias"}
|
||||
```
|
||||
|
||||
See `config.py.example` for an extensive example map. Any Zabbix Inventory fields
|
||||
that are not included in the map will not be touched by the script, so you can
|
||||
safely add manual values or use items to automatically add values to other
|
||||
fields.
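For a sense of what a fuller map looks like, these are a few of the default device mappings from `config.py.example`:

```python
# Excerpt of the default device inventory map
device_inventory_map = {"asset_tag": "asset_tag",
                        "location/name": "location",
                        "serial": "serialno_a",
                        "device_type/model": "type",
                        "device_type/manufacturer/name": "vendor"}
```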
|
||||
|
||||
### Template source
|
||||
You can either use a NetBox device type custom field or NetBox config context
for the Zabbix template information.
|
||||
Using a custom field allows for only one template. You can assign multiple
|
||||
templates to one host using the config context source. Should you make use of an
|
||||
advanced templating structure with lots of nesting, then I would recommend
|
||||
sticking to the custom field.
|
||||
|
||||
You can change the behaviour in the config file. By default this setting is
|
||||
false but you can set it to true to use config context:
|
||||
|
||||
```python
|
||||
templates_config_context = True
|
||||
```
|
||||
|
||||
|
||||
After that make sure that for each host there is at least one template defined
|
||||
in the config context in this format:
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
"templates": [
|
||||
@@ -77,115 +391,251 @@ After that make sure that for each host there is at least one template defined i
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
You can also opt for the default device type custom field behaviour but with the
|
||||
added benefit of overwriting the template should a device in NetBox have a
|
||||
device specific context defined. In this case the device specific context
|
||||
template(s) will take priority over the device type custom field template.
|
||||
|
||||
```python
|
||||
templates_config_context_overrule = True
|
||||
```
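For completeness, the related defaults in `config.py.example` are:

```python
# Template source defaults (from config.py.example)
templates_config_context = False           # read templates from config context instead of a custom field
templates_config_context_overrule = False  # let device config context override the custom field
template_cf = "zabbix_template"            # device type custom field holding the template name
```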
|
||||
|
||||
### Tags
|
||||
|
||||
This script can sync host tags to your Zabbix hosts for use in filtering,
|
||||
SLA calculations and event correlation.
|
||||
|
||||
Tags can be synced from the following sources:
|
||||
|
||||
1. NetBox device/vm tags
|
||||
2. NetBox config context
|
||||
3. NetBox fields
|
||||
|
||||
Syncing tags will override any tags that were set manually on the host,
|
||||
making NetBox the single source-of-truth for managing tags.
|
||||
|
||||
To enable syncing, turn on `tag_sync` in the config file.
|
||||
By default, this script will modify tag names and tag values to lowercase.
|
||||
You can change this behavior by setting `tag_lower` to `False`.
|
||||
|
||||
```python
|
||||
tag_sync = True
|
||||
tag_lower = True
|
||||
```
|
||||
|
||||
#### Device tags
|
||||
|
||||
As NetBox doesn't follow the tag/value pattern for tags, we will need a tag
|
||||
name set to register the NetBox tags.
|
||||
|
||||
By default the tag name is "NetBox", but you can change this to whatever you want.
|
||||
The value for the tag can be set to 'name', 'display', or 'slug', which refers to the
|
||||
property of the NetBox tag object that will be used as the value in Zabbix.
|
||||
|
||||
```python
|
||||
tag_name = 'NetBox'
|
||||
tag_value = 'name'
|
||||
```
|
||||
|
||||
#### Config context
|
||||
|
||||
You can supply custom tags via config context by adding the following:
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
"tags": [
|
||||
{
|
||||
"MyTagName": "MyTagValue"
|
||||
},
|
||||
{
|
||||
"environment": "production"
|
||||
}
|
||||
],
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This will allow you to assign tags based on the config context rules.
|
||||
|
||||
#### NetBox Field
|
||||
|
||||
NetBox field can also be used as input for tags, just like inventory and usermacros.
|
||||
To enable syncing from fields, make sure to configure a `device_tag_map` and/or a `vm_tag_map`.
|
||||
|
||||
```python
|
||||
device_tag_map = {"site/name": "site",
|
||||
"rack/name": "rack",
|
||||
"platform/name": "target"}
|
||||
|
||||
vm_tag_map = {"site/name": "site",
|
||||
"cluster/name": "cluster",
|
||||
"platform/name": "target"}
|
||||
```
|
||||
|
||||
To turn off field syncing, set the maps to empty dictionaries:
|
||||
|
||||
```python
|
||||
device_tag_map = {}
|
||||
vm_tag_map = {}
|
||||
```
|
||||
|
||||
|
||||
### Usermacros
|
||||
|
||||
You can choose to use NetBox as a source for Host usermacros by
|
||||
enabling the following option in the configuration file:
|
||||
|
||||
```python
|
||||
usermacro_sync = True
|
||||
```
|
||||
|
||||
Please be advised that enabling this option will _clear_ any usermacros
|
||||
manually set on the managed hosts and override them with the usermacros
|
||||
from NetBox.
|
||||
|
||||
There are two NetBox sources that can be used to populate usermacros:
|
||||
|
||||
1. NetBox config context
|
||||
2. NetBox fields
|
||||
|
||||
#### Config context
|
||||
|
||||
By defining a dictionary `usermacros` within the `zabbix` key in
|
||||
config context, you can dynamically assign usermacro values based on
|
||||
anything that you can target based on
|
||||
[config contexts](https://netboxlabs.com/docs/netbox/en/stable/features/context-data/)
|
||||
within NetBox.
|
||||
|
||||
Through this method, it is possible to define the following types of usermacros:
|
||||
|
||||
1. Text
|
||||
2. Secret
|
||||
3. Vault
|
||||
|
||||
The default macro type is text, if no `type` and `value` have been set.
|
||||
It is also possible to create usermacros with
|
||||
[context](https://www.zabbix.com/documentation/7.0/en/manual/config/macros/user_macros_context).
|
||||
|
||||
Examples:
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
"usermacros": {
|
||||
"{$USER_MACRO}": "test value",
|
||||
"{$CONTEXT_MACRO:\"test\"}": "test value",
|
||||
"{$CONTEXT_REGEX_MACRO:regex:\".*\"}": "test value",
|
||||
"{$SECRET_MACRO}": {
|
||||
"type": "secret",
|
||||
"value": "PaSsPhRaSe"
|
||||
},
|
||||
"{$VAULT_MACRO}": {
|
||||
"type": "vault",
|
||||
"value": "secret/vmware:password"
|
||||
},
|
||||
"{$USER_MACRO2}": {
|
||||
"type": "text",
|
||||
"value": "another test value"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Please be aware that secret usermacros are only synced _once_ by default.
|
||||
This is the default behavior because Zabbix API won't return the value of
|
||||
secrets so the script cannot compare the values with those set in NetBox.
|
||||
|
||||
If you update a secret usermacro value, just remove the value from the host
|
||||
in Zabbix and the new value will be synced during the next run.
|
||||
|
||||
Alternatively, you can set the following option in the config file:
|
||||
|
||||
```python
|
||||
usermacro_sync = "full"
|
||||
```
|
||||
|
||||
This will force a full usermacro sync on every run on hosts that have secret usermacros set.
|
||||
That way, you will know for sure the secret values are always up to date.
|
||||
|
||||
Keep in mind that NetBox will show your secrets in plain text.
|
||||
If true secrecy is required, consider switching to
|
||||
[vault](https://www.zabbix.com/documentation/current/en/manual/config/macros/secret_macros#vault-secret)
|
||||
usermacros.
|
||||
|
||||
#### NetBox fields
|
||||
|
||||
To use NetBox fields as a source for usermacros, you will need to set up usermacro maps
|
||||
for devices and/or virtual machines in the configuration file.
|
||||
This method only supports `text` type usermacros.
|
||||
|
||||
For example:
|
||||
|
||||
```python
|
||||
usermacro_sync = True
|
||||
device_usermacro_map = {"serial": "{$HW_SERIAL}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"}
|
||||
vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"}
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Permissions
|
||||
|
||||
|
||||
### NetBox
|
||||
|
||||
Make sure that the NetBox user has proper permissions for device read and modify
|
||||
(modify to set the Zabbix HostID custom field) operations. The user should also
|
||||
have read-only access to the device types.
|
||||
|
||||
### Zabbix
|
||||
|
||||
Make sure that the Zabbix user has permissions to read hostgroups and proxy
|
||||
servers. The user should have full rights on creating, modifying and deleting
|
||||
hosts.
|
||||
|
||||
If you want to automatically create hostgroups then the create permission on
|
||||
host-groups should also be applied.
|
||||
|
||||
### Custom links
|
||||
|
||||
|
||||
To make the user experience easier you could add a custom link that redirects
|
||||
users to the Zabbix latest data.
|
||||
|
||||
```
|
||||
* Name: zabbix_latestData
|
||||
|
||||
* Text: {% if object.cf["zabbix_hostid"] %}Show host in Zabbix{% endif %}
|
||||
* URL: http://myzabbixserver.local/zabbix.php?action=latest.view&hostids[]={{ object.cf["zabbix_hostid"] }}
|
||||
```
|
||||
|
||||
## Running the script
|
||||
|
||||
```
|
||||
python3 netbox_zabbix_sync.py
|
||||
```
|
||||
|
||||
### Flags
|
||||
| Flag | Option | Description |
| ------------ | ------------ | ------------ |
| -c | cluster | For clustered devices: only add the primary node of a cluster and use the cluster name as hostname. |
| -H | hostgroup | Create non-existing hostgroups in Zabbix. Useful for a first run to add all required hostgroups. |
| -l | layout | Set the hostgroup layout. Default is site/manufacturer/dev_role. Possible options (separated with '/'): site, manufacturer, dev_role, tenant |
| -v | verbose | Log with debugging on. |
| -j | journal | Create journal entries in NetBox when a host gets added, modified or deleted in Zabbix |
| -p | proxy-power | Force a full proxy sync (includes deleting the proxy in Zabbix if not present in config context in NetBox) |
|
||||
|
||||
#### Hostgroup
|
||||
In case of omitting the -H flag, manual hostgroup creation is required for devices in a new category.
|
||||
| Flag | Option | Description |
|
||||
| ---- | --------- | ------------------------------------- |
|
||||
| -v | verbose | Log with info on. |
|
||||
| -vv | debug | Log with debugging on. |
|
||||
| -vvv | debug-all | Log with debugging on for all modules |
|
||||
|
||||
The format can be set with the -l flag. If not provided the default format will be:
|
||||
{Site name}/{Manufacturer name}/{Device role name}
|
||||
|
||||
Make sure that the Zabbix user has proper permissions to create hosts.
|
||||
The hostgroups are in a nested format. This means that proper permissions only need to be applied to the site name hostgroup and cascaded to any child hostgroups.
|
||||
|
||||
#### layout
|
||||
The default hostgroup layout is "site/manufacturer/device_role".
|
||||
|
||||
**Variables**
|
||||
|
||||
You can change this behaviour with the --layout flag. The following variables can be used:
|
||||
| name | description |
|
||||
| ------------ | ------------ |
|
||||
|tenant|Tenant name|
|
||||
|site|Site name|
|
||||
|manufacturer|Manufacturer name|
|
||||
|device_role|The device role name|
|
||||
|
||||
You can specify the variables like so, separated by a "/":
|
||||
```
|
||||
python3 netbox_zabbix_sync.py -l tenant/site/device_role
|
||||
```
|
||||
**custom fields**
|
||||
|
||||
You can also use the value of custom fields under the device object.
|
||||
|
||||
This allows more freedom and even allows a full static mapping instead of a dynamically rendered hostgroup name.
|
||||
```
|
||||
python3 netbox_zabbix_sync.py -l site/mycustomfieldname
|
||||
```
|
||||
**Empty variables or hostgroups**
|
||||
|
||||
Should the content of a variable be empty, then the hostgroup position is skipped.
|
||||
|
||||
For example, consider the following scenario with 2 devices, both the same device type and site. One of them is linked to a tenant, the other one does not have a relationship with a tenant.
|
||||
- Device_role: PDU
|
||||
- Site: HQ-AMS
|
||||
```
|
||||
python3 netbox_zabbix_sync.py -l site/tenant/device_role
|
||||
```
|
||||
When running the script like above, the following hostgroup (HG) will be generated for both hosts:
|
||||
- Device A with no relationship with a tenant: HQ-AMS/PDU
|
||||
- Device B with a relationship to tenant "Fork Industries": HQ-AMS/Fork Industries/PDU
|
||||
|
||||
The same logic applies to custom fields being used in the HG format:
|
||||
```
|
||||
python3 netbox_zabbix_sync.py -l site/mycustomfieldname
|
||||
```
|
||||
For device A with the value "ABC123" in the custom field "mycustomfieldname" -> HQ-AMS/ABC123
|
||||
For a device which does not have a value in the custom field "mycustomfieldname" -> HQ-AMS
|
||||
|
||||
Should there be a scenario where a custom field does not have a value under a device, and the HG format only uses this single variable, then this will result in an error:
|
||||
```
|
||||
python3 netbox_zabbix_sync.py -l mycustomfieldname
|
||||
|
||||
Netbox-Zabbix-sync - ERROR - ESXI1 has no reliable hostgroup. This is most likely due to the use of custom fields that are empty.
|
||||
```
|
||||
### Device status
|
||||
By setting a status on a Netbox device you determine how the host is added (or updated) in Zabbix. There are, by default, 3 options:
|
||||
* Delete the host from Zabbix (triggered by Netbox status "Decommissioning" and "Inventory")
|
||||
* Create the host in Zabbix but with a disabled status (Trigger by "Offline", "Planned", "Staged" and "Failed")
|
||||
* Create the host in Zabbix with an enabled status (For now only enabled with the "Active" status)
|
||||
|
||||
You can modify this behaviour by changing the following list variables in the script:
|
||||
- zabbix_device_removal
|
||||
- zabbix_device_disable
|
||||
## Config context
|
||||
|
||||
### Zabbix proxy
|
||||
You can set the proxy for a device using the 'proxy' key in config context.
|
||||
|
||||
#### Config Context
|
||||
You can set the proxy for a device using the `proxy` key in config context.
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
@@ -193,31 +643,100 @@ You can set the proxy for a device using the 'proxy' key in config context.
|
||||
}
|
||||
}
|
||||
```
|
||||
Because of the possible amount of destruction when setting up NetBox but forgetting the proxy command, the sync works a bit differently. By default everything is synced, except in a situation where the Zabbix host has a proxy configured but nothing is configured in NetBox. To force deletion and a full sync, use the -p flag.
|
||||
|
||||
### Set interface parameters within Netbox
|
||||
When adding a new device, you can set the interface type with custom context. By default, the following configuration is applied when no config context is provided:
|
||||
It is now possible to specify proxy groups with the introduction of Proxy groups
|
||||
in Zabbix 7. Specifying a group in the config context on older Zabbix releases
|
||||
will have no impact and the script will ignore the statement.
|
||||
|
||||
* SNMPv2
|
||||
* UDP 161
|
||||
* Bulk requests enabled
|
||||
* SNMP community: {$SNMP_COMMUNITY}
|
||||
|
||||
Due to Zabbix limitations of changing interface type with a linked template, changing the interface type from within Netbox is not supported and the script will generate an error.
|
||||
|
||||
For example when changing a SNMP interface to an Agent interface:
|
||||
```
|
||||
Netbox-Zabbix-sync - WARNING - Device: Interface OUT of sync.
|
||||
Netbox-Zabbix-sync - ERROR - Device: changing interface type to 1 is not supported.
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
"proxy_group": "yourawesomeproxygroup.local"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
To configure the interface parameters you'll need to use custom context. Custom context was used to make this script as customizable as possible for each environment. For example, you could:
|
||||
* Set the custom context directly on a device
|
||||
* Set the custom context on a label, which you would add to a device (for instance, SNMPv3)
|
||||
* Set the custom context on a device role
|
||||
* Set the custom context on a site or region
|
||||
The script will prefer groups when specifying both a proxy and group. This is
|
||||
done with the assumption that groups are more resilient and HA ready, making it
|
||||
a more logical choice to use for proxy linkage. This also makes migrating from a
|
||||
proxy to proxy group easier since the group takes priority over the individual
|
||||
proxy.
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
"proxy": "yourawesomeproxy.local",
|
||||
"proxy_group": "yourawesomeproxygroup.local"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
In the example above the host will use the group on Zabbix 7. On Zabbix 6 and
|
||||
below the host will use the proxy. Zabbix 7 will use the proxy value when
|
||||
omitting the proxy_group value.
|
||||
|
||||
#### Custom Field
|
||||
|
||||
Alternatively, you can use a custom field for assigning a device or VM to
|
||||
a Zabbix proxy or proxy group. The custom fields can be assigned to both
|
||||
Devices and VMs.
|
||||
|
||||
You can also assign these custom fields to a site to allow all devices/VMs
|
||||
in that site to be configured with the same proxy or proxy group.
|
||||
In order for this to work, `extended_site_properties` needs to be enabled in
|
||||
the configuration as well.
|
||||
|
||||
To use the custom fields for proxy configuration, configure one or both
|
||||
of the following settings in the configuration file with the actual names of your
|
||||
custom fields:
|
||||
|
||||
```python
|
||||
proxy_cf = "zabbix_proxy"
|
||||
proxy_group_cf = "zabbix_proxy_group"
|
||||
```
|
||||
|
||||
As with config context proxy configuration, proxy group will take precedence over
|
||||
standalone proxy when configured.
|
||||
Proxy settings configured on the device or VM will in their turn take precedence
|
||||
over any site configuration.
|
||||
|
||||
If the custom fields have no value but the proxy or proxy group is configured in config context,
|
||||
that setting will be used.
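A combined sketch of the proxy-related settings discussed above (the custom field names are examples, as in the earlier snippet):

```python
# Resolve proxies from custom fields, falling back to config context when the fields are empty
proxy_cf = "zabbix_proxy"              # example custom field holding a proxy name
proxy_group_cf = "zabbix_proxy_group"  # example custom field holding a proxy group name
extended_site_properties = True        # needed when the fields are set on sites
full_proxy_sync = False                # set to True to also remove proxies that are absent in NetBox
```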
|
||||
|
||||
### Set interface parameters within NetBox
|
||||
|
||||
When adding a new device, you can set the interface type with custom context. By
|
||||
default, the following configuration is applied when no config context is
|
||||
provided:
|
||||
|
||||
- SNMPv2
|
||||
- UDP 161
|
||||
- Bulk requests enabled
|
||||
- SNMP community: {$SNMP_COMMUNITY}
|
||||
|
||||
Due to Zabbix limitations of changing interface type with a linked template,
|
||||
changing the interface type from within NetBox is not supported and the script
|
||||
will generate an error.
|
||||
|
||||
For example, when changing a SNMP interface to an Agent interface:
|
||||
|
||||
```
|
||||
NetBox-Zabbix-sync - WARNING - Device: Interface OUT of sync.
|
||||
NetBox-Zabbix-sync - ERROR - Device: changing interface type to 1 is not supported.
|
||||
```
|
||||
|
||||
To configure the interface parameters you'll need to use custom context. Custom
|
||||
context was used to make this script as customizable as possible for each
|
||||
environment. For example, you could:
|
||||
|
||||
- Set the custom context directly on a device
|
||||
- Set the custom context on a tag, which you would add to a device (for
|
||||
instance, SNMPv3)
|
||||
- Set the custom context on a device role
|
||||
- Set the custom context on a site or region
|
||||
|
||||
##### Agent interface configuration example
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
@@ -226,7 +745,9 @@ To configure the interface parameters you'll need to use custom context. Custom
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### SNMPv2 interface configuration example
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
@@ -240,7 +761,9 @@ To configure the interface parameters you'll need to use custom context. Custom
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### SNMPv3 interface configuration example
|
||||
|
||||
```json
|
||||
{
|
||||
"zabbix": {
|
||||
@@ -256,4 +779,14 @@ To configure the interface parameters you'll need to use custom context. Custom
|
||||
}
|
||||
}
|
||||
```
|
||||
Note: Not all SNMP data is required for a working configuration. [The following parameters are allowed ](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed ")but are not all required, depending on your environment.
|
||||
|
||||
I would recommend using usermacros for sensitive data such as community strings
|
||||
since the data in NetBox is plain-text.
|
||||
|
||||
> **_NOTE:_** Not all SNMP data is required for a working configuration.
|
||||
> [The following parameters are allowed](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed") but
|
||||
> are not all required, depending on your environment.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
config.py.example
@@ -1,28 +1,159 @@
|
||||
## Template logic.
|
||||
# Set to true to enable the template source information
|
||||
# coming from config context instead of a custom field.
|
||||
templates_config_context = False
|
||||
# Set to true to give config context templates a
|
||||
# higher priority than custom field templates
|
||||
templates_config_context_overrule = False
|
||||
|
||||
# Set template and device NetBox "custom field" names
|
||||
# Template_cf is not used when templates_config_context is enabled
|
||||
template_cf = "zabbix_template"
|
||||
device_cf = "zabbix_hostid"
|
||||
|
||||
|
||||
## Enable clustering of devices with virtual chassis setup
|
||||
clustering = False
|
||||
|
||||
## Enable hostgroup generation. Requires permissions in Zabbix
|
||||
create_hostgroups = True
|
||||
|
||||
## Create journal entries
|
||||
create_journal = False
|
||||
|
||||
## Virtual machine sync
|
||||
# Set sync_vms to True in order to use this new feature
|
||||
# Use the hostgroup vm_hostgroup_format mapper for specific
|
||||
# hostgroup attributes of VMs such as cluster_type and cluster
|
||||
sync_vms = False
|
||||
# Check the README documentation for values to use in the VM hostgroup format.
|
||||
vm_hostgroup_format = "cluster_type/cluster/role"
|
||||
|
||||
## Proxy Sync
|
||||
# Set to true to enable removal of proxies under hosts. Use with caution and make sure that you specified
# all the required proxies in the device config context before enabling this option.
# With this option disabled, proxies will only be added and modified for Zabbix hosts.
|
||||
full_proxy_sync = False
|
||||
|
||||
## NetBox to Zabbix device state conversion
|
||||
zabbix_device_removal = ["Decommissioning", "Inventory"]
|
||||
zabbix_device_disable = ["Offline", "Planned", "Staged", "Failed"]
|
||||
|
||||
## Hostgroup mapping
|
||||
# See the README documentation for available options
|
||||
# You can also use CF (custom field) names under the device. The CF content will be used for the hostgroup generation.
|
||||
#
|
||||
# When using region in the group name, the default behaviour is to use the name of the directly assigned region.
|
||||
# By setting traverse_regions to True the full path of all parent regions will be used in the hostgroup, e.g.:
|
||||
#
|
||||
# 'Global/Europe/Netherlands/Amsterdam' instead of just 'Amsterdam'.
|
||||
#
|
||||
# traverse_site_groups controls the same behaviour for any assigned site_groups.
|
||||
hostgroup_format = "site/manufacturer/role"
|
||||
traverse_regions = False
|
||||
traverse_site_groups = False
|
||||
|
||||
## Extended site properties
|
||||
# By default, NetBox will only return basic site info for any device or VM.
|
||||
# By setting `extended_site_properties` to True, the script will query NetBox for additional site info.
|
||||
# Be aware that this will increase the number of API queries to NetBox.
|
||||
extended_site_properties = False
|
||||
|
||||
## Filtering
|
||||
# Custom device filter, variable must be present but can be left empty with no filtering.
|
||||
# A couple of examples:
|
||||
# nb_device_filter = {} #No filter
|
||||
# nb_device_filter = {"tag": "zabbix"} #Use a tag
|
||||
# nb_device_filter = {"site": "HQ-AMS"} #Use a site name
|
||||
# nb_device_filter = {"site": ["HQ-AMS", "HQ-FRA"]} #Device must be in either one of these sites
|
||||
# nb_device_filter = {"site": "HQ-AMS", "tag": "zabbix", "role__n": ["PDU", "console-server"]} #Device must be in site HQ-AMS, have the tag zabbix and must not be part of the PDU or console-server role
|
||||
|
||||
# Default device filter, only get devices which have a name in NetBox:
|
||||
nb_device_filter = {"name__n": "null"}
|
||||
# Default filter for VMs
|
||||
nb_vm_filter = {"name__n": "null"}
|
||||
|
||||
## Inventory
|
||||
# See https://www.zabbix.com/documentation/current/en/manual/config/hosts/inventory#building-inventory
|
||||
# Choice between disabled, manual or automatic.
|
||||
# Make sure to select at least manual or automatic when using the inventory_sync function.
|
||||
inventory_mode = "disabled"
|
||||
|
||||
# To allow syncing of NetBox device properties, set inventory_sync to True
|
||||
inventory_sync = False
|
||||
|
||||
# inventory_map is used to map NetBox properties to Zabbix Inventory fields.
|
||||
# For nested properties, you can use the '/' separator.
|
||||
# For example, the following map will assign the custom field 'mycustomfield' to the 'alias' Zabbix inventory field:
|
||||
#
|
||||
# device_inventory_map = { "custom_fields/mycustomfield/name": "alias"}
|
||||
#
|
||||
# The following maps should provide some nice defaults:
|
||||
device_inventory_map = { "asset_tag": "asset_tag",
|
||||
"virtual_chassis/name": "chassis",
|
||||
"status/label": "deployment_status",
|
||||
"location/name": "location",
|
||||
"latitude": "location_lat",
|
||||
"longitude": "location_lon",
|
||||
"comments": "notes",
|
||||
"name": "name",
|
||||
"rack/name": "site_rack",
|
||||
"serial": "serialno_a",
|
||||
"device_type/model": "type",
|
||||
"device_type/manufacturer/name": "vendor",
|
||||
"oob_ip/address": "oob_ip" }
|
||||
# Replace latitude and longitude with site/latitude and site/longitude to use
|
||||
# site geo data. Enable extended_site_properties for this to work!
|
||||
|
||||
# We also support inventory mapping on Virtual Machines.
|
||||
vm_inventory_map = { "status/label": "deployment_status",
|
||||
"comments": "notes",
|
||||
"name": "name" }
|
||||
|
||||
# To allow syncing of usermacros from NetBox, set to True.
|
||||
# this will enable both field mapping and config context usermacros.
|
||||
#
|
||||
# If set to "full", it will force the update of secret usermacros every run.
|
||||
# Please see the README.md for more information.
|
||||
usermacro_sync = False
|
||||
|
||||
# device usermacro_map to map NetBox fields to usermacros.
|
||||
device_usermacro_map = {"serial": "{$HW_SERIAL}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"display_url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"}
|
||||
|
||||
# virtual machine usermacro_map to map NetBox fields to usermacros.
|
||||
vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"display_url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}"}
|
||||
|
||||
# To sync host tags to Zabbix, set to True.
|
||||
tag_sync = False
|
||||
|
||||
# Setting tag_lower to True will convert capital letters in tag names and values to lowercase.
# This is more in line with the Zabbix way of working with tags.
|
||||
#
|
||||
# You can however set this to False to ensure capital letters are synced to Zabbix tags.
|
||||
tag_lower = True
|
||||
|
||||
# We can sync NetBox device/VM tags to Zabbix, but as NetBox tags don't follow the key/value
|
||||
# pattern, we need to specify a tag name to register the NetBox tags in Zabbix.
|
||||
#
|
||||
# If tag_name is set to False, we won't sync NetBox device/VM tags to Zabbix.
|
||||
tag_name = 'NetBox'
|
||||
|
||||
# We can choose to use 'name', 'slug' or 'display' NetBox tag properties as a value in Zabbix.
|
||||
# 'name' is used by default.
|
||||
tag_value = "name"
|
||||
|
||||
# device tag_map to map NetBox fields to host tags.
|
||||
device_tag_map = {"site/name": "site",
|
||||
"rack/name": "rack",
|
||||
"platform/name": "target"}
|
||||
|
||||
# Virtual machine tag_map to map NetBox fields to host tags.
|
||||
vm_tag_map = {"site/name": "site",
|
||||
"cluster/name": "cluster",
|
||||
"platform/name": "target"}
|
||||
|
||||
132
modules/config.py
Normal file
@@ -0,0 +1,132 @@
|
||||
"""
|
||||
Module for parsing configuration from the top level config.py file
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from importlib import util
|
||||
from os import environ, path
|
||||
from logging import getLogger
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
# PLEASE NOTE: This is a sample config file. Please do NOT make any edits in this file!
|
||||
# You should create your own config.py and it will overwrite the default config.
|
||||
|
||||
DEFAULT_CONFIG = {
|
||||
"templates_config_context": False,
|
||||
"templates_config_context_overrule": False,
|
||||
"template_cf": "zabbix_template",
|
||||
"device_cf": "zabbix_hostid",
|
||||
"proxy_cf": False,
|
||||
"proxy_group_cf": False,
|
||||
"clustering": False,
|
||||
"create_hostgroups": True,
|
||||
"create_journal": False,
|
||||
"sync_vms": False,
|
||||
"vm_hostgroup_format": "cluster_type/cluster/role",
|
||||
"full_proxy_sync": False,
|
||||
"zabbix_device_removal": ["Decommissioning", "Inventory"],
|
||||
"zabbix_device_disable": ["Offline", "Planned", "Staged", "Failed"],
|
||||
"hostgroup_format": "site/manufacturer/role",
|
||||
"traverse_regions": False,
|
||||
"traverse_site_groups": False,
|
||||
"nb_device_filter": {"name__n": "null"},
|
||||
"nb_vm_filter": {"name__n": "null"},
|
||||
"inventory_mode": "disabled",
|
||||
"inventory_sync": False,
|
||||
"extended_site_properties": False,
|
||||
"device_inventory_map": {
|
||||
"asset_tag": "asset_tag",
|
||||
"virtual_chassis/name": "chassis",
|
||||
"status/label": "deployment_status",
|
||||
"location/name": "location",
|
||||
"latitude": "location_lat",
|
||||
"longitude": "location_lon",
|
||||
"comments": "notes",
|
||||
"name": "name",
|
||||
"rack/name": "site_rack",
|
||||
"serial": "serialno_a",
|
||||
"device_type/model": "type",
|
||||
"device_type/manufacturer/name": "vendor",
|
||||
"oob_ip/address": "oob_ip",
|
||||
},
|
||||
"vm_inventory_map": {
|
||||
"status/label": "deployment_status",
|
||||
"comments": "notes",
|
||||
"name": "name",
|
||||
},
|
||||
"usermacro_sync": False,
|
||||
"device_usermacro_map": {
|
||||
"serial": "{$HW_SERIAL}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}",
|
||||
},
|
||||
"vm_usermacro_map": {
|
||||
"memory": "{$TOTAL_MEMORY}",
|
||||
"role/name": "{$DEV_ROLE}",
|
||||
"url": "{$NB_URL}",
|
||||
"id": "{$NB_ID}",
|
||||
},
|
||||
"tag_sync": False,
|
||||
"tag_lower": True,
|
||||
"tag_name": "NetBox",
|
||||
"tag_value": "name",
|
||||
"device_tag_map": {
|
||||
"site/name": "site",
|
||||
"rack/name": "rack",
|
||||
"platform/name": "target",
|
||||
},
|
||||
"vm_tag_map": {
|
||||
"site/name": "site",
|
||||
"cluster/name": "cluster",
|
||||
"platform/name": "target",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def load_config():
|
||||
"""Returns combined config from all sources"""
|
||||
# Overwrite default config with config.py
|
||||
conf = load_config_file(config_default=DEFAULT_CONFIG)
|
||||
# Overwrite default config and config.py with environment variables
|
||||
for key in conf:
|
||||
value_setting = load_env_variable(key)
|
||||
if value_setting is not None:
|
||||
conf[key] = value_setting
|
||||
return conf
|
||||
|
||||
|
||||
def load_env_variable(config_environvar):
|
||||
"""Returns config from environment variable"""
|
||||
prefix = "NBZX_"
|
||||
config_environvar = prefix + config_environvar.upper()
|
||||
if config_environvar in environ:
|
||||
return environ[config_environvar]
|
||||
return None
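# Illustrative note: with NBZX_SYNC_VMS=true exported in the shell,
# load_env_variable("sync_vms") returns the string "true" (values are returned
# as stored in the environment, not parsed into Python types).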
|
||||
|
||||
|
||||
def load_config_file(config_default, config_file="config.py"):
|
||||
"""Returns config from config.py file"""
|
||||
# Find the script path and config file next to it.
|
||||
script_dir = path.dirname(path.dirname(path.abspath(__file__)))
|
||||
config_path = Path(path.join(script_dir, config_file))
|
||||
|
||||
# If the script directory is not found, try the current working directory
|
||||
if not config_path.exists():
|
||||
config_path = Path(config_file)
|
||||
|
||||
# If both checks fail then fallback to the default config
|
||||
if not config_path.exists():
|
||||
return config_default
|
||||
|
||||
dconf = config_default.copy()
|
||||
# Dynamically import the config module
|
||||
spec = util.spec_from_file_location("config", config_path)
|
||||
config_module = util.module_from_spec(spec)
|
||||
spec.loader.exec_module(config_module)
|
||||
# Update DEFAULT_CONFIG with variables from the config module
|
||||
for key in dconf:
|
||||
if hasattr(config_module, key):
|
||||
dconf[key] = getattr(config_module, key)
|
||||
return dconf
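# Hedged usage sketch, not part of the module itself: the effective precedence is
# DEFAULT_CONFIG, then config.py, then NBZX_* environment variables.
if __name__ == "__main__":
    from pprint import pprint

    pprint(load_config())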
|
||||
1017
modules/device.py
Normal file
File diff suppressed because it is too large
Load Diff
48
modules/exceptions.py
Normal file
@@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
All custom exceptions used for Exception generation
|
||||
"""
|
||||
|
||||
|
||||
class SyncError(Exception):
|
||||
"""Class SyncError"""
|
||||
|
||||
|
||||
class JournalError(Exception):
|
||||
"""Class SyncError"""
|
||||
|
||||
|
||||
class SyncExternalError(SyncError):
|
||||
"""Class SyncExternalError"""
|
||||
|
||||
|
||||
class SyncInventoryError(SyncError):
|
||||
"""Class SyncInventoryError"""
|
||||
|
||||
|
||||
class SyncDuplicateError(SyncError):
|
||||
"""Class SyncDuplicateError"""
|
||||
|
||||
|
||||
class EnvironmentVarError(SyncError):
|
||||
"""Class EnvironmentVarError"""
|
||||
|
||||
|
||||
class InterfaceConfigError(SyncError):
|
||||
"""Class InterfaceConfigError"""
|
||||
|
||||
|
||||
class ProxyConfigError(SyncError):
|
||||
"""Class ProxyConfigError"""
|
||||
|
||||
|
||||
class HostgroupError(SyncError):
|
||||
"""Class HostgroupError"""
|
||||
|
||||
|
||||
class TemplateError(SyncError):
|
||||
"""Class TemplateError"""
|
||||
|
||||
|
||||
class UsermacroError(SyncError):
|
||||
"""Class UsermacroError"""
|
||||
204
modules/hostgroups.py
Normal file
@@ -0,0 +1,204 @@
|
||||
"""Module for all hostgroup related code"""
|
||||
|
||||
from logging import getLogger
|
||||
|
||||
from modules.exceptions import HostgroupError
|
||||
from modules.tools import build_path, cf_to_string
|
||||
|
||||
|
||||
class Hostgroup:
|
||||
"""Hostgroup class for devices and VM's
|
||||
Takes type (vm or dev) and NB object"""
|
||||
|
||||
# pylint: disable=too-many-arguments, disable=too-many-positional-arguments
|
||||
# pylint: disable=logging-fstring-interpolation
|
||||
def __init__(
|
||||
self,
|
||||
obj_type,
|
||||
nb_obj,
|
||||
version,
|
||||
logger=None,
|
||||
nested_sitegroup_flag=False,
|
||||
nested_region_flag=False,
|
||||
nb_regions=None,
|
||||
nb_groups=None,
|
||||
):
|
||||
self.logger = logger if logger else getLogger(__name__)
|
||||
if obj_type not in ("vm", "dev"):
|
||||
msg = f"Unable to create hostgroup with type {type}"
|
||||
self.logger.error()
|
||||
raise HostgroupError(msg)
|
||||
self.type = str(obj_type)
|
||||
self.nb = nb_obj
|
||||
self.name = self.nb.name
|
||||
self.nb_version = version
|
||||
# Used for nested data objects
|
||||
self.set_nesting(
|
||||
nested_sitegroup_flag, nested_region_flag, nb_groups, nb_regions
|
||||
)
|
||||
self._set_format_options()
|
||||
|
||||
def __str__(self):
|
||||
return f"Hostgroup for {self.type} {self.name}"
|
||||
|
||||
def __repr__(self):
|
||||
return self.__str__()
|
||||
|
||||
def _set_format_options(self):
|
||||
"""
|
||||
Set all available variables
|
||||
for hostgroup generation
|
||||
"""
|
||||
format_options = {}
|
||||
# Set variables for both type of devices
|
||||
if self.type in ("vm", "dev"):
|
||||
# Role fix for NetBox <=3
|
||||
role = None
|
||||
if self.nb_version.startswith(("2", "3")) and self.type == "dev":
|
||||
role = self.nb.device_role.name if self.nb.device_role else None
|
||||
else:
|
||||
role = self.nb.role.name if self.nb.role else None
|
||||
# Add default formatting options
|
||||
# Check if a site is configured. A site is optional for VMs
|
||||
format_options["region"] = None
|
||||
format_options["site_group"] = None
|
||||
if self.nb.site:
|
||||
if self.nb.site.region:
|
||||
format_options["region"] = self.generate_parents(
|
||||
"region", str(self.nb.site.region)
|
||||
)
|
||||
if self.nb.site.group:
|
||||
format_options["site_group"] = self.generate_parents(
|
||||
"site_group", str(self.nb.site.group)
|
||||
)
|
||||
format_options["role"] = role
|
||||
format_options["site"] = self.nb.site.name if self.nb.site else None
|
||||
format_options["tenant"] = str(self.nb.tenant) if self.nb.tenant else None
|
||||
format_options["tenant_group"] = (
|
||||
str(self.nb.tenant.group) if self.nb.tenant else None
|
||||
)
|
||||
format_options["platform"] = (
|
||||
self.nb.platform.name if self.nb.platform else None
|
||||
)
|
||||
# Variables only applicable for devices
|
||||
if self.type == "dev":
|
||||
format_options["manufacturer"] = self.nb.device_type.manufacturer.name
|
||||
format_options["location"] = (
|
||||
str(self.nb.location) if self.nb.location else None
|
||||
)
|
||||
format_options["rack"] = self.nb.rack.name if self.nb.rack else None
|
||||
# Variables only applicable for VM's
|
||||
if self.type == "vm":
|
||||
# Check if a cluster is configured. Could also be configured in a site.
|
||||
if self.nb.cluster:
|
||||
format_options["cluster"] = self.nb.cluster.name
|
||||
format_options["cluster_type"] = self.nb.cluster.type.name
|
||||
self.format_options = format_options
|
||||
self.logger.debug(
|
||||
"Host %s: Resolved properties for use in hostgroups: %s",
|
||||
self.name,
|
||||
self.format_options,
|
||||
)
|
||||
|
||||
def set_nesting(
|
||||
self, nested_sitegroup_flag, nested_region_flag, nb_groups, nb_regions
|
||||
):
|
||||
"""Set nesting options for this Hostgroup"""
|
||||
self.nested_objects = {
|
||||
"site_group": {"flag": nested_sitegroup_flag, "data": nb_groups},
|
||||
"region": {"flag": nested_region_flag, "data": nb_regions},
|
||||
}
|
||||
|
||||
def generate(self, hg_format):
|
||||
"""Generate hostgroup based on a provided format"""
|
||||
# Split all given names
|
||||
hg_output = []
|
||||
hg_items = hg_format.split("/")
|
||||
for hg_item in hg_items:
|
||||
# Check if requested data is available as option for this host
|
||||
if hg_item not in self.format_options:
|
||||
if hg_item.startswith(("'", '"')) and hg_item.endswith(("'", '"')):
|
||||
hg_item = hg_item.strip("'")
|
||||
hg_item = hg_item.strip('"')
|
||||
hg_output.append(hg_item)
|
||||
else:
|
||||
# Check if a custom field exists with this name
|
||||
cf_data = self.custom_field_lookup(hg_item)
|
||||
# CF does not exist
|
||||
if not cf_data["result"]:
|
||||
msg = (
|
||||
f"Unable to generate hostgroup for host {self.name}. "
|
||||
f"Item type {hg_item} not supported."
|
||||
)
|
||||
self.logger.error(msg)
|
||||
raise HostgroupError(msg)
|
||||
# CF data is populated
|
||||
if cf_data["cf"]:
|
||||
hg_output.append(cf_to_string(cf_data["cf"]))
|
||||
continue
|
||||
# Check if there is a value associated to the variable.
|
||||
# For instance, if a device has no location, do not use it with hostgroup calculation
|
||||
hostgroup_value = self.format_options[hg_item]
|
||||
if hostgroup_value:
|
||||
hg_output.append(hostgroup_value)
|
||||
else:
|
||||
self.logger.info(
|
||||
"Host %s: Used field '%s' has no value.", self.name, hg_item
|
||||
)
|
||||
# Check if the hostgroup is populated with at least one item.
|
||||
if bool(hg_output):
|
||||
return "/".join(hg_output)
|
||||
msg = (
|
||||
f"Host {self.name}: Generating hostgroup name for '{hg_format}' failed. "
|
||||
f"This is most likely due to fields that have no value."
|
||||
)
|
||||
self.logger.warning(msg)
|
||||
return None
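# Illustrative example (site, manufacturer and role names are invented): for a
# device in site "HQ-AMS" made by "Cisco" with role "Router",
# generate("site/manufacturer/role") returns "HQ-AMS/Cisco/Router". Quoted
# literals such as "'Network'/site" are passed through verbatim, and unknown
# items are resolved as custom fields via custom_field_lookup().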
|
||||
|
||||
def list_formatoptions(self):
|
||||
"""
|
||||
Function to easily troubleshoot which values
|
||||
are generated for a specific device or VM.
|
||||
"""
|
||||
print(f"The following options are available for host {self.name}")
|
||||
for option_type, value in self.format_options.items():
|
||||
if value is not None:
|
||||
print(f"{option_type} - {value}")
|
||||
print("The following options are not available")
|
||||
for option_type, value in self.format_options.items():
|
||||
if value is None:
|
||||
print(f"{option_type}")
|
||||
|
||||
def custom_field_lookup(self, hg_category):
|
||||
"""
|
||||
Checks if a valid custom field is present in NetBox.
|
||||
INPUT: Custom field name
|
||||
OUTPUT: dictionary with 'result' and 'cf' keys.
|
||||
"""
|
||||
# Check if the custom field exists
|
||||
if hg_category not in self.nb.custom_fields:
|
||||
return {"result": False, "cf": None}
|
||||
# Checks if the custom field has been populated
|
||||
if not bool(self.nb.custom_fields[hg_category]):
|
||||
return {"result": True, "cf": None}
|
||||
# Custom field exists and is populated
|
||||
return {"result": True, "cf": self.nb.custom_fields[hg_category]}
|
||||
|
||||
def generate_parents(self, nest_type, child_object):
|
||||
"""
|
||||
Generates parent objects to implement nested regions / nested site groups
|
||||
INPUT: nest_type to set which type of nesting is going to be processed
|
||||
child_object: the name of the child object (for instance the last NB region)
|
||||
OUTPUT: STRING - Either the single child name or child and parents.
|
||||
"""
|
||||
# Check if this type of nesting is supported.
|
||||
if not nest_type in self.nested_objects:
|
||||
return child_object
|
||||
# If the nested flag is True, perform parent calculation
|
||||
if self.nested_objects[nest_type]["flag"]:
|
||||
final_nested_object = build_path(
|
||||
child_object, self.nested_objects[nest_type]["data"]
|
||||
)
|
||||
return "/".join(final_nested_object)
|
||||
# Nesting is not allowed for this object. Return child_object
|
||||
return child_object
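# Illustrative example (region names are invented): with nested regions enabled
# and region "Amsterdam" having parent "Europe" in NetBox,
# generate_parents("region", "Amsterdam") returns "Europe/Amsterdam"; with
# nesting disabled it simply returns "Amsterdam".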
|
||||
109
modules/interface.py
Normal file
@@ -0,0 +1,109 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
All of the Zabbix interface related configuration
|
||||
"""
|
||||
from modules.exceptions import InterfaceConfigError
|
||||
|
||||
|
||||
class ZabbixInterface:
|
||||
"""Class that represents a Zabbix interface."""
|
||||
|
||||
def __init__(self, context, ip):
|
||||
self.context = context
|
||||
self.ip = ip
|
||||
self.skelet = {"main": "1", "useip": "1", "dns": "", "ip": self.ip}
|
||||
self.interface = self.skelet
|
||||
|
||||
def _set_default_port(self):
|
||||
"""Sets default TCP / UDP port for different interface types"""
|
||||
interface_mapping = {1: 10050, 2: 161, 3: 623, 4: 12345}
|
||||
# Check if interface type is listed in mapper.
|
||||
if self.interface["type"] not in interface_mapping:
|
||||
return False
|
||||
# Set default port to interface
|
||||
self.interface["port"] = str(interface_mapping[self.interface["type"]])
|
||||
return True
|
||||
|
||||
def get_context(self):
|
||||
"""check if NetBox custom context has been defined."""
|
||||
if "zabbix" in self.context:
|
||||
zabbix = self.context["zabbix"]
|
||||
if "interface_type" in zabbix:
|
||||
self.interface["type"] = zabbix["interface_type"]
|
||||
if not "interface_port" in zabbix:
|
||||
self._set_default_port()
|
||||
return True
|
||||
self.interface["port"] = zabbix["interface_port"]
|
||||
return True
|
||||
return False
|
||||
return False
|
||||
|
||||
def set_snmp(self):
|
||||
"""Check if interface is type SNMP"""
|
||||
# pylint: disable=too-many-branches
|
||||
if self.interface["type"] == 2:
|
||||
# Checks if SNMP settings are defined in NetBox
|
||||
if "snmp" in self.context["zabbix"]:
|
||||
snmp = self.context["zabbix"]["snmp"]
|
||||
self.interface["details"] = {}
|
||||
# Checks if bulk config has been defined
|
||||
if "bulk" in snmp:
|
||||
self.interface["details"]["bulk"] = str(snmp.pop("bulk"))
|
||||
else:
|
||||
# Fallback to bulk enabled if not specified
|
||||
self.interface["details"]["bulk"] = "1"
|
||||
# SNMP Version config is required in NetBox config context
|
||||
if snmp.get("version"):
|
||||
self.interface["details"]["version"] = str(snmp.pop("version"))
|
||||
else:
|
||||
e = "SNMP version option is not defined."
|
||||
raise InterfaceConfigError(e)
|
||||
# If version 1 or 2 is used, get community string
|
||||
if self.interface["details"]["version"] in ["1", "2"]:
|
||||
if "community" in snmp:
|
||||
# Set SNMP community to config context value
|
||||
community = snmp["community"]
|
||||
else:
|
||||
# Set SNMP community to default
|
||||
community = "{$SNMP_COMMUNITY}"
|
||||
self.interface["details"]["community"] = str(community)
|
||||
# If version 3 has been used, get all
|
||||
# SNMPv3 NetBox related configs
|
||||
elif self.interface["details"]["version"] == "3":
|
||||
items = [
|
||||
"securityname",
|
||||
"securitylevel",
|
||||
"authpassphrase",
|
||||
"privpassphrase",
|
||||
"authprotocol",
|
||||
"privprotocol",
|
||||
"contextname",
|
||||
]
|
||||
for key, item in snmp.items():
|
||||
if key in items:
|
||||
self.interface["details"][key] = str(item)
|
||||
else:
|
||||
e = "Unsupported SNMP version."
|
||||
raise InterfaceConfigError(e)
|
||||
else:
|
||||
e = "Interface type SNMP but no parameters provided."
|
||||
raise InterfaceConfigError(e)
|
||||
else:
|
||||
e = "Interface type is not SNMP, unable to set SNMP details"
|
||||
raise InterfaceConfigError(e)
|
||||
|
||||
def set_default_snmp(self):
|
||||
"""Set default config to SNMPv2, port 161 and community macro."""
|
||||
self.interface = self.skelet
|
||||
self.interface["type"] = "2"
|
||||
self.interface["port"] = "161"
|
||||
self.interface["details"] = {
|
||||
"version": "2",
|
||||
"community": "{$SNMP_COMMUNITY}",
|
||||
"bulk": "1",
|
||||
}
|
||||
|
||||
def set_default_agent(self):
|
||||
"""Sets interface to Zabbix agent defaults"""
|
||||
self.interface["type"] = "1"
|
||||
self.interface["port"] = "10050"
|
||||
41
modules/logging.py
Normal file
@@ -0,0 +1,41 @@
|
||||
"""
|
||||
Logging module for Netbox-Zabbix-sync
|
||||
"""
|
||||
|
||||
import logging
|
||||
from os import path
|
||||
|
||||
logger = logging.getLogger("NetBox-Zabbix-sync")
|
||||
|
||||
|
||||
def get_logger():
|
||||
"""
|
||||
Return the logger for Netbox Zabbix Sync
|
||||
"""
|
||||
return logger
|
||||
|
||||
|
||||
def setup_logger():
|
||||
"""
|
||||
Prepare a logger with stream and file handlers
|
||||
"""
|
||||
# Set logging
|
||||
lgout = logging.StreamHandler()
|
||||
# Logfile in the project root
|
||||
project_root = path.dirname(path.dirname(path.realpath(__file__)))
|
||||
logfile_path = path.join(project_root, "sync.log")
|
||||
lgfile = logging.FileHandler(logfile_path)
|
||||
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
||||
level=logging.WARNING,
|
||||
handlers=[lgout, lgfile],
|
||||
)
|
||||
|
||||
|
||||
def set_log_levels(root_level, own_level):
|
||||
"""
|
||||
Configure log levels for root and Netbox-Zabbix-sync logger
|
||||
"""
|
||||
logging.getLogger().setLevel(root_level)
|
||||
logger.setLevel(own_level)
|
||||
136
modules/tags.py
Normal file
@@ -0,0 +1,136 @@
|
||||
#!/usr/bin/env python3
|
||||
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, logging-fstring-interpolation
|
||||
"""
|
||||
All of the Zabbix tag related configuration
|
||||
"""
|
||||
|
||||
from logging import getLogger
|
||||
|
||||
from modules.tools import field_mapper, remove_duplicates
|
||||
|
||||
|
||||
class ZabbixTags:
|
||||
"""Class that represents a Zabbix interface."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
nb,
|
||||
tag_map,
|
||||
tag_sync=False,
|
||||
tag_lower=True,
|
||||
tag_name=None,
|
||||
tag_value=None,
|
||||
logger=None,
|
||||
host=None,
|
||||
):
|
||||
self.nb = nb
|
||||
self.name = host if host else nb.name
|
||||
self.tag_map = tag_map
|
||||
self.logger = logger if logger else getLogger(__name__)
|
||||
self.tags = {}
|
||||
self.lower = tag_lower
|
||||
self.tag_name = tag_name
|
||||
self.tag_value = tag_value
|
||||
self.tag_sync = tag_sync
|
||||
self.sync = False
|
||||
self._set_config()
|
||||
|
||||
def __repr__(self):
|
||||
return self.name
|
||||
|
||||
def __str__(self):
|
||||
return self.__repr__()
|
||||
|
||||
def _set_config(self):
|
||||
"""
|
||||
Setup class
|
||||
"""
|
||||
if self.tag_sync:
|
||||
self.sync = True
|
||||
|
||||
return True
|
||||
|
||||
def validate_tag(self, tag_name):
|
||||
"""
|
||||
Validates tag name
|
||||
"""
|
||||
if tag_name and isinstance(tag_name, str) and len(tag_name) <= 256:
|
||||
return True
|
||||
return False
|
||||
|
||||
def validate_value(self, tag_value):
|
||||
"""
|
||||
Validates tag value
|
||||
"""
|
||||
if tag_value and isinstance(tag_value, str) and len(tag_value) <= 256:
|
||||
return True
|
||||
return False
|
||||
|
||||
def render_tag(self, tag_name, tag_value):
|
||||
"""
|
||||
Renders a tag
|
||||
"""
|
||||
tag = {}
|
||||
if self.validate_tag(tag_name):
|
||||
if self.lower:
|
||||
tag["tag"] = tag_name.lower()
|
||||
else:
|
||||
tag["tag"] = tag_name
|
||||
else:
|
||||
self.logger.warning("Tag '%s' is not a valid tag name, skipping.", tag_name)
|
||||
return False
|
||||
|
||||
if self.validate_value(tag_value):
|
||||
if self.lower:
|
||||
tag["value"] = tag_value.lower()
|
||||
else:
|
||||
tag["value"] = tag_value
|
||||
else:
|
||||
self.logger.info(
|
||||
"Tag '%s' has an invalid value: '%s', skipping.", tag_name, tag_value
|
||||
)
|
||||
return False
|
||||
return tag
|
||||
|
||||
def generate(self):
|
||||
"""
|
||||
Generate the full set of tags
|
||||
"""
|
||||
# pylint: disable=too-many-branches
|
||||
tags = []
|
||||
# Parse the field mapper for tags
|
||||
if self.tag_map:
|
||||
self.logger.debug("Host %s: Starting tag mapper.", self.nb.name)
|
||||
field_tags = field_mapper(self.nb.name, self.tag_map, self.nb, self.logger)
|
||||
for tag, value in field_tags.items():
|
||||
t = self.render_tag(tag, value)
|
||||
if t:
|
||||
tags.append(t)
|
||||
|
||||
# Parse NetBox config context for tags
|
||||
if (
|
||||
"zabbix" in self.nb.config_context
|
||||
and "tags" in self.nb.config_context["zabbix"]
|
||||
and isinstance(self.nb.config_context["zabbix"]["tags"], list)
|
||||
):
|
||||
for tag in self.nb.config_context["zabbix"]["tags"]:
|
||||
if isinstance(tag, dict):
|
||||
for tagname, value in tag.items():
|
||||
t = self.render_tag(tagname, value)
|
||||
if t:
|
||||
tags.append(t)
|
||||
|
||||
# Pull in NetBox device tags if tag_name is set
|
||||
if self.tag_name and isinstance(self.tag_name, str):
|
||||
for tag in self.nb.tags:
|
||||
if self.tag_value.lower() in ["display", "name", "slug"]:
|
||||
value = tag[self.tag_value]
|
||||
else:
|
||||
value = tag["name"]
|
||||
t = self.render_tag(self.tag_name, value)
|
||||
if t:
|
||||
tags.append(t)
|
||||
|
||||
tags = remove_duplicates(tags, sortkey="tag")
|
||||
self.logger.debug("Host %s: Resolved tags: %s", self.name, tags)
|
||||
return tags
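# Illustrative example (site name invented): with device_tag_map
# {"site/name": "site"} and tag_lower=True, a device in site "HQ-AMS" yields
# {"tag": "site", "value": "hq-ams"}; NetBox object tags are added under the
# configured tag_name (default "NetBox") using the selected tag_value property.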
|
||||
249
modules/tools.py
Normal file
@@ -0,0 +1,249 @@
|
||||
"""A collection of tools used by several classes"""
|
||||
|
||||
from typing import Any, Callable, Optional, overload
|
||||
from modules.exceptions import HostgroupError
|
||||
|
||||
|
||||
def convert_recordset(recordset):
|
||||
"""Converts netbox RedcordSet to list of dicts."""
|
||||
recordlist = []
|
||||
for record in recordset:
|
||||
recordlist.append(record.__dict__)
|
||||
return recordlist
|
||||
|
||||
|
||||
def build_path(endpoint, list_of_dicts):
|
||||
"""
|
||||
Builds a path list of related parent/child items.
|
||||
This can be used to generate a joinable list to
|
||||
be used in hostgroups.
|
||||
"""
|
||||
item_path = []
|
||||
itemlist = [i for i in list_of_dicts if i["name"] == endpoint]
|
||||
item = itemlist[0] if len(itemlist) == 1 else None
|
||||
item_path.append(item["name"])
|
||||
while item["_depth"] > 0:
|
||||
itemlist = [i for i in list_of_dicts if i["name"] == str(item["parent"])]
|
||||
item = itemlist[0] if len(itemlist) == 1 else None
|
||||
item_path.append(item["name"])
|
||||
item_path.reverse()
|
||||
return item_path
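# Illustrative example (names invented; dict shape mirrors convert_recordset() output):
# regions = [
#     {"name": "Europe", "parent": None, "_depth": 0},
#     {"name": "Amsterdam", "parent": "Europe", "_depth": 1},
# ]
# build_path("Amsterdam", regions)  ->  ["Europe", "Amsterdam"]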
|
||||
|
||||
|
||||
def proxy_prepper(proxy_list, proxy_group_list):
|
||||
"""
|
||||
Function that takes 2 lists and converts them using a
|
||||
standardized format for further processing.
|
||||
"""
|
||||
output = []
|
||||
for proxy in proxy_list:
|
||||
proxy["type"] = "proxy"
|
||||
proxy["id"] = proxy["proxyid"]
|
||||
proxy["idtype"] = "proxyid"
|
||||
proxy["monitored_by"] = 1
|
||||
output.append(proxy)
|
||||
for group in proxy_group_list:
|
||||
group["type"] = "proxy_group"
|
||||
group["id"] = group["proxy_groupid"]
|
||||
group["idtype"] = "proxy_groupid"
|
||||
group["monitored_by"] = 2
|
||||
output.append(group)
|
||||
return output
|
||||
|
||||
|
||||
def cf_to_string(cf, key="name", logger=None):
|
||||
"""
|
||||
Converts a dict custom fields to string
|
||||
"""
|
||||
if isinstance(cf, dict):
|
||||
if key in cf:
|
||||
return cf[key]
|
||||
logger.error(
|
||||
"Conversion of custom field failed, '%s' not found in cf dict.", key
|
||||
)
|
||||
return None
|
||||
return cf
|
||||
|
||||
|
||||
def field_mapper(host, mapper, nbdevice, logger):
|
||||
"""
|
||||
Maps NetBox field data to Zabbix properties.
|
||||
Used for Inventory, Usermacros and Tag mappings.
|
||||
"""
|
||||
data = {}
|
||||
# Let's build a dict for each property in the map
|
||||
for nb_field, zbx_field in mapper.items():
|
||||
field_list = nb_field.split("/") # convert str to list based on delimiter
|
||||
# start at the base of the dict...
|
||||
value = nbdevice
|
||||
# ... and step through the dict till we find the needed value
|
||||
for item in field_list:
|
||||
value = value[item] if value else None
|
||||
# Check if the result is usable and expected
|
||||
# We want to apply any int or float 0 values,
|
||||
# even if python thinks those are empty.
|
||||
if (value and isinstance(value, int | float | str)) or (
|
||||
isinstance(value, int | float) and int(value) == 0
|
||||
):
|
||||
data[zbx_field] = str(value)
|
||||
elif not value:
|
||||
# empty value should just be an empty string for API compatibility
|
||||
logger.info(
|
||||
"Host %s: NetBox lookup for '%s' returned an empty value.",
|
||||
host,
|
||||
nb_field,
|
||||
)
|
||||
data[zbx_field] = ""
|
||||
else:
|
||||
# Value is not a string or numeral, probably not what the user expected.
|
||||
logger.info(
|
||||
"Host %s: Lookup for '%s' returned an unexpected type: it will be skipped.",
|
||||
host,
|
||||
nb_field,
|
||||
)
|
||||
logger.debug(
|
||||
"Host %s: Field mapping complete. Mapped %s field(s).",
|
||||
host,
|
||||
len(list(filter(None, data.values()))),
|
||||
)
|
||||
return data
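# Illustrative example (field values are invented): with mapper
# {"device_type/model": "type"} and a device whose device_type.model is
# "C9300-48P", field_mapper() returns {"type": "C9300-48P"}; lookups that come
# back empty are mapped to "" for API compatibility.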
|
||||
|
||||
|
||||
@overload
|
||||
def remove_duplicates(
|
||||
input_list: list[dict[Any, Any]],
|
||||
sortkey: Optional[str | Callable[[dict[str, Any]], str]] = None,
|
||||
): ...
|
||||
|
||||
|
||||
@overload
|
||||
def remove_duplicates(
|
||||
input_list: dict[Any, Any],
|
||||
sortkey: Optional[str | Callable[[dict[str, Any]], str]] = None,
|
||||
):
|
||||
"""
|
||||
deprecated: input_list as dict is deprecated, use list of dicts instead
|
||||
"""
|
||||
|
||||
|
||||
def remove_duplicates(
|
||||
input_list: list[dict[Any, Any]] | dict[Any, Any],
|
||||
sortkey: Optional[str | Callable[[dict[str, Any]], str]] = None,
|
||||
):
|
||||
"""
|
||||
Removes duplicate entries from a list and sorts the list
|
||||
|
||||
sortkey: Optional; key to sort the list on. Can be a string or a callable function.
|
||||
"""
|
||||
output_list = []
|
||||
if isinstance(input_list, list):
|
||||
output_list = [dict(t) for t in {tuple(d.items()) for d in input_list}]
|
||||
|
||||
if sortkey and isinstance(sortkey, str):
|
||||
output_list.sort(key=lambda x: x[sortkey])
|
||||
|
||||
elif sortkey and callable(sortkey):
|
||||
output_list.sort(key=sortkey)
|
||||
|
||||
return output_list
|
||||
|
||||
|
||||
def verify_hg_format(
|
||||
hg_format, device_cfs=None, vm_cfs=None, hg_type="dev", logger=None
|
||||
):
|
||||
"""
|
||||
Verifies hostgroup field format
|
||||
"""
|
||||
if not device_cfs:
|
||||
device_cfs = []
|
||||
if not vm_cfs:
|
||||
vm_cfs = []
|
||||
allowed_objects = {
|
||||
"dev": [
|
||||
"location",
|
||||
"rack",
|
||||
"role",
|
||||
"manufacturer",
|
||||
"region",
|
||||
"site",
|
||||
"site_group",
|
||||
"tenant",
|
||||
"tenant_group",
|
||||
"platform",
|
||||
"cluster",
|
||||
],
|
||||
"vm": [
|
||||
"cluster_type",
|
||||
"role",
|
||||
"manufacturer",
|
||||
"region",
|
||||
"site",
|
||||
"site_group",
|
||||
"tenant",
|
||||
"tenant_group",
|
||||
"cluster",
|
||||
"device",
|
||||
"platform",
|
||||
],
|
||||
"cfs": {"dev": [], "vm": []},
|
||||
}
|
||||
for cf in device_cfs:
|
||||
allowed_objects["cfs"]["dev"].append(cf.name)
|
||||
for cf in vm_cfs:
|
||||
allowed_objects["cfs"]["vm"].append(cf.name)
|
||||
hg_objects = []
|
||||
if isinstance(hg_format, list):
|
||||
for f in hg_format:
|
||||
hg_objects = hg_objects + f.split("/")
|
||||
else:
|
||||
hg_objects = hg_format.split("/")
|
||||
hg_objects = sorted(set(hg_objects))
|
||||
for hg_object in hg_objects:
|
||||
if (
|
||||
hg_object not in allowed_objects[hg_type]
|
||||
and hg_object not in allowed_objects["cfs"][hg_type]
|
||||
and not hg_object.startswith(('"', "'"))
|
||||
):
|
||||
e = (
|
||||
f"Hostgroup item {hg_object} is not valid. Make sure you"
|
||||
" use valid items and separate them with '/'."
|
||||
)
|
||||
logger.warning(e)
|
||||
raise HostgroupError(e)
|
||||
|
||||
|
||||
def sanatize_log_output(data):
|
||||
"""
|
||||
Used for the update function to Zabbix which
|
||||
shows the data that it is using to update the host.
|
||||
Removes any sensitive data from the input.
|
||||
"""
|
||||
if not isinstance(data, dict):
|
||||
return data
|
||||
sanitized_data = data.copy()
|
||||
# Check if there are any sensitive macros defined in the data
|
||||
if "macros" in data:
|
||||
for macro in sanitized_data["macros"]:
|
||||
# Check if macro is secret type
|
||||
if not (macro["type"] == str(1) or macro["type"] == 1):
|
||||
continue
|
||||
macro["value"] = "********"
|
||||
# Check for interface data
|
||||
if "interfaceid" in data:
|
||||
# Interface ID is a value which is most likely not helpful
|
||||
# in logging output or for troubleshooting.
|
||||
del sanitized_data["interfaceid"]
|
||||
# InterfaceID also hints that this is an interface update.
# We still need to check whether macros are used for the SNMP security parameters.
|
||||
if not "details" in data:
|
||||
return sanitized_data
|
||||
for key, detail in sanitized_data["details"].items():
|
||||
# If the detail is a secret, we don't want to log it.
|
||||
if key in ("authpassphrase", "privpassphrase", "securityname", "community"):
|
||||
# Check if a macro is used.
|
||||
# If so then logging the output is not a security issue.
|
||||
if detail.startswith("{$") and detail.endswith("}"):
|
||||
continue
|
||||
# A macro is not used, so we sanitize the value.
|
||||
sanitized_data["details"][key] = "********"
|
||||
return sanitized_data
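# Illustrative example (values invented): a secret macro
# {"macro": "{$PASS}", "type": "1", "value": "hunter2"} is logged with its value
# replaced by "********", while an SNMP community set to the "{$SNMP_COMMUNITY}"
# macro is left as-is because the macro reference itself is not sensitive.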
|
||||
136
modules/usermacros.py
Normal file
@@ -0,0 +1,136 @@
|
||||
#!/usr/bin/env python3
|
||||
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, logging-fstring-interpolation
|
||||
"""
|
||||
All of the Zabbix Usermacro related configuration
|
||||
"""
|
||||
|
||||
from logging import getLogger
|
||||
from re import match
|
||||
|
||||
from modules.tools import field_mapper, sanatize_log_output
|
||||
|
||||
|
||||
class ZabbixUsermacros:
|
||||
"""Class that represents Zabbix usermacros."""
|
||||
|
||||
def __init__(self, nb, usermacro_map, usermacro_sync, logger=None, host=None):
|
||||
self.nb = nb
|
||||
self.name = host if host else nb.name
|
||||
self.usermacro_map = usermacro_map
|
||||
self.logger = logger if logger else getLogger(__name__)
|
||||
self.usermacros = {}
|
||||
self.usermacro_sync = usermacro_sync
|
||||
self.sync = False
|
||||
self.force_sync = False
|
||||
self._set_config()
|
||||
|
||||
def __repr__(self):
|
||||
return self.name
|
||||
|
||||
def __str__(self):
|
||||
return self.__repr__()
|
||||
|
||||
def _set_config(self):
|
||||
"""
|
||||
Setup class
|
||||
"""
|
||||
if str(self.usermacro_sync).lower() == "full":
|
||||
self.sync = True
|
||||
self.force_sync = True
|
||||
elif self.usermacro_sync:
|
||||
self.sync = True
|
||||
return True
|
||||
|
||||
def validate_macro(self, macro_name):
|
||||
"""
|
||||
Validates usermacro name
|
||||
"""
|
||||
pattern = r"\{\$[A-Z0-9\._]*(\:.*)?\}"
|
||||
return match(pattern, macro_name)
|
||||
|
||||
def render_macro(self, macro_name, macro_properties):
|
||||
"""
|
||||
Renders a full usermacro from partial input
|
||||
"""
|
||||
macro = {}
|
||||
macrotypes = {"text": 0, "secret": 1, "vault": 2}
|
||||
if self.validate_macro(macro_name):
|
||||
macro["macro"] = str(macro_name)
|
||||
if isinstance(macro_properties, dict):
|
||||
if not "value" in macro_properties:
|
||||
self.logger.info(
|
||||
"Host %s: Usermacro %s has no value in Netbox, skipping.",
|
||||
self.name,
|
||||
macro_name,
|
||||
)
|
||||
return False
|
||||
macro["value"] = macro_properties["value"]
|
||||
|
||||
if (
|
||||
"type" in macro_properties
|
||||
and macro_properties["type"].lower() in macrotypes
|
||||
):
|
||||
macro["type"] = str(macrotypes[macro_properties["type"]])
|
||||
else:
|
||||
macro["type"] = str(0)
|
||||
|
||||
if "description" in macro_properties and isinstance(
|
||||
macro_properties["description"], str
|
||||
):
|
||||
macro["description"] = macro_properties["description"]
|
||||
else:
|
||||
macro["description"] = ""
|
||||
|
||||
elif isinstance(macro_properties, str) and macro_properties:
|
||||
macro["value"] = macro_properties
|
||||
macro["type"] = str(0)
|
||||
macro["description"] = ""
|
||||
|
||||
else:
|
||||
self.logger.info(
|
||||
"Host %s: Usermacro %s has no value, skipping.",
|
||||
self.name,
|
||||
macro_name,
|
||||
)
|
||||
return False
|
||||
else:
|
||||
self.logger.warning(
|
||||
"Host %s: Usermacro %s is not a valid usermacro name, skipping.",
|
||||
self.name,
|
||||
macro_name,
|
||||
)
|
||||
return False
|
||||
return macro
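# Illustrative example (serial value invented): render_macro("{$HW_SERIAL}",
# "FDO1234X0AB") returns {"macro": "{$HW_SERIAL}", "value": "FDO1234X0AB",
# "type": "0", "description": ""}; names that do not match the {$NAME} syntax
# are skipped with a warning.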
|
||||
|
||||
def generate(self):
|
||||
"""
|
||||
Generate full set of Usermacros
|
||||
"""
|
||||
macros = []
|
||||
data = {}
|
||||
# Parse the field mapper for usermacros
|
||||
if self.usermacro_map:
|
||||
self.logger.debug("Host %s: Starting usermacro mapper.", self.nb.name)
|
||||
field_macros = field_mapper(
|
||||
self.nb.name, self.usermacro_map, self.nb, self.logger
|
||||
)
|
||||
for macro, value in field_macros.items():
|
||||
m = self.render_macro(macro, value)
|
||||
if m:
|
||||
macros.append(m)
|
||||
# Parse NetBox config context for usermacros
|
||||
if (
|
||||
"zabbix" in self.nb.config_context
|
||||
and "usermacros" in self.nb.config_context["zabbix"]
|
||||
):
|
||||
for macro, properties in self.nb.config_context["zabbix"][
|
||||
"usermacros"
|
||||
].items():
|
||||
m = self.render_macro(macro, properties)
|
||||
if m:
|
||||
macros.append(m)
|
||||
data = {"macros": macros}
|
||||
self.logger.debug(
|
||||
"Host %s: Resolved macros: %s", self.name, sanatize_log_output(data)
|
||||
)
|
||||
return macros
|
||||
62
modules/virtual_machine.py
Normal file
@@ -0,0 +1,62 @@
|
||||
# pylint: disable=duplicate-code
|
||||
"""Module that hosts all functions for virtual machine processing"""
|
||||
from modules.device import PhysicalDevice
|
||||
from modules.exceptions import InterfaceConfigError, SyncInventoryError, TemplateError
|
||||
from modules.interface import ZabbixInterface
|
||||
from modules.config import load_config
|
||||
# Load config
|
||||
config = load_config()
|
||||
|
||||
|
||||
class VirtualMachine(PhysicalDevice):
|
||||
"""Model for virtual machines"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.hostgroup = None
|
||||
self.zbx_template_names = None
|
||||
self.hostgroup_type = "vm"
|
||||
|
||||
def _inventory_map(self):
|
||||
"""use VM inventory maps"""
|
||||
return config["vm_inventory_map"]
|
||||
|
||||
def _usermacro_map(self):
|
||||
"""use VM usermacro maps"""
|
||||
return config["vm_usermacro_map"]
|
||||
|
||||
def _tag_map(self):
|
||||
"""use VM tag maps"""
|
||||
return config["vm_tag_map"]
|
||||
|
||||
def set_vm_template(self):
|
||||
"""Set Template for VMs. Overwrites default class
|
||||
to skip a lookup of custom fields."""
|
||||
# Gather templates ONLY from the device specific context
|
||||
try:
|
||||
self.zbx_template_names = self.get_templates_context()
|
||||
except TemplateError as e:
|
||||
self.logger.warning(e)
|
||||
return True
|
||||
|
||||
def setInterfaceDetails(self): # pylint: disable=invalid-name
|
||||
"""
|
||||
Overwrites device function to select an agent interface type by default
|
||||
Agent type interfaces are more likely to be used with VMs than SNMP
|
||||
"""
|
||||
try:
|
||||
# Initiate interface class
|
||||
interface = ZabbixInterface(self.nb.config_context, self.ip)
|
||||
# Check if NetBox has device context.
|
||||
# If not fall back to old config.
|
||||
if interface.get_context():
|
||||
# If device is SNMP type, add additional information.
|
||||
if interface.interface["type"] == 2:
|
||||
interface.set_snmp()
|
||||
else:
|
||||
interface.set_default_agent()
|
||||
return [interface.interface]
|
||||
except InterfaceConfigError as e:
|
||||
message = f"{self.name}: {e}"
|
||||
self.logger.warning(message)
|
||||
raise SyncInventoryError(message) from e
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,2 +1,2 @@
|
||||
pynetbox
|
||||
pyzabbix
|
||||
pynetbox==7.4.1
|
||||
zabbix-utils==2.0.3
|
||||
|
||||
0
tests/__init__.py
Normal file
139
tests/test_configuration_parsing.py
Normal file
@@ -0,0 +1,139 @@
|
||||
"""Tests for configuration parsing in the modules.config module."""
|
||||
from unittest.mock import patch, MagicMock
|
||||
import os
|
||||
from modules.config import load_config, DEFAULT_CONFIG, load_config_file, load_env_variable
|
||||
|
||||
|
||||
def test_load_config_defaults():
|
||||
"""Test that load_config returns default values when no config file or env vars are present"""
|
||||
with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \
|
||||
patch('modules.config.load_env_variable', return_value=None):
|
||||
config = load_config()
|
||||
assert config == DEFAULT_CONFIG
|
||||
assert config["templates_config_context"] is False
|
||||
assert config["create_hostgroups"] is True
|
||||
|
||||
|
||||
def test_load_config_file():
|
||||
"""Test that load_config properly loads values from config file"""
|
||||
mock_config = DEFAULT_CONFIG.copy()
|
||||
mock_config["templates_config_context"] = True
|
||||
mock_config["sync_vms"] = True
|
||||
|
||||
with patch('modules.config.load_config_file', return_value=mock_config), \
|
||||
patch('modules.config.load_env_variable', return_value=None):
|
||||
config = load_config()
|
||||
assert config["templates_config_context"] is True
|
||||
assert config["sync_vms"] is True
|
||||
# Unchanged values should remain as defaults
|
||||
assert config["create_journal"] is False
|
||||
|
||||
|
||||
def test_load_env_variables():
|
||||
"""Test that load_config properly loads values from environment variables"""
|
||||
# Mock env variable loading to return values for specific keys
|
||||
def mock_load_env(key):
|
||||
if key == "sync_vms":
|
||||
return True
|
||||
if key == "create_journal":
|
||||
return True
|
||||
return None
|
||||
|
||||
with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \
|
||||
patch('modules.config.load_env_variable', side_effect=mock_load_env):
|
||||
config = load_config()
|
||||
assert config["sync_vms"] is True
|
||||
assert config["create_journal"] is True
|
||||
# Unchanged values should remain as defaults
|
||||
assert config["templates_config_context"] is False
|
||||
|
||||
|
||||
def test_env_vars_override_config_file():
|
||||
"""Test that environment variables override values from config file"""
|
||||
mock_config = DEFAULT_CONFIG.copy()
|
||||
mock_config["templates_config_context"] = True
|
||||
mock_config["sync_vms"] = False
|
||||
|
||||
# Mock env variable that will override the config file value
|
||||
def mock_load_env(key):
|
||||
if key == "sync_vms":
|
||||
return True
|
||||
return None
|
||||
|
||||
with patch('modules.config.load_config_file', return_value=mock_config), \
|
||||
patch('modules.config.load_env_variable', side_effect=mock_load_env):
|
||||
config = load_config()
|
||||
# This should be overridden by the env var
|
||||
assert config["sync_vms"] is True
|
||||
# This should remain from the config file
|
||||
assert config["templates_config_context"] is True
|
||||
|
||||
|
||||
def test_load_config_file_function():
|
||||
"""Test the load_config_file function directly"""
|
||||
# Test when the file exists
|
||||
with patch('pathlib.Path.exists', return_value=True), \
|
||||
patch('importlib.util.spec_from_file_location') as mock_spec:
|
||||
# Setup the mock module with attributes
|
||||
mock_module = MagicMock()
|
||||
mock_module.templates_config_context = True
|
||||
mock_module.sync_vms = True
|
||||
|
||||
# Setup the mock spec
|
||||
mock_spec_instance = MagicMock()
|
||||
mock_spec.return_value = mock_spec_instance
|
||||
mock_spec_instance.loader.exec_module = lambda x: None
|
||||
|
||||
# Patch module_from_spec to return our mock module
|
||||
with patch('importlib.util.module_from_spec', return_value=mock_module):
|
||||
config = load_config_file(DEFAULT_CONFIG.copy())
|
||||
assert config["templates_config_context"] is True
|
||||
assert config["sync_vms"] is True
|
||||
|
||||
|
||||
def test_load_config_file_not_found():
|
||||
"""Test load_config_file when the config file doesn't exist"""
|
||||
with patch('pathlib.Path.exists', return_value=False):
|
||||
result = load_config_file(DEFAULT_CONFIG.copy())
|
||||
# Should return a dict equal to DEFAULT_CONFIG, not a new object
|
||||
assert result == DEFAULT_CONFIG
|
||||
|
||||
|
||||
def test_load_env_variable_function():
|
||||
"""Test the load_env_variable function directly"""
|
||||
# Create a real environment variable for testing with correct prefix and uppercase
|
||||
test_var = "NBZX_TEMPLATES_CONFIG_CONTEXT"
|
||||
original_env = os.environ.get(test_var, None)
|
||||
try:
|
||||
# Set the environment variable with the proper prefix and case
|
||||
os.environ[test_var] = "True"
|
||||
|
||||
# Test that it's properly read (using lowercase in the function call)
|
||||
value = load_env_variable("templates_config_context")
|
||||
assert value == "True"
|
||||
|
||||
# Test when the environment variable doesn't exist
|
||||
value = load_env_variable("nonexistent_variable")
|
||||
assert value is None
|
||||
finally:
|
||||
# Clean up - restore original environment
|
||||
if original_env is not None:
|
||||
os.environ[test_var] = original_env
|
||||
else:
|
||||
os.environ.pop(test_var, None)
|
||||
|
||||
|
||||
def test_load_config_file_exception_handling():
|
||||
"""Test that load_config_file handles exceptions gracefully"""
|
||||
# This test requires modifying the load_config_file function to handle exceptions
|
||||
# For now, we're just checking that an exception is raised
|
||||
with patch('pathlib.Path.exists', return_value=True), \
|
||||
patch('importlib.util.spec_from_file_location', side_effect=Exception("Import error")):
|
||||
# Since the current implementation doesn't handle exceptions, we should
|
||||
# expect an exception to be raised
|
||||
try:
|
||||
load_config_file(DEFAULT_CONFIG.copy())
|
||||
assert False, "An exception should have been raised"
|
||||
except Exception: # pylint: disable=broad-except
|
||||
# This is expected
|
||||
pass
|
||||
166
tests/test_device_deletion.py
Normal file
@@ -0,0 +1,166 @@
|
||||
"""Tests for device deletion functionality in the PhysicalDevice class."""
|
||||
import unittest
|
||||
from unittest.mock import MagicMock, patch
|
||||
from zabbix_utils import APIRequestError
|
||||
from modules.device import PhysicalDevice
|
||||
from modules.exceptions import SyncExternalError
|
||||
|
||||
|
||||
class TestDeviceDeletion(unittest.TestCase):
|
||||
"""Test class for device deletion functionality."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
# Create mock NetBox device
|
||||
self.mock_nb_device = MagicMock()
|
||||
self.mock_nb_device.id = 123
|
||||
self.mock_nb_device.name = "test-device"
|
||||
self.mock_nb_device.status.label = "Decommissioning"
|
||||
self.mock_nb_device.custom_fields = {"zabbix_hostid": "456"}
|
||||
self.mock_nb_device.config_context = {}
|
||||
|
||||
# Set up a primary IP
|
||||
primary_ip = MagicMock()
|
||||
primary_ip.address = "192.168.1.1/24"
|
||||
self.mock_nb_device.primary_ip = primary_ip
|
||||
|
||||
# Create mock Zabbix API
|
||||
self.mock_zabbix = MagicMock()
|
||||
self.mock_zabbix.version = "6.0"
|
||||
|
||||
# Set up mock host.get response
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
|
||||
# Mock NetBox journal class
|
||||
self.mock_nb_journal = MagicMock()
|
||||
|
||||
# Create logger mock
|
||||
self.mock_logger = MagicMock()
|
||||
|
||||
# Create PhysicalDevice instance with mocks
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
self.device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
journal=True,
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
def test_cleanup_successful_deletion(self):
|
||||
"""Test successful device deletion from Zabbix."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
self.mock_zabbix.host.delete.return_value = {"hostids": ["456"]}
|
||||
|
||||
# Execute
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify
|
||||
self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[])
|
||||
self.mock_zabbix.host.delete.assert_called_once_with('456')
|
||||
self.mock_nb_device.save.assert_called_once()
|
||||
self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"])
|
||||
self.mock_logger.info.assert_called_with(f"Host {self.device.name}: "
|
||||
"Deleted host from Zabbix.")
|
||||
|
||||
def test_cleanup_device_already_deleted(self):
|
||||
"""Test cleanup when device is already deleted from Zabbix."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [] # Empty list means host not found
|
||||
|
||||
# Execute
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify
|
||||
self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[])
|
||||
self.mock_zabbix.host.delete.assert_not_called()
|
||||
self.mock_nb_device.save.assert_called_once()
|
||||
self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"])
|
||||
self.mock_logger.info.assert_called_with(
|
||||
f"Host {self.device.name}: was already deleted from Zabbix. Removed link in NetBox.")
|
||||
|
||||
def test_cleanup_api_error(self):
|
||||
"""Test cleanup when Zabbix API returns an error."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
self.mock_zabbix.host.delete.side_effect = APIRequestError("API Error")
|
||||
|
||||
# Execute and verify
|
||||
with self.assertRaises(SyncExternalError):
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify correct calls were made
|
||||
self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[])
|
||||
self.mock_zabbix.host.delete.assert_called_once_with('456')
|
||||
self.mock_nb_device.save.assert_not_called()
|
||||
self.mock_logger.error.assert_called()
|
||||
|
||||
def test_zeroize_cf(self):
|
||||
"""Test _zeroize_cf method that clears the custom field."""
|
||||
# Execute
|
||||
self.device._zeroize_cf() # pylint: disable=protected-access
|
||||
|
||||
# Verify
|
||||
self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"])
|
||||
self.mock_nb_device.save.assert_called_once()
|
||||
|
||||
def test_create_journal_entry(self):
|
||||
"""Test create_journal_entry method."""
|
||||
# Setup
|
||||
test_message = "Test journal entry"
|
||||
|
||||
# Execute
|
||||
result = self.device.create_journal_entry("info", test_message)
|
||||
|
||||
# Verify
|
||||
self.assertTrue(result)
|
||||
self.mock_nb_journal.create.assert_called_once()
|
||||
journal_entry = self.mock_nb_journal.create.call_args[0][0]
|
||||
self.assertEqual(journal_entry["assigned_object_type"], "dcim.device")
|
||||
self.assertEqual(journal_entry["assigned_object_id"], 123)
|
||||
self.assertEqual(journal_entry["kind"], "info")
|
||||
self.assertEqual(journal_entry["comments"], test_message)
|
||||
|
||||
def test_create_journal_entry_invalid_severity(self):
|
||||
"""Test create_journal_entry with invalid severity."""
|
||||
# Execute
|
||||
result = self.device.create_journal_entry("invalid", "Test message")
|
||||
|
||||
# Verify
|
||||
self.assertFalse(result)
|
||||
self.mock_nb_journal.create.assert_not_called()
|
||||
self.mock_logger.warning.assert_called()
|
||||
|
||||
def test_create_journal_entry_when_disabled(self):
|
||||
"""Test create_journal_entry when journaling is disabled."""
|
||||
# Setup - create device with journal=False
|
||||
with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
|
||||
device = PhysicalDevice(
|
||||
self.mock_nb_device,
|
||||
self.mock_zabbix,
|
||||
self.mock_nb_journal,
|
||||
"3.0",
|
||||
journal=False, # Disable journaling
|
||||
logger=self.mock_logger
|
||||
)
|
||||
|
||||
# Execute
|
||||
result = device.create_journal_entry("info", "Test message")
|
||||
|
||||
# Verify
|
||||
self.assertFalse(result)
|
||||
self.mock_nb_journal.create.assert_not_called()
|
||||
|
||||
def test_cleanup_updates_journal(self):
|
||||
"""Test that cleanup method creates a journal entry."""
|
||||
# Setup
|
||||
self.mock_zabbix.host.get.return_value = [{"hostid": "456"}]
|
||||
|
||||
# Execute
|
||||
with patch.object(self.device, 'create_journal_entry') as mock_journal_entry:
|
||||
self.device.cleanup()
|
||||
|
||||
# Verify
|
||||
mock_journal_entry.assert_called_once_with("warning", "Deleted host from Zabbix")
|
||||
372
tests/test_hostgroups.py
Normal file
@@ -0,0 +1,372 @@
|
||||
"""Tests for the Hostgroup class in the hostgroups module."""
|
||||
import unittest
|
||||
from unittest.mock import MagicMock, patch, call
|
||||
from modules.hostgroups import Hostgroup
|
||||
from modules.exceptions import HostgroupError
|
||||
|
||||
|
||||
class TestHostgroups(unittest.TestCase):
|
||||
"""Test class for Hostgroup functionality."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
# Create mock logger
|
||||
self.mock_logger = MagicMock()
|
||||
|
||||
# *** Mock NetBox Device setup ***
|
||||
# Create mock device with all properties
|
||||
self.mock_device = MagicMock()
|
||||
self.mock_device.name = "test-device"
|
||||
|
||||
# Set up site information
|
||||
site = MagicMock()
|
||||
site.name = "TestSite"
|
||||
|
||||
# Set up region information
|
||||
region = MagicMock()
|
||||
region.name = "TestRegion"
|
||||
# Ensure region string representation returns the name
|
||||
        region.__str__.return_value = "TestRegion"
        site.region = region

        # Set up site group information
        site_group = MagicMock()
        site_group.name = "TestSiteGroup"
        # Ensure site group string representation returns the name
        site_group.__str__.return_value = "TestSiteGroup"
        site.group = site_group

        self.mock_device.site = site

        # Set up role information (varies based on NetBox version)
        self.mock_device_role = MagicMock()
        self.mock_device_role.name = "TestRole"
        # Ensure string representation returns the name
        self.mock_device_role.__str__.return_value = "TestRole"
        self.mock_device.device_role = self.mock_device_role
        self.mock_device.role = self.mock_device_role

        # Set up tenant information
        tenant = MagicMock()
        tenant.name = "TestTenant"
        # Ensure tenant string representation returns the name
        tenant.__str__.return_value = "TestTenant"
        tenant_group = MagicMock()
        tenant_group.name = "TestTenantGroup"
        # Ensure tenant group string representation returns the name
        tenant_group.__str__.return_value = "TestTenantGroup"
        tenant.group = tenant_group
        self.mock_device.tenant = tenant

        # Set up platform information
        platform = MagicMock()
        platform.name = "TestPlatform"
        self.mock_device.platform = platform

        # Device-specific properties
        device_type = MagicMock()
        manufacturer = MagicMock()
        manufacturer.name = "TestManufacturer"
        device_type.manufacturer = manufacturer
        self.mock_device.device_type = device_type

        location = MagicMock()
        location.name = "TestLocation"
        # Ensure location string representation returns the name
        location.__str__.return_value = "TestLocation"
        self.mock_device.location = location

        # Custom fields
        self.mock_device.custom_fields = {"test_cf": "TestCF"}

        # *** Mock NetBox VM setup ***
        # Create mock VM with all properties
        self.mock_vm = MagicMock()
        self.mock_vm.name = "test-vm"

        # Reuse site from device
        self.mock_vm.site = site

        # Set up role for VM
        self.mock_vm.role = self.mock_device_role

        # Set up tenant for VM (same as device)
        self.mock_vm.tenant = tenant

        # Set up platform for VM (same as device)
        self.mock_vm.platform = platform

        # VM-specific properties
        cluster = MagicMock()
        cluster.name = "TestCluster"
        cluster_type = MagicMock()
        cluster_type.name = "TestClusterType"
        cluster.type = cluster_type
        self.mock_vm.cluster = cluster

        # Custom fields
        self.mock_vm.custom_fields = {"test_cf": "TestCF"}

        # Mock data for nesting tests
        self.mock_regions_data = [
            {"name": "ParentRegion", "parent": None, "_depth": 0},
            {"name": "TestRegion", "parent": "ParentRegion", "_depth": 1}
        ]

        self.mock_groups_data = [
            {"name": "ParentSiteGroup", "parent": None, "_depth": 0},
            {"name": "TestSiteGroup", "parent": "ParentSiteGroup", "_depth": 1}
        ]

    def test_device_hostgroup_creation(self):
        """Test basic device hostgroup creation."""
        hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)

        # Test the string representation
        self.assertEqual(str(hostgroup), "Hostgroup for dev test-device")

        # Check format options were set correctly
        self.assertEqual(hostgroup.format_options["site"], "TestSite")
        self.assertEqual(hostgroup.format_options["region"], "TestRegion")
        self.assertEqual(hostgroup.format_options["site_group"], "TestSiteGroup")
        self.assertEqual(hostgroup.format_options["role"], "TestRole")
        self.assertEqual(hostgroup.format_options["tenant"], "TestTenant")
        self.assertEqual(hostgroup.format_options["tenant_group"], "TestTenantGroup")
        self.assertEqual(hostgroup.format_options["platform"], "TestPlatform")
        self.assertEqual(hostgroup.format_options["manufacturer"], "TestManufacturer")
        self.assertEqual(hostgroup.format_options["location"], "TestLocation")

    def test_vm_hostgroup_creation(self):
        """Test basic VM hostgroup creation."""
        hostgroup = Hostgroup("vm", self.mock_vm, "4.0", self.mock_logger)

        # Test the string representation
        self.assertEqual(str(hostgroup), "Hostgroup for vm test-vm")

        # Check format options were set correctly
        self.assertEqual(hostgroup.format_options["site"], "TestSite")
        self.assertEqual(hostgroup.format_options["region"], "TestRegion")
        self.assertEqual(hostgroup.format_options["site_group"], "TestSiteGroup")
        self.assertEqual(hostgroup.format_options["role"], "TestRole")
        self.assertEqual(hostgroup.format_options["tenant"], "TestTenant")
        self.assertEqual(hostgroup.format_options["tenant_group"], "TestTenantGroup")
        self.assertEqual(hostgroup.format_options["platform"], "TestPlatform")
        self.assertEqual(hostgroup.format_options["cluster"], "TestCluster")
        self.assertEqual(hostgroup.format_options["cluster_type"], "TestClusterType")

    def test_invalid_object_type(self):
        """Test that an invalid object type raises an exception."""
        with self.assertRaises(HostgroupError):
            Hostgroup("invalid", self.mock_device, "4.0", self.mock_logger)

    def test_device_hostgroup_formats(self):
        """Test different hostgroup formats for devices."""
        hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)

        # Custom format: site/region
        custom_result = hostgroup.generate("site/region")
        self.assertEqual(custom_result, "TestSite/TestRegion")

        # Custom format: site/tenant/platform/location
        complex_result = hostgroup.generate("site/tenant/platform/location")
        self.assertEqual(complex_result, "TestSite/TestTenant/TestPlatform/TestLocation")

    def test_vm_hostgroup_formats(self):
        """Test different hostgroup formats for VMs."""
        hostgroup = Hostgroup("vm", self.mock_vm, "4.0", self.mock_logger)

        # Default format: cluster/role
        default_result = hostgroup.generate("cluster/role")
        self.assertEqual(default_result, "TestCluster/TestRole")

        # Custom format: site/tenant
        custom_result = hostgroup.generate("site/tenant")
        self.assertEqual(custom_result, "TestSite/TestTenant")

        # Custom format: cluster/cluster_type/platform
        complex_result = hostgroup.generate("cluster/cluster_type/platform")
        self.assertEqual(complex_result, "TestCluster/TestClusterType/TestPlatform")

    def test_device_netbox_version_differences(self):
        """Test hostgroup generation with different NetBox versions."""
        # NetBox v2.x
        hostgroup_v2 = Hostgroup("dev", self.mock_device, "2.11", self.mock_logger)
        self.assertEqual(hostgroup_v2.format_options["role"], "TestRole")

        # NetBox v3.x
        hostgroup_v3 = Hostgroup("dev", self.mock_device, "3.5", self.mock_logger)
        self.assertEqual(hostgroup_v3.format_options["role"], "TestRole")

        # NetBox v4.x (already tested in other methods)

    def test_custom_field_lookup(self):
        """Test custom field lookup functionality."""
        hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)

        # Test custom field exists and is populated
        cf_result = hostgroup.custom_field_lookup("test_cf")
        self.assertTrue(cf_result["result"])
        self.assertEqual(cf_result["cf"], "TestCF")

        # Test custom field doesn't exist
        cf_result = hostgroup.custom_field_lookup("nonexistent_cf")
        self.assertFalse(cf_result["result"])
        self.assertIsNone(cf_result["cf"])

    def test_hostgroup_with_custom_field(self):
        """Test hostgroup generation including a custom field."""
        hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)

        # Generate with custom field included
        result = hostgroup.generate("site/test_cf/role")
        self.assertEqual(result, "TestSite/TestCF/TestRole")

    def test_missing_hostgroup_format_item(self):
        """Test handling of missing hostgroup format items."""
        # Create a device with minimal attributes
        minimal_device = MagicMock()
        minimal_device.name = "minimal-device"
        minimal_device.site = None
        minimal_device.tenant = None
        minimal_device.platform = None
        minimal_device.custom_fields = {}

        # Create role
        role = MagicMock()
        role.name = "MinimalRole"
        minimal_device.role = role

        # Create device_type with manufacturer
        device_type = MagicMock()
        manufacturer = MagicMock()
        manufacturer.name = "MinimalManufacturer"
        device_type.manufacturer = manufacturer
        minimal_device.device_type = device_type

        # Create hostgroup
        hostgroup = Hostgroup("dev", minimal_device, "4.0", self.mock_logger)

        # Generate with default format
        result = hostgroup.generate("site/manufacturer/role")
        # Site is missing, so only manufacturer and role should be included
        self.assertEqual(result, "MinimalManufacturer/MinimalRole")

        # Test with invalid format
        with self.assertRaises(HostgroupError):
            hostgroup.generate("site/nonexistent/role")

    def test_nested_region_hostgroups(self):
        """Test hostgroup generation with nested regions."""
        # Mock the build_path function to return a predictable result
        with patch('modules.hostgroups.build_path') as mock_build_path:
            # Configure the mock to return a list of regions in the path
            mock_build_path.return_value = ["ParentRegion", "TestRegion"]

            # Create hostgroup with nested regions enabled
            hostgroup = Hostgroup(
                "dev",
                self.mock_device,
                "4.0",
                self.mock_logger,
                nested_region_flag=True,
                nb_regions=self.mock_regions_data
            )

            # Generate hostgroup with region
            result = hostgroup.generate("site/region/role")
            # Should include the parent region
            self.assertEqual(result, "TestSite/ParentRegion/TestRegion/TestRole")

    def test_nested_sitegroup_hostgroups(self):
        """Test hostgroup generation with nested site groups."""
        # Mock the build_path function to return a predictable result
        with patch('modules.hostgroups.build_path') as mock_build_path:
            # Configure the mock to return a list of site groups in the path
            mock_build_path.return_value = ["ParentSiteGroup", "TestSiteGroup"]

            # Create hostgroup with nested site groups enabled
            hostgroup = Hostgroup(
                "dev",
                self.mock_device,
                "4.0",
                self.mock_logger,
                nested_sitegroup_flag=True,
                nb_groups=self.mock_groups_data
            )

            # Generate hostgroup with site_group
            result = hostgroup.generate("site/site_group/role")
            # Should include the parent site group
            self.assertEqual(result, "TestSite/ParentSiteGroup/TestSiteGroup/TestRole")

    def test_list_formatoptions(self):
        """Test the list_formatoptions method for debugging."""
        hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)

        # Patch sys.stdout to capture print output
        with patch('sys.stdout') as mock_stdout:
            hostgroup.list_formatoptions()

            # Check that print was called with expected output
            calls = [call.write("The following options are available for host test-device"),
                     call.write('\n')]
            mock_stdout.assert_has_calls(calls, any_order=True)

    def test_vm_list_based_hostgroup_format(self):
        """Test VM hostgroup generation with a list-based format."""
        hostgroup = Hostgroup("vm", self.mock_vm, "4.0", self.mock_logger)

        # Test with a list of format strings
        format_list = ["platform", "role", "cluster_type/cluster"]

        # Generate hostgroups for each format in the list
        hostgroups = []
        for fmt in format_list:
            result = hostgroup.generate(fmt)
            if result:  # Only add non-None results
                hostgroups.append(result)

        # Verify each expected hostgroup is generated
        self.assertEqual(len(hostgroups), 3)  # Should have 3 hostgroups
        self.assertIn("TestPlatform", hostgroups)
        self.assertIn("TestRole", hostgroups)
        self.assertIn("TestClusterType/TestCluster", hostgroups)

    def test_nested_format_splitting(self):
        """Test that formats with slashes correctly split and resolve each component."""
        hostgroup = Hostgroup("vm", self.mock_vm, "4.0", self.mock_logger)

        # Test a format with slashes that should be split
        complex_format = "cluster_type/cluster"
        result = hostgroup.generate(complex_format)

        # Verify the format is correctly split and each component resolved
        self.assertEqual(result, "TestClusterType/TestCluster")

    def test_multiple_hostgroup_formats_device(self):
        """Test device hostgroup generation with multiple formats."""
        hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)

        # Test with various formats that would be in a list
        formats = [
            "site",
            "manufacturer/role",
            "platform/location",
            "tenant_group/tenant"
        ]

        # Generate and check each format
        results = {}
        for fmt in formats:
            results[fmt] = hostgroup.generate(fmt)

        # Verify results
        self.assertEqual(results["site"], "TestSite")
        self.assertEqual(results["manufacturer/role"], "TestManufacturer/TestRole")
        self.assertEqual(results["platform/location"], "TestPlatform/TestLocation")
        self.assertEqual(results["tenant_group/tenant"], "TestTenantGroup/TestTenant")


if __name__ == "__main__":
    unittest.main()
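
For reference, a minimal sketch of how a caller could consume a list-based hostgroup format with the Hostgroup class exercised above. The helper name build_hostgroups, the hard-coded "dev" object type and the default NetBox version are illustrative assumptions, not part of the module; generate() returning None for unresolved formats is the behaviour the tests rely on.

from modules.hostgroups import Hostgroup

def build_hostgroups(device, logger, formats, nb_version="4.0"):
    # Resolve each configured format ("site", "cluster/role", ...) into a hostgroup path.
    hostgroup = Hostgroup("dev", device, nb_version, logger)
    return [path for path in (hostgroup.generate(fmt) for fmt in formats) if path]
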
247
tests/test_interface.py
Normal file
@@ -0,0 +1,247 @@
"""Tests for the ZabbixInterface class in the interface module."""
import unittest
from modules.interface import ZabbixInterface
from modules.exceptions import InterfaceConfigError


class TestZabbixInterface(unittest.TestCase):
    """Test class for ZabbixInterface functionality."""

    def setUp(self):
        """Set up test fixtures."""
        self.test_ip = "192.168.1.1"
        self.empty_context = {}
        self.default_interface = ZabbixInterface(self.empty_context, self.test_ip)

        # Create some test contexts for different scenarios
        self.snmpv2_context = {
            "zabbix": {
                "interface_type": 2,
                "interface_port": "161",
                "snmp": {
                    "version": 2,
                    "community": "public",
                    "bulk": 1
                }
            }
        }

        self.snmpv3_context = {
            "zabbix": {
                "interface_type": 2,
                "snmp": {
                    "version": 3,
                    "securityname": "snmpuser",
                    "securitylevel": "authPriv",
                    "authprotocol": "SHA",
                    "authpassphrase": "authpass123",
                    "privprotocol": "AES",
                    "privpassphrase": "privpass123",
                    "contextname": "context1"
                }
            }
        }

        self.agent_context = {
            "zabbix": {
                "interface_type": 1,
                "interface_port": "10050"
            }
        }

    def test_init(self):
        """Test initialization of ZabbixInterface."""
        interface = ZabbixInterface(self.empty_context, self.test_ip)

        # Check basic properties
        self.assertEqual(interface.ip, self.test_ip)
        self.assertEqual(interface.context, self.empty_context)
        self.assertEqual(interface.interface["ip"], self.test_ip)
        self.assertEqual(interface.interface["main"], "1")
        self.assertEqual(interface.interface["useip"], "1")
        self.assertEqual(interface.interface["dns"], "")

    def test_get_context_empty(self):
        """Test get_context with empty context."""
        interface = ZabbixInterface(self.empty_context, self.test_ip)
        result = interface.get_context()
        self.assertFalse(result)

    def test_get_context_with_interface_type(self):
        """Test get_context with interface_type but no port."""
        context = {"zabbix": {"interface_type": 2}}
        interface = ZabbixInterface(context, self.test_ip)

        # Should set type and default port
        result = interface.get_context()
        self.assertTrue(result)
        self.assertEqual(interface.interface["type"], 2)
        self.assertEqual(interface.interface["port"], "161")  # Default port for SNMP

    def test_get_context_with_interface_type_and_port(self):
        """Test get_context with both interface_type and port."""
        context = {"zabbix": {"interface_type": 1, "interface_port": "12345"}}
        interface = ZabbixInterface(context, self.test_ip)

        # Should set type and specified port
        result = interface.get_context()
        self.assertTrue(result)
        self.assertEqual(interface.interface["type"], 1)
        self.assertEqual(interface.interface["port"], "12345")

    def test_set_default_port(self):
        """Test _set_default_port for different interface types."""
        interface = ZabbixInterface(self.empty_context, self.test_ip)

        # Test for agent type (1)
        interface.interface["type"] = 1
        interface._set_default_port()  # pylint: disable=protected-access
        self.assertEqual(interface.interface["port"], "10050")

        # Test for SNMP type (2)
        interface.interface["type"] = 2
        interface._set_default_port()  # pylint: disable=protected-access
        self.assertEqual(interface.interface["port"], "161")

        # Test for IPMI type (3)
        interface.interface["type"] = 3
        interface._set_default_port()  # pylint: disable=protected-access
        self.assertEqual(interface.interface["port"], "623")

        # Test for JMX type (4)
        interface.interface["type"] = 4
        interface._set_default_port()  # pylint: disable=protected-access
        self.assertEqual(interface.interface["port"], "12345")

        # Test for unsupported type
        interface.interface["type"] = 99
        result = interface._set_default_port()  # pylint: disable=protected-access
        self.assertFalse(result)

    def test_set_snmp_v2(self):
        """Test set_snmp with SNMPv2 configuration."""
        interface = ZabbixInterface(self.snmpv2_context, self.test_ip)
        interface.get_context()  # Set the interface type

        # Call set_snmp
        interface.set_snmp()

        # Check SNMP details
        self.assertEqual(interface.interface["details"]["version"], "2")
        self.assertEqual(interface.interface["details"]["community"], "public")
        self.assertEqual(interface.interface["details"]["bulk"], "1")

    def test_set_snmp_v3(self):
        """Test set_snmp with SNMPv3 configuration."""
        interface = ZabbixInterface(self.snmpv3_context, self.test_ip)
        interface.get_context()  # Set the interface type

        # Call set_snmp
        interface.set_snmp()

        # Check SNMP details
        self.assertEqual(interface.interface["details"]["version"], "3")
        self.assertEqual(interface.interface["details"]["securityname"], "snmpuser")
        self.assertEqual(interface.interface["details"]["securitylevel"], "authPriv")
        self.assertEqual(interface.interface["details"]["authprotocol"], "SHA")
        self.assertEqual(interface.interface["details"]["authpassphrase"], "authpass123")
        self.assertEqual(interface.interface["details"]["privprotocol"], "AES")
        self.assertEqual(interface.interface["details"]["privpassphrase"], "privpass123")
        self.assertEqual(interface.interface["details"]["contextname"], "context1")

    def test_set_snmp_no_snmp_config(self):
        """Test set_snmp with missing SNMP configuration."""
        # Create context with interface type but no SNMP config
        context = {"zabbix": {"interface_type": 2}}
        interface = ZabbixInterface(context, self.test_ip)
        interface.get_context()  # Set the interface type

        # Call set_snmp - should raise exception
        with self.assertRaises(InterfaceConfigError):
            interface.set_snmp()

    def test_set_snmp_unsupported_version(self):
        """Test set_snmp with unsupported SNMP version."""
        # Create context with invalid SNMP version
        context = {
            "zabbix": {
                "interface_type": 2,
                "snmp": {
                    "version": 4  # Invalid version
                }
            }
        }
        interface = ZabbixInterface(context, self.test_ip)
        interface.get_context()  # Set the interface type

        # Call set_snmp - should raise exception
        with self.assertRaises(InterfaceConfigError):
            interface.set_snmp()

    def test_set_snmp_no_version(self):
        """Test set_snmp with missing SNMP version."""
        # Create context without SNMP version
        context = {
            "zabbix": {
                "interface_type": 2,
                "snmp": {
                    "community": "public"  # No version specified
                }
            }
        }
        interface = ZabbixInterface(context, self.test_ip)
        interface.get_context()  # Set the interface type

        # Call set_snmp - should raise exception
        with self.assertRaises(InterfaceConfigError):
            interface.set_snmp()

    def test_set_snmp_non_snmp_interface(self):
        """Test set_snmp with non-SNMP interface type."""
        interface = ZabbixInterface(self.agent_context, self.test_ip)
        interface.get_context()  # Set the interface type

        # Call set_snmp - should raise exception
        with self.assertRaises(InterfaceConfigError):
            interface.set_snmp()

    def test_set_default_snmp(self):
        """Test set_default_snmp method."""
        interface = ZabbixInterface(self.empty_context, self.test_ip)
        interface.set_default_snmp()

        # Check interface properties
        self.assertEqual(interface.interface["type"], "2")
        self.assertEqual(interface.interface["port"], "161")
        self.assertEqual(interface.interface["details"]["version"], "2")
        self.assertEqual(interface.interface["details"]["community"], "{$SNMP_COMMUNITY}")
        self.assertEqual(interface.interface["details"]["bulk"], "1")

    def test_set_default_agent(self):
        """Test set_default_agent method."""
        interface = ZabbixInterface(self.empty_context, self.test_ip)
        interface.set_default_agent()

        # Check interface properties
        self.assertEqual(interface.interface["type"], "1")
        self.assertEqual(interface.interface["port"], "10050")

    def test_snmpv2_no_community(self):
        """Test SNMPv2 with no community string specified."""
        # Create context with SNMPv2 but no community
        context = {
            "zabbix": {
                "interface_type": 2,
                "snmp": {
                    "version": 2
                }
            }
        }
        interface = ZabbixInterface(context, self.test_ip)
        interface.get_context()  # Set the interface type

        # Call set_snmp
        interface.set_snmp()

        # Should use default community string
        self.assertEqual(interface.interface["details"]["community"], "{$SNMP_COMMUNITY}")
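
The fixtures above imply the shape of the NetBox config context that ZabbixInterface reads. A representative example follows; the values are illustrative only, and only the keys exercised by these tests are shown.

example_context = {
    "zabbix": {
        "interface_type": 2,      # 1 = agent, 2 = SNMP, 3 = IPMI, 4 = JMX
        "interface_port": "161",  # optional; a per-type default is applied otherwise
        "snmp": {"version": 2, "community": "public", "bulk": 1},
    }
}
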
137
tests/test_list_hostgroup_formats.py
Normal file
@@ -0,0 +1,137 @@
"""Tests for list-based hostgroup formats in configuration."""
import unittest
from unittest.mock import MagicMock, patch
from modules.hostgroups import Hostgroup
from modules.exceptions import HostgroupError
from modules.tools import verify_hg_format


class TestListHostgroupFormats(unittest.TestCase):
    """Test class for list-based hostgroup format functionality."""

    def setUp(self):
        """Set up test fixtures."""
        # Create mock logger
        self.mock_logger = MagicMock()

        # Create mock device
        self.mock_device = MagicMock()
        self.mock_device.name = "test-device"

        # Set up site information
        site = MagicMock()
        site.name = "TestSite"

        # Set up region information
        region = MagicMock()
        region.name = "TestRegion"
        region.__str__.return_value = "TestRegion"
        site.region = region

        # Set device site
        self.mock_device.site = site

        # Set up role information
        self.mock_device_role = MagicMock()
        self.mock_device_role.name = "TestRole"
        self.mock_device_role.__str__.return_value = "TestRole"
        self.mock_device.role = self.mock_device_role

        # Set up rack information
        rack = MagicMock()
        rack.name = "TestRack"
        self.mock_device.rack = rack

        # Set up platform information
        platform = MagicMock()
        platform.name = "TestPlatform"
        self.mock_device.platform = platform

        # Device-specific properties
        device_type = MagicMock()
        manufacturer = MagicMock()
        manufacturer.name = "TestManufacturer"
        device_type.manufacturer = manufacturer
        self.mock_device.device_type = device_type

        # Create mock VM
        self.mock_vm = MagicMock()
        self.mock_vm.name = "test-vm"

        # Reuse site from device
        self.mock_vm.site = site

        # Set up role for VM
        self.mock_vm.role = self.mock_device_role

        # Set up platform for VM
        self.mock_vm.platform = platform

        # VM-specific properties
        cluster = MagicMock()
        cluster.name = "TestCluster"
        cluster_type = MagicMock()
        cluster_type.name = "TestClusterType"
        cluster.type = cluster_type
        self.mock_vm.cluster = cluster

    def test_verify_list_based_hostgroup_format(self):
        """Test verification of list-based hostgroup formats."""
        # List format with valid items
        valid_format = ["region", "site", "rack"]

        # List format with nested path
        valid_nested_format = ["region", "site/rack"]

        # List format with invalid item
        invalid_format = ["region", "invalid_item", "rack"]

        # Should not raise exception for valid formats
        verify_hg_format(valid_format, hg_type="dev", logger=self.mock_logger)
        verify_hg_format(valid_nested_format, hg_type="dev", logger=self.mock_logger)

        # Should raise exception for invalid format
        with self.assertRaises(HostgroupError):
            verify_hg_format(invalid_format, hg_type="dev", logger=self.mock_logger)

    def test_simulate_hostgroup_generation_from_config(self):
        """Simulate how the main script would generate hostgroups from list-based config."""
        # Mock configuration with list-based hostgroup format
        config_format = ["region", "site", "rack"]
        hostgroup = Hostgroup("dev", self.mock_device, "4.0", self.mock_logger)

        # Simulate the main script's hostgroup generation process
        hostgroups = []
        for fmt in config_format:
            result = hostgroup.generate(fmt)
            if result:
                hostgroups.append(result)

        # Check results
        self.assertEqual(len(hostgroups), 3)
        self.assertIn("TestRegion", hostgroups)
        self.assertIn("TestSite", hostgroups)
        self.assertIn("TestRack", hostgroups)

    def test_vm_hostgroup_format_from_config(self):
        """Test VM hostgroup generation with list-based format."""
        # Mock VM configuration with mixed format
        config_format = ["platform", "role", "cluster_type/cluster"]
        hostgroup = Hostgroup("vm", self.mock_vm, "4.0", self.mock_logger)

        # Simulate the main script's hostgroup generation process
        hostgroups = []
        for fmt in config_format:
            result = hostgroup.generate(fmt)
            if result:
                hostgroups.append(result)

        # Check results
        self.assertEqual(len(hostgroups), 3)
        self.assertIn("TestPlatform", hostgroups)
        self.assertIn("TestRole", hostgroups)
        self.assertIn("TestClusterType/TestCluster", hostgroups)


if __name__ == "__main__":
    unittest.main()
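
A standalone usage sketch of verify_hg_format as exercised above; the logger setup is illustrative and not part of the module.

import logging
from modules.tools import verify_hg_format

logger = logging.getLogger(__name__)
# Raises HostgroupError when a format item is not a known hostgroup component.
verify_hg_format(["region", "site/rack"], hg_type="dev", logger=logger)
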
429
tests/test_physical_device.py
Normal file
@@ -0,0 +1,429 @@
"""Tests for the PhysicalDevice class in the device module."""
import unittest
from unittest.mock import MagicMock, patch
from modules.device import PhysicalDevice
from modules.exceptions import TemplateError, SyncInventoryError


class TestPhysicalDevice(unittest.TestCase):
    """Test class for PhysicalDevice functionality."""

    def setUp(self):
        """Set up test fixtures."""
        # Create mock NetBox device
        self.mock_nb_device = MagicMock()
        self.mock_nb_device.id = 123
        self.mock_nb_device.name = "test-device"
        self.mock_nb_device.status.label = "Active"
        self.mock_nb_device.custom_fields = {"zabbix_hostid": None}
        self.mock_nb_device.config_context = {}

        # Set up a primary IP
        primary_ip = MagicMock()
        primary_ip.address = "192.168.1.1/24"
        self.mock_nb_device.primary_ip = primary_ip

        # Create mock Zabbix API
        self.mock_zabbix = MagicMock()
        self.mock_zabbix.version = "6.0"

        # Mock NetBox journal class
        self.mock_nb_journal = MagicMock()

        # Create logger mock
        self.mock_logger = MagicMock()

        # Create PhysicalDevice instance with mocks
        with patch('modules.device.config',
                   {"device_cf": "zabbix_hostid",
                    "template_cf": "zabbix_template",
                    "templates_config_context": False,
                    "templates_config_context_overrule": False,
                    "traverse_regions": False,
                    "traverse_site_groups": False,
                    "inventory_mode": "disabled",
                    "inventory_sync": False,
                    "device_inventory_map": {}
                    }):
            self.device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                journal=True,
                logger=self.mock_logger
            )

    def test_init(self):
        """Test the initialization of the PhysicalDevice class."""
        # Check that basic properties are set correctly
        self.assertEqual(self.device.name, "test-device")
        self.assertEqual(self.device.id, 123)
        self.assertEqual(self.device.status, "Active")
        self.assertEqual(self.device.ip, "192.168.1.1")
        self.assertEqual(self.device.cidr, "192.168.1.1/24")

    def test_init_no_primary_ip(self):
        """Test initialization when device has no primary IP."""
        # Set primary_ip to None
        self.mock_nb_device.primary_ip = None

        # Creating device should raise SyncInventoryError
        with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
            with self.assertRaises(SyncInventoryError):
                PhysicalDevice(
                    self.mock_nb_device,
                    self.mock_zabbix,
                    self.mock_nb_journal,
                    "3.0",
                    logger=self.mock_logger
                )

    def test_set_basics_with_special_characters(self):
        """Test _setBasics when device name contains special characters."""
        # Set name with special characters that
        # will actually trigger the special character detection
        self.mock_nb_device.name = "test-devïce"

        # We need to patch the search function to simulate finding special characters
        with patch('modules.device.search') as mock_search, \
             patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
            # Make the search function return True to simulate special characters
            mock_search.return_value = True

            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

            # With the mocked search function, the name should be changed to NETBOX_ID format
            self.assertEqual(device.name, f"NETBOX_ID{self.mock_nb_device.id}")
            # And visible_name should be set to the original name
            self.assertEqual(device.visible_name, "test-devïce")
            # use_visible_name flag should be set
            self.assertTrue(device.use_visible_name)

    def test_get_templates_context(self):
        """Test get_templates_context with valid config."""
        # Set up config_context with valid template data
        self.mock_nb_device.config_context = {
            "zabbix": {
                "templates": ["Template1", "Template2"]
            }
        }

        # Create device with the updated mock
        with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Test that templates are returned correctly
        templates = device.get_templates_context()
        self.assertEqual(templates, ["Template1", "Template2"])

    def test_get_templates_context_with_string(self):
        """Test get_templates_context with a string instead of list."""
        # Set up config_context with a string template
        self.mock_nb_device.config_context = {
            "zabbix": {
                "templates": "Template1"
            }
        }

        # Create device with the updated mock
        with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Test that template is wrapped in a list
        templates = device.get_templates_context()
        self.assertEqual(templates, ["Template1"])

    def test_get_templates_context_no_zabbix_key(self):
        """Test get_templates_context when zabbix key is missing."""
        # Set up config_context without zabbix key
        self.mock_nb_device.config_context = {}

        # Create device with the updated mock
        with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Test that TemplateError is raised
        with self.assertRaises(TemplateError):
            device.get_templates_context()

    def test_get_templates_context_no_templates_key(self):
        """Test get_templates_context when templates key is missing."""
        # Set up config_context without templates key
        self.mock_nb_device.config_context = {"zabbix": {}}

        # Create device with the updated mock
        with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Test that TemplateError is raised
        with self.assertRaises(TemplateError):
            device.get_templates_context()

    def test_set_template_with_config_context(self):
        """Test set_template with templates_config_context=True."""
        # Set up config_context with templates
        self.mock_nb_device.config_context = {
            "zabbix": {
                "templates": ["Template1"]
            }
        }

        # Mock get_templates_context to return expected templates
        with patch.object(PhysicalDevice, 'get_templates_context', return_value=["Template1"]):
            with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
                device = PhysicalDevice(
                    self.mock_nb_device,
                    self.mock_zabbix,
                    self.mock_nb_journal,
                    "3.0",
                    logger=self.mock_logger
                )

            # Call set_template with prefer_config_context=True
            result = device.set_template(prefer_config_context=True, overrule_custom=False)

            # Check result and template names
            self.assertTrue(result)
            self.assertEqual(device.zbx_template_names, ["Template1"])

    def test_set_inventory_disabled_mode(self):
        """Test set_inventory with inventory_mode=disabled."""
        # Configure with disabled inventory mode
        config_patch = {
            "device_cf": "zabbix_hostid",
            "inventory_mode": "disabled",
            "inventory_sync": False
        }

        with patch('modules.device.config', config_patch):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Call set_inventory with the config patch still active
        with patch('modules.device.config', config_patch):
            result = device.set_inventory({})

        # Check result
        self.assertTrue(result)
        # Default value for disabled inventory
        self.assertEqual(device.inventory_mode, -1)

    def test_set_inventory_manual_mode(self):
        """Test set_inventory with inventory_mode=manual."""
        # Configure with manual inventory mode
        config_patch = {
            "device_cf": "zabbix_hostid",
            "inventory_mode": "manual",
            "inventory_sync": False
        }

        with patch('modules.device.config', config_patch):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Call set_inventory with the config patch still active
        with patch('modules.device.config', config_patch):
            result = device.set_inventory({})

        # Check result
        self.assertTrue(result)
        self.assertEqual(device.inventory_mode, 0)  # Manual mode

    def test_set_inventory_automatic_mode(self):
        """Test set_inventory with inventory_mode=automatic."""
        # Configure with automatic inventory mode
        config_patch = {
            "device_cf": "zabbix_hostid",
            "inventory_mode": "automatic",
            "inventory_sync": False
        }

        with patch('modules.device.config', config_patch):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Call set_inventory with the config patch still active
        with patch('modules.device.config', config_patch):
            result = device.set_inventory({})

        # Check result
        self.assertTrue(result)
        self.assertEqual(device.inventory_mode, 1)  # Automatic mode

    def test_set_inventory_with_inventory_sync(self):
        """Test set_inventory with inventory_sync=True."""
        # Configure with inventory sync enabled
        config_patch = {
            "device_cf": "zabbix_hostid",
            "inventory_mode": "manual",
            "inventory_sync": True,
            "device_inventory_map": {
                "name": "name",
                "serial": "serialno_a"
            }
        }

        with patch('modules.device.config', config_patch):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Create a mock device with the required attributes
        mock_device_data = {
            "name": "test-device",
            "serial": "ABC123"
        }

        # Call set_inventory with the config patch still active
        with patch('modules.device.config', config_patch):
            result = device.set_inventory(mock_device_data)

        # Check result
        self.assertTrue(result)
        self.assertEqual(device.inventory_mode, 0)  # Manual mode
        self.assertEqual(device.inventory, {
            "name": "test-device",
            "serialno_a": "ABC123"
        })

    def test_iscluster_true(self):
        """Test isCluster when device is part of a cluster."""
        # Set up virtual_chassis
        self.mock_nb_device.virtual_chassis = MagicMock()

        # Create device with the updated mock
        with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Check isCluster result
        self.assertTrue(device.isCluster())

    def test_is_cluster_false(self):
        """Test isCluster when device is not part of a cluster."""
        # Set virtual_chassis to None
        self.mock_nb_device.virtual_chassis = None

        # Create device with the updated mock
        with patch('modules.device.config', {"device_cf": "zabbix_hostid"}):
            device = PhysicalDevice(
                self.mock_nb_device,
                self.mock_zabbix,
                self.mock_nb_journal,
                "3.0",
                logger=self.mock_logger
            )

        # Check isCluster result
        self.assertFalse(device.isCluster())

    def test_promote_master_device_primary(self):
        """Test promoteMasterDevice when device is primary in cluster."""
        # Set up virtual chassis with master device
        mock_vc = MagicMock()
        mock_vc.name = "virtual-chassis-1"
        mock_master = MagicMock()
        mock_master.id = self.mock_nb_device.id  # Set master ID to match the current device
        mock_vc.master = mock_master
        self.mock_nb_device.virtual_chassis = mock_vc

        # Create device with the updated mock
        device = PhysicalDevice(
            self.mock_nb_device,
            self.mock_zabbix,
            self.mock_nb_journal,
            "3.0",
            logger=self.mock_logger
        )

        # Call promoteMasterDevice and check the result
        result = device.promoteMasterDevice()

        # Should return True for primary device
        self.assertTrue(result)
        # Device name should be updated to virtual chassis name
        self.assertEqual(device.name, "virtual-chassis-1")

    def test_promote_master_device_secondary(self):
        """Test promoteMasterDevice when device is secondary in cluster."""
        # Set up virtual chassis with a different master device
        mock_vc = MagicMock()
        mock_vc.name = "virtual-chassis-1"
        mock_master = MagicMock()
        mock_master.id = self.mock_nb_device.id + 1  # Different ID than the current device
        mock_vc.master = mock_master
        self.mock_nb_device.virtual_chassis = mock_vc

        # Create device with the updated mock
        device = PhysicalDevice(
            self.mock_nb_device,
            self.mock_zabbix,
            self.mock_nb_journal,
            "3.0",
            logger=self.mock_logger
        )

        # Call promoteMasterDevice and check the result
        result = device.promoteMasterDevice()

        # Should return False for secondary device
        self.assertFalse(result)
        # Device name should not be modified
        self.assertEqual(device.name, "test-device")
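
The tests above repeat the same modules.device.config patch in several places. A small helper along these lines could cut the duplication; it is a sketch only, and the names BASE_CONFIG and patched_config are illustrative rather than part of the change set.

from unittest.mock import patch

BASE_CONFIG = {"device_cf": "zabbix_hostid"}

def patched_config(**overrides):
    """Patch modules.device.config with BASE_CONFIG plus per-test overrides."""
    return patch("modules.device.config", {**BASE_CONFIG, **overrides})

# usage: with patched_config(inventory_mode="manual", inventory_sync=False): ...
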
62
tests/test_tools.py
Normal file
@@ -0,0 +1,62 @@
"""Tests for the sanatize_log_output helper in the tools module."""
from modules.tools import sanatize_log_output


def test_sanatize_log_output_secrets():
    data = {
        "macros": [
            {"macro": "{$SECRET}", "type": "1", "value": "supersecret"},
            {"macro": "{$PLAIN}", "type": "0", "value": "notsecret"},
        ]
    }
    sanitized = sanatize_log_output(data)
    assert sanitized["macros"][0]["value"] == "********"
    assert sanitized["macros"][1]["value"] == "notsecret"


def test_sanatize_log_output_interface_secrets():
    data = {
        "interfaceid": 123,
        "details": {
            "authpassphrase": "supersecret",
            "privpassphrase": "anothersecret",
            "securityname": "sensitiveuser",
            "community": "public",
            "other": "normalvalue"
        }
    }
    sanitized = sanatize_log_output(data)
    # Sensitive fields should be sanitized
    assert sanitized["details"]["authpassphrase"] == "********"
    assert sanitized["details"]["privpassphrase"] == "********"
    assert sanitized["details"]["securityname"] == "********"
    # The community string is treated as sensitive as well
    assert sanitized["details"]["community"] == "********"
    # Non-sensitive fields should remain untouched
    assert sanitized["details"]["other"] == "normalvalue"
    # interfaceid should be removed
    assert "interfaceid" not in sanitized


def test_sanatize_log_output_interface_macros():
    data = {
        "interfaceid": 123,
        "details": {
            "authpassphrase": "{$SECRET_MACRO}",
            "privpassphrase": "{$SECRET_MACRO}",
            "securityname": "{$USER_MACRO}",
            "community": "{$SNMP_COMMUNITY}",
        }
    }
    sanitized = sanatize_log_output(data)
    # Macro values should not be sanitized
    assert sanitized["details"]["authpassphrase"] == "{$SECRET_MACRO}"
    assert sanitized["details"]["privpassphrase"] == "{$SECRET_MACRO}"
    assert sanitized["details"]["securityname"] == "{$USER_MACRO}"
    assert sanitized["details"]["community"] == "{$SNMP_COMMUNITY}"
    assert "interfaceid" not in sanitized


def test_sanatize_log_output_plain_data():
    data = {"foo": "bar", "baz": 123}
    sanitized = sanatize_log_output(data)
    assert sanitized == data


def test_sanatize_log_output_non_dict():
    data = [1, 2, 3]
    sanitized = sanatize_log_output(data)
    assert sanitized == data
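
A quick standalone check of the sanitisation behaviour covered above (the payload is illustrative):

from modules.tools import sanatize_log_output

payload = {"macros": [{"macro": "{$SECRET}", "type": "1", "value": "hunter2"}]}
print(sanatize_log_output(payload))
# expected, per the tests: the secret macro value is masked as "********"
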
125
tests/test_usermacros.py
Normal file
@@ -0,0 +1,125 @@
"""Tests for usermacro synchronisation and the ZabbixUsermacros class."""
import unittest
from unittest.mock import MagicMock, patch
from modules.device import PhysicalDevice
from modules.usermacros import ZabbixUsermacros


class DummyNB:
    """Minimal stand-in for a NetBox object used by the usermacro tests."""

    def __init__(self, name="dummy", config_context=None, **kwargs):
        self.name = name
        self.config_context = config_context or {}
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __getitem__(self, key):
        # Allow dict-style access for test compatibility
        if hasattr(self, key):
            return getattr(self, key)
        if key in self.config_context:
            return self.config_context[key]
        raise KeyError(key)


class TestUsermacroSync(unittest.TestCase):
    """Tests for PhysicalDevice.set_usermacros under the different usermacro_sync settings."""

    def setUp(self):
        self.nb = DummyNB(serial="1234")
        self.logger = MagicMock()
        self.usermacro_map = {"serial": "{$HW_SERIAL}"}

    @patch("modules.device.config", {"usermacro_sync": False})
    def test_usermacro_sync_false(self):
        device = PhysicalDevice.__new__(PhysicalDevice)
        device.nb = self.nb
        device.logger = self.logger
        device.name = "dummy"
        device._usermacro_map = MagicMock(return_value=self.usermacro_map)
        # call set_usermacros
        result = device.set_usermacros()
        self.assertEqual(device.usermacros, [])
        self.assertTrue(result is True or result is None)

    @patch("modules.device.config", {"usermacro_sync": True})
    def test_usermacro_sync_true(self):
        device = PhysicalDevice.__new__(PhysicalDevice)
        device.nb = self.nb
        device.logger = self.logger
        device.name = "dummy"
        device._usermacro_map = MagicMock(return_value=self.usermacro_map)
        result = device.set_usermacros()
        self.assertIsInstance(device.usermacros, list)
        self.assertGreater(len(device.usermacros), 0)

    @patch("modules.device.config", {"usermacro_sync": "full"})
    def test_usermacro_sync_full(self):
        device = PhysicalDevice.__new__(PhysicalDevice)
        device.nb = self.nb
        device.logger = self.logger
        device.name = "dummy"
        device._usermacro_map = MagicMock(return_value=self.usermacro_map)
        result = device.set_usermacros()
        self.assertIsInstance(device.usermacros, list)
        self.assertGreater(len(device.usermacros), 0)


class TestZabbixUsermacros(unittest.TestCase):
    """Tests for the ZabbixUsermacros helper class."""

    def setUp(self):
        self.nb = DummyNB()
        self.logger = MagicMock()

    def test_validate_macro_valid(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        self.assertTrue(macros.validate_macro("{$TEST_MACRO}"))
        self.assertTrue(macros.validate_macro("{$A1_2.3}"))
        self.assertTrue(macros.validate_macro("{$FOO:bar}"))

    def test_validate_macro_invalid(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        self.assertFalse(macros.validate_macro("$TEST_MACRO"))
        self.assertFalse(macros.validate_macro("{TEST_MACRO}"))
        self.assertFalse(macros.validate_macro("{$test}"))  # lower-case not allowed
        self.assertFalse(macros.validate_macro(""))

    def test_render_macro_dict(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        macro = macros.render_macro(
            "{$FOO}", {"value": "bar", "type": "secret", "description": "desc"}
        )
        self.assertEqual(macro["macro"], "{$FOO}")
        self.assertEqual(macro["value"], "bar")
        self.assertEqual(macro["type"], "1")
        self.assertEqual(macro["description"], "desc")

    def test_render_macro_dict_missing_value(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        result = macros.render_macro("{$FOO}", {"type": "text"})
        self.assertFalse(result)
        self.logger.info.assert_called()

    def test_render_macro_str(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        macro = macros.render_macro("{$FOO}", "bar")
        self.assertEqual(macro["macro"], "{$FOO}")
        self.assertEqual(macro["value"], "bar")
        self.assertEqual(macro["type"], "0")
        self.assertEqual(macro["description"], "")

    def test_render_macro_invalid_name(self):
        macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger)
        result = macros.render_macro("FOO", "bar")
        self.assertFalse(result)
        self.logger.warning.assert_called()

    def test_generate_from_map(self):
        nb = DummyNB(memory="bar", role="baz")
        usermacro_map = {"memory": "{$FOO}", "role": "{$BAR}"}
        macros = ZabbixUsermacros(nb, usermacro_map, True, logger=self.logger)
        result = macros.generate()
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]["macro"], "{$FOO}")
        self.assertEqual(result[1]["macro"], "{$BAR}")

    def test_generate_from_config_context(self):
        config_context = {"zabbix": {"usermacros": {"{$FOO}": {"value": "bar"}}}}
        nb = DummyNB(config_context=config_context)
        macros = ZabbixUsermacros(nb, {}, True, logger=self.logger)
        result = macros.generate()
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]["macro"], "{$FOO}")


if __name__ == "__main__":
    unittest.main()
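
The suites added in this change set are plain unittest-style test modules, so from the repository root they should be runnable with something like:

python3 -m unittest discover -s tests -v

pytest, if installed, also discovers them (pytest tests/).
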