diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..99322f6 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,22 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/python +{ + "name": "Python 3", + // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile + "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye", + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "pip3 install --user -r requirements.txt && pip3 install --user pylint pytest" + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "root" +} diff --git a/.github/workflows/publish-image.yml b/.github/workflows/publish-image.yml index 615b784..c18dc39 100644 --- a/.github/workflows/publish-image.yml +++ b/.github/workflows/publish-image.yml @@ -1,14 +1,10 @@ +--- name: Build and Push Docker Image -permissions: - contents: read - packages: write - on: - release: - types: [published] - pull_request: - types: [opened, synchronize] + push: + branches: + - main jobs: test_quality: diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml index 7b01f6f..af7cc25 100644 --- a/.github/workflows/quality.yml +++ b/.github/workflows/quality.yml @@ -1,15 +1,16 @@ --- name: Pylint Quality control -on: - workflow_call +on: + push: + pull_request: jobs: - build: + python_quality_testing: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.11","3.12"] + python-version: ["3.12","3.13"] steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} @@ -23,4 +24,4 @@ jobs: pip install -r requirements.txt - name: Analysing the code with pylint run: | - pylint --module-naming-style=any $(git ls-files '*.py') + pylint --module-naming-style=any modules/* netbox_zabbix_sync.py diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml new file mode 100644 index 0000000..db4456e --- /dev/null +++ b/.github/workflows/run_tests.yml @@ -0,0 +1,34 @@ +--- +name: Pytest code testing + +on: + push: + pull_request: + +jobs: + test_code: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest pytest-mock + pip install -r requirements.txt + - name: Install coverage + run: pip install coverage pytest-cov + - name: Testing the code with PyTest + run: | + cp config.py.example config.py + pytest tests + - name: Run tests with coverage + run: | + cp config.py.example config.py + coverage run -m pytest tests + - name: Check coverage percentage + run: | + coverage report --fail-under=60 diff --git a/.gitignore b/.gitignore index 2a3448b..c515fe3 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,6 @@ Pipfile.lock # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] +.vscode +.flake +.coverage \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 
c3bb81e..b7b9c68 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,6 @@ # syntax=docker/dockerfile:1 FROM python:3.12-alpine -LABEL org.opencontainers.image.source=https://github.com/TheNetworkGuy/netbox-zabbix-sync -LABEL org.opencontainers.image.title="NetBox-Zabbix-Sync" -LABEL org.opencontainers.image.description="Python script to synchronise NetBox devices to Zabbix." -LABEL org.opencontainers.image.documentation=https://github.com/TheNetworkGuy/netbox-zabbix-sync/ -LABEL org.opencontainers.image.licenses=MIT -LABEL org.opencontainers.image.authors="Twan Kamans" +RUN mkdir -p /opt/netbox-zabbix && chown -R 1000:1000 /opt/netbox-zabbix RUN mkdir -p /opt/netbox-zabbix RUN addgroup -g 1000 -S netbox-zabbix && adduser -u 1000 -S netbox-zabbix -G netbox-zabbix @@ -18,6 +13,8 @@ COPY --chown=1000:1000 . /opt/netbox-zabbix USER 1000:1000 RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi +USER root RUN pip install -r ./requirements.txt +USER 1000:1000 ENTRYPOINT ["python"] CMD ["/opt/netbox-zabbix/netbox_zabbix_sync.py", "-v"] diff --git a/README.md b/README.md index a938572..72291e2 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # NetBox to Zabbix synchronization -A script to create, update and delete Zabbix hosts using NetBox device objects. +A script to create, update and delete Zabbix hosts using NetBox device objects. Tested and compatible with all [currently supported Zabbix releases](https://www.zabbix.com/life_cycle_and_release_policy). ## Installation via Docker @@ -23,10 +23,10 @@ docker run -d -t -i -e ZABBIX_HOST='https://zabbix.local' \ --name netbox-zabbix-sync ghcr.io/thenetworkguy/netbox-zabbix-sync:main ``` -This should run a one-time sync, you can check the sync with +This should run a one-time sync. You can check the sync with `docker logs netbox-zabbix-sync`. -The image uses the default `config.py` for it's configuration, you can use a +The image uses the default `config.py` for its configuration, you can use a volume mount in the docker run command to override with your own config file if needed (see [config file](#config-file)): @@ -48,9 +48,15 @@ Make sure that you have a python environment with the following packages installed. You can also use the `requirements.txt` file for installation with pip. -``` +```sh +# Packages: pynetbox pyzabbix + +# Install them through requirements.txt from a venv: +virtualenv .venv +source .venv/bin/activate +.venv/bin/pip --require-virtualenv install -r requirements.txt ``` ### Config file @@ -58,7 +64,7 @@ pyzabbix First time user? Copy the `config.py.example` file to `config.py`. This file is used for modifying filters and setting variables such as custom field names. -``` +```sh cp config.py.example config.py ``` @@ -84,8 +90,8 @@ ZABBIX_TOKEN=othersecrettoken If you are using custom SSL certificates for NetBox and/or Zabbix, you can set the following environment variable to the path of your CA bundle file: -```bash -REQUEST_CA_BUNDLE=/path/to/your/ca-bundle.crt +```sh +export REQUESTS_CA_BUNDLE=/path/to/your/ca-bundle.crt ``` ### NetBox custom fields @@ -112,8 +118,8 @@ the template information then the zabbix_template field is not required): You can make the `zabbix_hostid` field hidden or read-only to prevent human intervention. -This is optional and there is a use case for leaving it read-write in the UI to -manually change the ID. For example to re-run a sync. +This is optional, but there may be cases where you want to leave it +read-write in the UI. 
For example to manually change or clear the ID and re-run a sync. ## Virtual Machine (VM) Syncing @@ -140,7 +146,7 @@ creation for devices in a new category. I would recommend setting this variable to `True` since leaving it on `False` results in a lot of manual work. The format can be set with the `hostgroup_format` variable for devices and -`vm_hostgroup_format` for devices. +`vm_hostgroup_format` for virtual machines. Any nested parent hostgroups will also be created automatically. For instance the region `Berlin` with parent region `Germany` will create the hostgroup @@ -184,10 +190,10 @@ used: | cluster | VM cluster name | | device | parent device | -You can specify the value seperated by a "/" like so: +You can specify the value separated by a "/" like so: ```python -hostgroup_format = "tenant/site/dev_location/role" +hostgroup_format = "tenant/site/location/role" ``` You can also provice a list of groups like so: @@ -241,7 +247,7 @@ have a relationship with a tenant. - Site: HQ-AMS ```python -hostgroup_format = "site/tenant/device_role" +hostgroup_format = "site/tenant/role" ``` When running the script like above, the following hostgroup (HG) will be @@ -314,7 +320,7 @@ device_inventory_map = {"custom_fields/mycustomfield/name": "alias"} vm_inventory_map = {"custom_fields/mycustomfield/name": "alias"} ``` -See `config.py.example` for an extensive example map. Any Zabix Inventory fields +See `config.py.example` for an extensive example map. Any Zabbix Inventory fields that are not included in the map will not be touched by the script, so you can safely add manual values or use items to automatically add values to other fields. @@ -369,7 +375,7 @@ SLA calculations and event correlation. Tags can be synced from the following sources: 1. NetBox device/vm tags -2. NetBox config ontext +2. NetBox config context 3. NetBox fields Syncing tags will override any tags that were set manually on the host, @@ -387,10 +393,10 @@ tag_lower = True #### Device tags As NetBox doesn't follow the tag/value pattern for tags, we will need a tag -name set to register the netwbox tags. +name set to register the netbox tags. By default the tag name is "NetBox", but you can change this to whatever you want. -The value for the tag can be choosen from 'name', 'display' or 'slug'. +The value for the tag can be set to 'name', 'display', or 'slug', which refers to the property of the NetBox tag object that will be used as the value in Zabbix. ```python tag_name = 'NetBox' @@ -505,8 +511,8 @@ Examples: ``` Please be aware that secret usermacros are only synced _once_ by default. -This is the default behaviour because Zabbix API won't return the value of -secrets so the script cannot compare the values with the ones set in NetBox. +This is the default behavior because Zabbix API won't return the value of +secrets so the script cannot compare the values with those set in NetBox. If you update a secret usermacro value, just remove the value from the host in Zabbix and the new value will be synced during the next run. @@ -520,8 +526,8 @@ usermacro_sync = "full" This will force a full usermacro sync on every run on hosts that have secret usermacros set. That way, you will know for sure the secret values are always up to date. -Keep in mind that NetBox (and the log output of this script) will show your secrets -in plain text. If true secrecy is required, consider switching to +Keep in mind that NetBox will show your secrets in plain text. 
+If true secrecy is required, consider switching to [vault](https://www.zabbix.com/documentation/current/en/manual/config/macros/secret_macros#vault-secret) usermacros. @@ -603,7 +609,7 @@ You can set the proxy for a device using the 'proxy' key in config context. } ``` -It is now posible to specify proxy groups with the introduction of Proxy groups +It is now possible to specify proxy groups with the introduction of Proxy groups in Zabbix 7. Specifying a group in the config context on older Zabbix releases will have no impact and the script will ignore the statement. @@ -616,9 +622,9 @@ will have no impact and the script will ignore the statement. ``` The script will prefer groups when specifying both a proxy and group. This is -done with the assumption that groups are more resiliant and HA ready, making it +done with the assumption that groups are more resilient and HA ready, making it a more logical choice to use for proxy linkage. This also makes migrating from a -proxy to proxy group easier since the group take priority over the invidivual +proxy to proxy group easier since the group take priority over the individual proxy. ```json @@ -632,13 +638,7 @@ proxy. In the example above the host will use the group on Zabbix 7. On Zabbix 6 and below the host will use the proxy. Zabbix 7 will use the proxy value when -ommiting the proxy_group value. - -Because of the possible amount of destruction when setting up NetBox but -forgetting the proxy command, the sync works a bit different. By default -everything is synced except in a situation where the Zabbix host has a proxy -configured but nothing is configured in NetBox. To force deletion and a full -sync, set the `full_proxy_sync` variable in the config file. +omitting the proxy_group value. ### Set interface parameters within NetBox @@ -655,7 +655,7 @@ Due to Zabbix limitations of changing interface type with a linked template, changing the interface type from within NetBox is not supported and the script will generate an error. -For example when changing a SNMP interface to an Agent interface: +For example, when changing a SNMP interface to an Agent interface: ``` NetBox-Zabbix-sync - WARNING - Device: Interface OUT of sync. @@ -663,11 +663,11 @@ NetBox-Zabbix-sync - ERROR - Device: changing interface type to 1 is not support ``` To configure the interface parameters you'll need to use custom context. Custom -context was used to make this script as customizable as posible for each +context was used to make this script as customizable as possible for each environment. For example, you could: - Set the custom context directly on a device -- Set the custom context on a label, which you would add to a device (for +- Set the custom context on a tag, which you would add to a device (for instance, SNMPv3) - Set the custom context on a device role - Set the custom context on a site or region @@ -721,7 +721,7 @@ I would recommend using usermacros for sensitive data such as community strings since the data in NetBox is plain-text. > **_NOTE:_** Not all SNMP data is required for a working configuration. -> [The following parameters are allowed](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed")but +> [The following parameters are allowed](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed") but > are not all required, depending on your environment. 
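The next file in this diff, `modules/config.py`, replaces the old hard-coded `from config import ...` statements with a layered loader: built-in `DEFAULT_CONFIG` values are overridden by an optional `config.py` in the project root, which is in turn overridden by environment variables carrying the `NBZX_` prefix. A minimal sketch of that resolution order, assuming the package layout added by this PR and that the script is run from the repository root (note that values coming from the environment arrive as raw strings, since `load_env_variable` simply returns the `os.environ` entry):

```python
# Sketch only: illustrates the DEFAULT_CONFIG < config.py < NBZX_* resolution
# order implemented by modules/config.py as added in this diff.
import os

from modules.config import load_config

# Environment overrides use the NBZX_ prefix plus the upper-cased key name.
os.environ["NBZX_SYNC_VMS"] = "true"

config = load_config()

# Overridden by the environment variable above; the value is the raw string
# "true", not a boolean, so downstream code should not assume a bool type.
print(config["sync_vms"])

# No override present, so this falls back to DEFAULT_CONFIG (or to config.py
# if one exists in the working directory).
print(config["hostgroup_format"])
```

This is the same precedence the new tests in `tests/test_configuration_parsing.py` assert: environment variables win over `config.py`, which wins over the built-in defaults.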
diff --git a/modules/config.py b/modules/config.py new file mode 100644 index 0000000..9f97c83 --- /dev/null +++ b/modules/config.py @@ -0,0 +1,121 @@ +""" +Module for parsing configuration from the top level config.py file +""" +from pathlib import Path +from importlib import util +from os import environ +from logging import getLogger + +logger = getLogger(__name__) + +# PLEASE NOTE: This is a sample config file. Please do NOT make any edits in this file! +# You should create your own config.py and it will overwrite the default config. + +DEFAULT_CONFIG = { + "templates_config_context": False, + "templates_config_context_overrule": False, + "template_cf": "zabbix_template", + "device_cf": "zabbix_hostid", + "clustering": False, + "create_hostgroups": True, + "create_journal": False, + "sync_vms": False, + "vm_hostgroup_format": "cluster_type/cluster/role", + "full_proxy_sync": False, + "zabbix_device_removal": ["Decommissioning", "Inventory"], + "zabbix_device_disable": ["Offline", "Planned", "Staged", "Failed"], + "hostgroup_format": "site/manufacturer/role", + "traverse_regions": False, + "traverse_site_groups": False, + "nb_device_filter": {"name__n": "null"}, + "nb_vm_filter": {"name__n": "null"}, + "inventory_mode": "disabled", + "inventory_sync": False, + "device_inventory_map": { + "asset_tag": "asset_tag", + "virtual_chassis/name": "chassis", + "status/label": "deployment_status", + "location/name": "location", + "latitude": "location_lat", + "longitude": "location_lon", + "comments": "notes", + "name": "name", + "rack/name": "site_rack", + "serial": "serialno_a", + "device_type/model": "type", + "device_type/manufacturer/name": "vendor", + "oob_ip/address": "oob_ip" + }, + "vm_inventory_map": { + "status/label": "deployment_status", + "comments": "notes", + "name": "name" + }, + "usermacro_sync": False, + "device_usermacro_map": { + "serial": "{$HW_SERIAL}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}" + }, + "vm_usermacro_map": { + "memory": "{$TOTAL_MEMORY}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}" + }, + "tag_sync": False, + "tag_lower": True, + "tag_name": 'NetBox', + "tag_value": "name", + "device_tag_map": { + "site/name": "site", + "rack/name": "rack", + "platform/name": "target" + }, + "vm_tag_map": { + "site/name": "site", + "cluster/name": "cluster", + "platform/name": "target" + } +} + + +def load_config(): + """Returns combined config from all sources""" + # Overwrite default config with config.py + conf = load_config_file(config_default=DEFAULT_CONFIG) + # Overwrite default config and config.py with environment variables + for key in conf: + value_setting = load_env_variable(key) + if value_setting is not None: + conf[key] = value_setting + return conf + + +def load_env_variable(config_environvar): + """Returns config from environment variable""" + prefix = "NBZX_" + config_environvar = prefix + config_environvar.upper() + if config_environvar in environ: + return environ[config_environvar] + return None + + +def load_config_file(config_default, config_file="config.py"): + """Returns config from config.py file""" + # Check if config.py exists and load it + # If it does not exist, return the default config + config_path = Path(config_file) + if config_path.exists(): + dconf = config_default.copy() + # Dynamically import the config module + spec = util.spec_from_file_location("config", config_path) + config_module = util.module_from_spec(spec) + spec.loader.exec_module(config_module) + # Update DEFAULT_CONFIG 
with variables from the config module + for key in dconf: + if hasattr(config_module, key): + dconf[key] = getattr(config_module, key) + return dconf + return config_default diff --git a/modules/device.py b/modules/device.py index 0293778..f02f35b 100644 --- a/modules/device.py +++ b/modules/device.py @@ -2,17 +2,17 @@ """ Device specific handeling for NetBox to Zabbix """ + from copy import deepcopy from logging import getLogger -from os import sys from re import search from operator import itemgetter from zabbix_utils import APIRequestError +from pynetbox import RequestError as NetboxRequestError from modules.exceptions import ( InterfaceConfigError, - JournalError, SyncExternalError, SyncInventoryError, TemplateError, @@ -20,33 +20,11 @@ from modules.exceptions import ( from modules.hostgroups import Hostgroup from modules.interface import ZabbixInterface from modules.tags import ZabbixTags -from modules.tools import field_mapper, remove_duplicates +from modules.tools import field_mapper, remove_duplicates, sanatize_log_output from modules.usermacros import ZabbixUsermacros +from modules.config import load_config -try: - from config import ( - device_cf, - device_inventory_map, - device_tag_map, - device_usermacro_map, - inventory_mode, - inventory_sync, - tag_lower, - tag_name, - tag_sync, - tag_value, - template_cf, - traverse_regions, - traverse_site_groups, - usermacro_sync, - ) -except ModuleNotFoundError: - print( - "Configuration file config.py not found in main directory." - "Please create the file or rename the config.py.example file to config.py." - ) - sys.exit(0) - +config = load_config() class PhysicalDevice: # pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments @@ -78,7 +56,7 @@ class PhysicalDevice: self.nb_journals = nb_journal_class self.inventory_mode = -1 self.inventory = {} - self.usermacros = {} + self.usermacros = [] self.tags = {} self.logger = logger if logger else getLogger(__name__) self._setBasics() @@ -91,15 +69,15 @@ class PhysicalDevice: def _inventory_map(self): """Use device inventory maps""" - return device_inventory_map + return config["device_inventory_map"] def _usermacro_map(self): """Use device inventory maps""" - return device_usermacro_map + return config["device_usermacro_map"] def _tag_map(self): """Use device host tag maps""" - return device_tag_map + return config["device_tag_map"] def _setBasics(self): """ @@ -115,10 +93,10 @@ class PhysicalDevice: raise SyncInventoryError(e) # Check if device has custom field for ZBX ID - if device_cf in self.nb.custom_fields: - self.zabbix_id = self.nb.custom_fields[device_cf] + if config["device_cf"] in self.nb.custom_fields: + self.zabbix_id = self.nb.custom_fields[config["device_cf"]] else: - e = f"Host {self.name}: Custom field {device_cf} not present" + e = f'Host {self.name}: Custom field {config["device_cf"]} not present' self.logger.warning(e) raise SyncInventoryError(e) @@ -147,8 +125,8 @@ class PhysicalDevice: self.nb, self.nb_api_version, logger=self.logger, - nested_sitegroup_flag=traverse_site_groups, - nested_region_flag=traverse_regions, + nested_sitegroup_flag=config['traverse_site_groups'], + nested_region_flag=config['traverse_regions'], nb_groups=nb_site_groups, nb_regions=nb_regions, ) @@ -187,18 +165,20 @@ class PhysicalDevice: # Get Zabbix templates from the device type device_type_cfs = self.nb.device_type.custom_fields # Check if the ZBX Template CF is present - if template_cf in device_type_cfs: + if config["template_cf"] in 
device_type_cfs: # Set value to template - return [device_type_cfs[template_cf]] + return [device_type_cfs[config["template_cf"]]] # Custom field not found, return error e = ( - f"Custom field {template_cf} not " + f"Custom field {config['template_cf']} not " f"found for {self.nb.device_type.manufacturer.name}" f" - {self.nb.device_type.display}." ) self.logger.warning(e) raise TemplateError(e) + + def get_templates_context(self): """Get Zabbix templates from the device context""" if "zabbix" not in self.config_context: @@ -221,25 +201,24 @@ class PhysicalDevice: def set_inventory(self, nbdevice): """Set host inventory""" # Set inventory mode. Default is disabled (see class init function). - if inventory_mode == "disabled": - if inventory_sync: - self.logger.error( - f"Host {self.name}: Unable to map NetBox inventory to Zabbix. " - "Inventory sync is enabled in config but inventory mode is disabled." - ) + if config["inventory_mode"] == "disabled": + if config["inventory_sync"]: + self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. " + "Inventory sync is enabled in " + "config but inventory mode is disabled.") return True - if inventory_mode == "manual": + if config["inventory_mode"] == "manual": self.inventory_mode = 0 - elif inventory_mode == "automatic": + elif config["inventory_mode"] == "automatic": self.inventory_mode = 1 else: self.logger.error( f"Host {self.name}: Specified value for inventory mode in" - f" config is not valid. Got value {inventory_mode}" + f" config is not valid. Got value {config['inventory_mode']}" ) return False self.inventory = {} - if inventory_sync and self.inventory_mode in [0, 1]: + if config["inventory_sync"] and self.inventory_mode in [0, 1]: self.logger.debug(f"Host {self.name}: Starting inventory mapper") self.inventory = field_mapper( self.name, self._inventory_map(), nbdevice, self.logger @@ -377,7 +356,7 @@ class PhysicalDevice: def _zeroize_cf(self): """Sets the hostID custom field in NetBox to zero, effectively destroying the link""" - self.nb.custom_fields[device_cf] = None + self.nb.custom_fields[config["device_cf"]] = None self.nb.save() def _zabbixHostnameExists(self): @@ -421,12 +400,13 @@ class PhysicalDevice: macros = ZabbixUsermacros( self.nb, self._usermacro_map(), - usermacro_sync, + config['usermacro_sync'], logger=self.logger, host=self.name, ) if macros.sync is False: self.usermacros = [] + return True self.usermacros = macros.generate() return True @@ -438,10 +418,10 @@ class PhysicalDevice: tags = ZabbixTags( self.nb, self._tag_map(), - tag_sync, - tag_lower, - tag_name=tag_name, - tag_value=tag_value, + config['tag_sync'], + config['tag_lower'], + tag_name=config['tag_name'], + tag_value=config['tag_value'], logger=self.logger, host=self.name, ) @@ -459,7 +439,7 @@ class PhysicalDevice: input: List of all proxies and proxy groups in standardized format """ # check if the key Zabbix is defined in the config context - if not "zabbix" in self.nb.config_context: + if "zabbix" not in self.nb.config_context: return False if ( "proxy" in self.nb.config_context["zabbix"] @@ -557,7 +537,7 @@ class PhysicalDevice: self.logger.error(msg) raise SyncExternalError(msg) from e # Set NetBox custom field to hostID value. - self.nb.custom_fields[device_cf] = int(self.zabbix_id) + self.nb.custom_fields[config["device_cf"]] = int(self.zabbix_id) self.nb.save() msg = f"Host {self.name}: Created host in Zabbix." 
self.logger.info(msg) @@ -622,7 +602,7 @@ class PhysicalDevice: ) self.logger.error(e) raise SyncExternalError(e) from None - self.logger.info(f"Updated host {self.name} with data {kwargs}.") + self.logger.info(f"Host {self.name}: updated with data {sanatize_log_output(kwargs)}.") self.create_journal_entry("info", "Updated host in Zabbix with latest NB data.") def ConsistencyCheck( @@ -738,10 +718,8 @@ class PhysicalDevice: # Check if a proxy has been defined if self.zbxproxy: # Check if proxy or proxy group is defined - if ( - self.zbxproxy["idtype"] in host - and host[self.zbxproxy["idtype"]] == self.zbxproxy["id"] - ): + if (self.zbxproxy["idtype"] in host and + host[self.zbxproxy["idtype"]] == self.zbxproxy["id"]): self.logger.debug(f"Host {self.name}: proxy in-sync.") # Backwards compatibility for Zabbix <= 6 elif "proxy_hostid" in host and host["proxy_hostid"] == self.zbxproxy["id"]: @@ -799,7 +777,7 @@ class PhysicalDevice: else: self.logger.warning(f"Host {self.name}: inventory_mode OUT of sync.") self.updateZabbixHost(inventory_mode=str(self.inventory_mode)) - if inventory_sync and self.inventory_mode in [0, 1]: + if config["inventory_sync"] and self.inventory_mode in [0, 1]: # Check host inventory mapping if host["inventory"] == self.inventory: self.logger.debug(f"Host {self.name}: inventory in-sync.") @@ -808,24 +786,33 @@ class PhysicalDevice: self.updateZabbixHost(inventory=self.inventory) # Check host usermacros - if usermacro_sync: - macros_filtered = [] - # Do not re-sync secret usermacros unless sync is set to 'full' - if str(usermacro_sync).lower() != "full": - for m in deepcopy(self.usermacros): - if m["type"] == str(1): - # Remove the value as the api doesn't return it - # this will allow us to only update usermacros that don't exist - m.pop("value") - macros_filtered.append(m) - if host["macros"] == self.usermacros or host["macros"] == macros_filtered: + if config['usermacro_sync']: + # Make a full copy synce we dont want to lose the original value + # of secret type macros from Netbox + netbox_macros = deepcopy(self.usermacros) + # Set the sync bit + full_sync_bit = bool(str(config['usermacro_sync']).lower() == "full") + for macro in netbox_macros: + # If the Macro is a secret and full sync is NOT activated + if macro["type"] == str(1) and not full_sync_bit: + # Remove the value as the Zabbix api does not return the value key + # This is required when you want to do a diff between both lists + macro.pop("value") + # Sort all lists + def filter_with_macros(macro): + return macro["macro"] + host["macros"].sort(key=filter_with_macros) + netbox_macros.sort(key=filter_with_macros) + # Check if both lists are the same + if host["macros"] == netbox_macros: self.logger.debug(f"Host {self.name}: usermacros in-sync.") else: self.logger.warning(f"Host {self.name}: usermacros OUT of sync.") + # Update Zabbix with NetBox usermacros self.updateZabbixHost(macros=self.usermacros) - # Check host usermacros - if tag_sync: + # Check host tags + if config['tag_sync']: if remove_duplicates(host["tags"], sortkey="tag") == self.tags: self.logger.debug(f"Host {self.name}: tags in-sync.") else: @@ -881,7 +868,8 @@ class PhysicalDevice: try: # API call to Zabbix self.zabbix.hostinterface.update(updates) - e = f"Host {self.name}: solved interface conflict." 
+ e = (f"Host {self.name}: updated interface " + f"with data {sanatize_log_output(updates)}.") self.logger.info(e) self.create_journal_entry("info", e) except APIRequestError as e: @@ -923,7 +911,7 @@ class PhysicalDevice: self.nb_journals.create(journal) self.logger.debug(f"Host {self.name}: Created journal entry in NetBox") return True - except JournalError(e) as e: + except NetboxRequestError as e: self.logger.warning( "Unable to create journal entry for " f"{self.name}: NB returned {e}" diff --git a/modules/logging.py b/modules/logging.py index c36c2c1..e96e6e9 100644 --- a/modules/logging.py +++ b/modules/logging.py @@ -21,9 +21,10 @@ def setup_logger(): """ # Set logging lgout = logging.StreamHandler() - lgfile = logging.FileHandler( - path.join(path.dirname(path.realpath(__file__)), "sync.log") - ) + # Logfile in the project root + project_root = path.dirname(path.dirname(path.realpath(__file__))) + logfile_path = path.join(project_root, "sync.log") + lgfile = logging.FileHandler(logfile_path) logging.basicConfig( format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", diff --git a/modules/tools.py b/modules/tools.py index 5a7d8d3..3f7bf81 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -100,6 +100,7 @@ def remove_duplicates(input_list, sortkey=None): output_list.sort(key=lambda x: x[sortkey]) return output_list + def verify_hg_format(hg_format, device_cfs=[], vm_cfs=[], hg_type="dev", logger=None): """ Verifies hostgroup field format @@ -148,4 +149,40 @@ def verify_hg_format(hg_format, device_cfs=[], vm_cfs=[], hg_type="dev", logger= ) logger.error(e) raise HostgroupError(e) - + + +def sanatize_log_output(data): + """ + Used for the update function to Zabbix which + shows the data that its using to update the host. + Removes and sensitive data from the input. + """ + if not isinstance(data, dict): + return data + sanitized_data = data.copy() + # Check if there are any sensitive macros defined in the data + if "macros" in data: + for macro in sanitized_data["macros"]: + # Check if macro is secret type + if not macro["type"] == str(1): + continue + macro["value"] = "********" + # Check for interface data + if "interfaceid" in data: + # Interface ID is a value which is most likely not helpful + # in logging output or for troubleshooting. + del sanitized_data["interfaceid"] + # InterfaceID also hints that this is a interface update. + # A check is required if there are no macro's used for SNMP security parameters. + if not "details" in data: + return sanitized_data + for key, detail in sanitized_data["details"].items(): + # If the detail is a secret, we don't want to log it. + if key in ("authpassphrase", "privpassphrase", "securityname", "community"): + # Check if a macro is used. + # If so then logging the output is not a security issue. + if detail.startswith("{$") and detail.endswith("}"): + continue + # A macro is not used, so we sanitize the value. 
+ sanitized_data["details"][key] = "********" + return sanitized_data \ No newline at end of file diff --git a/modules/usermacros.py b/modules/usermacros.py index 29580d1..6d396c8 100644 --- a/modules/usermacros.py +++ b/modules/usermacros.py @@ -10,7 +10,7 @@ from modules.tools import field_mapper class ZabbixUsermacros: - """Class that represents a Zabbix interface.""" + """Class that represents Zabbix usermacros.""" def __init__(self, nb, usermacro_map, usermacro_sync, logger=None, host=None): self.nb = nb @@ -57,7 +57,8 @@ class ZabbixUsermacros: macro["macro"] = str(macro_name) if isinstance(macro_properties, dict): if not "value" in macro_properties: - self.logger.error(f"Usermacro {macro_name} has no value, skipping.") + self.logger.warning(f"Host {self.name}: Usermacro {macro_name} has " + "no value in Netbox, skipping.") return False macro["value"] = macro_properties["value"] @@ -76,13 +77,18 @@ class ZabbixUsermacros: else: macro["description"] = "" - elif isinstance(macro_properties, str): + elif isinstance(macro_properties, str) and macro_properties: macro["value"] = macro_properties macro["type"] = str(0) macro["description"] = "" + + else: + self.logger.warning(f"Host {self.name}: Usermacro {macro_name} " + "has no value, skipping.") + return False else: self.logger.error( - f"Usermacro {macro_name} is not a valid usermacro name, skipping." + f"Host {self.name}: Usermacro {macro_name} is not a valid usermacro name, skipping." ) return False return macro diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index 8915832..e0f7abb 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -1,27 +1,12 @@ # pylint: disable=duplicate-code """Module that hosts all functions for virtual machine processing""" - -from os import sys - from modules.device import PhysicalDevice from modules.exceptions import InterfaceConfigError, SyncInventoryError, TemplateError from modules.hostgroups import Hostgroup from modules.interface import ZabbixInterface - -try: - from config import ( - traverse_regions, - traverse_site_groups, - vm_inventory_map, - vm_tag_map, - vm_usermacro_map, - ) -except ModuleNotFoundError: - print( - "Configuration file config.py not found in main directory." - "Please create the file or rename the config.py.example file to config.py." 
- ) - sys.exit(0) +from modules.config import load_config +# Load config +config = load_config() class VirtualMachine(PhysicalDevice): @@ -34,15 +19,15 @@ class VirtualMachine(PhysicalDevice): def _inventory_map(self): """use VM inventory maps""" - return vm_inventory_map + return config["vm_inventory_map"] def _usermacro_map(self): """use VM usermacro maps""" - return vm_usermacro_map + return config["vm_usermacro_map"] def _tag_map(self): """use VM tag maps""" - return vm_tag_map + return config["vm_tag_map"] def set_hostgroup(self, hg_format, nb_site_groups, nb_regions): """Set the hostgroup for this device""" @@ -52,8 +37,8 @@ class VirtualMachine(PhysicalDevice): self.nb, self.nb_api_version, logger=self.logger, - nested_sitegroup_flag=traverse_site_groups, - nested_region_flag=traverse_regions, + nested_sitegroup_flag=config["traverse_site_groups"], + nested_region_flag=config["traverse_regions"], nb_groups=nb_site_groups, nb_regions=nb_regions, ) diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 3e50aff..afab914 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -11,35 +11,14 @@ from pynetbox import api from pynetbox.core.query import RequestError as NBRequestError from requests.exceptions import ConnectionError as RequestsConnectionError from zabbix_utils import APIRequestError, ProcessingError, ZabbixAPI - +from modules.config import load_config from modules.device import PhysicalDevice from modules.exceptions import EnvironmentVarError, SyncError from modules.logging import get_logger, set_log_levels, setup_logger from modules.tools import convert_recordset, proxy_prepper, verify_hg_format from modules.virtual_machine import VirtualMachine -try: - from config import ( - clustering, - create_hostgroups, - create_journal, - full_proxy_sync, - hostgroup_format, - nb_device_filter, - nb_vm_filter, - sync_vms, - templates_config_context, - templates_config_context_overrule, - vm_hostgroup_format, - zabbix_device_disable, - zabbix_device_removal, - ) -except ModuleNotFoundError: - print( - "Configuration file config.py not found in main directory." - "Please create the file or rename the config.py.example file to config.py." - ) - sys.exit(1) +config = load_config() setup_logger() @@ -135,10 +114,11 @@ def main(arguments): else: proxy_name = "name" # Get all Zabbix and NetBox data - netbox_devices = list(netbox.dcim.devices.filter(**nb_device_filter)) + netbox_devices = list(netbox.dcim.devices.filter(**config["nb_device_filter"])) netbox_vms = [] - if sync_vms: - netbox_vms = list(netbox.virtualization.virtual_machines.filter(**nb_vm_filter)) + if config["sync_vms"]: + netbox_vms = list( + netbox.virtualization.virtual_machines.filter(**config["nb_vm_filter"])) netbox_site_groups = convert_recordset((netbox.dcim.site_groups.all())) netbox_regions = convert_recordset(netbox.dcim.regions.all()) netbox_journals = netbox.extras.journal_entries @@ -159,15 +139,15 @@ def main(arguments): # Go through all NetBox devices for nb_vm in netbox_vms: try: - vm = VirtualMachine( - nb_vm, zabbix, netbox_journals, nb_version, create_journal, logger - ) - logger.debug(f"Host {vm.name}: Started operations on VM.") + vm = VirtualMachine(nb_vm, zabbix, netbox_journals, nb_version, + config["create_journal"], logger) + logger.debug(f"Host {vm.name}: started operations on VM.") vm.set_vm_template() # Check if a valid template has been found for this VM. 
if not vm.zbx_template_names: continue - vm.set_hostgroup(vm_hostgroup_format, netbox_site_groups, netbox_regions) + vm.set_hostgroup(config["vm_hostgroup_format"], + netbox_site_groups, netbox_regions) # Check if a valid hostgroup has been found for this VM. if not vm.hostgroups: continue @@ -175,7 +155,7 @@ def main(arguments): vm.set_usermacros() vm.set_tags() # Checks if device is in cleanup state - if vm.status in zabbix_device_removal: + if vm.status in config["zabbix_device_removal"]: if vm.zabbix_id: # Delete device from Zabbix # and remove hostID from NetBox. @@ -190,7 +170,7 @@ def main(arguments): ) continue # Check if the VM is in the disabled state - if vm.status in zabbix_device_disable: + if vm.status in config["zabbix_device_disable"]: vm.zabbix_state = 1 # Add hostgroup if config is set if create_hostgroups: @@ -206,8 +186,8 @@ def main(arguments): zabbix_groups, zabbix_templates, zabbix_proxy_list, - full_proxy_sync, - create_hostgroups, + config["full_proxy_sync"], + config["create_hostgroups"], ) continue # Add VM to Zabbix @@ -218,17 +198,16 @@ def main(arguments): for nb_device in netbox_devices: try: # Set device instance set data such as hostgroup and template information. - device = PhysicalDevice( - nb_device, zabbix, netbox_journals, nb_version, create_journal, logger - ) + device = PhysicalDevice(nb_device, zabbix, netbox_journals, nb_version, + config["create_journal"], logger) logger.debug(f"Host {device.name}: started operations on device.") - device.set_template( - templates_config_context, templates_config_context_overrule - ) + device.set_template(config["templates_config_context"], + config["templates_config_context_overrule"]) # Check if a valid template has been found for this VM. if not device.zbx_template_names: continue - device.set_hostgroup(hostgroup_format, netbox_site_groups, netbox_regions) + device.set_hostgroup( + config["hostgroup_format"], netbox_site_groups, netbox_regions) # Check if a valid hostgroup has been found for this VM. if not device.hostgroups: continue @@ -237,7 +216,7 @@ def main(arguments): device.set_tags() # Checks if device is part of cluster. # Requires clustering variable - if device.isCluster() and clustering: + if device.isCluster() and config["clustering"]: # Check if device is primary or secondary if device.promoteMasterDevice(): e = f"Device {device.name}: is " f"part of cluster and primary." @@ -252,7 +231,7 @@ def main(arguments): logger.info(e) continue # Checks if device is in cleanup state - if device.status in zabbix_device_removal: + if device.status in config["zabbix_device_removal"]: if device.zabbix_id: # Delete device from Zabbix # and remove hostID from NetBox. 
@@ -267,7 +246,7 @@ def main(arguments): ) continue # Check if the device is in the disabled state - if device.status in zabbix_device_disable: + if device.status in config["zabbix_device_disable"]: device.zabbix_state = 1 # Add hostgroup is config is set if create_hostgroups: @@ -283,14 +262,15 @@ def main(arguments): zabbix_groups, zabbix_templates, zabbix_proxy_list, - full_proxy_sync, - create_hostgroups, + config["full_proxy_sync"], + config["create_hostgroups"], ) continue # Add device to Zabbix device.createInZabbix(zabbix_groups, zabbix_templates, zabbix_proxy_list) except SyncError: pass + zabbix.logout() if __name__ == "__main__": diff --git a/requirements.txt b/requirements.txt index 33f4b90..295b59f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -pynetbox -zabbix-utils==2.0.1 +pynetbox==7.4.1 +zabbix-utils==2.0.2 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_configuration_parsing.py b/tests/test_configuration_parsing.py new file mode 100644 index 0000000..641b508 --- /dev/null +++ b/tests/test_configuration_parsing.py @@ -0,0 +1,139 @@ +"""Tests for configuration parsing in the modules.config module.""" +from unittest.mock import patch, MagicMock +import os +from modules.config import load_config, DEFAULT_CONFIG, load_config_file, load_env_variable + + +def test_load_config_defaults(): + """Test that load_config returns default values when no config file or env vars are present""" + with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \ + patch('modules.config.load_env_variable', return_value=None): + config = load_config() + assert config == DEFAULT_CONFIG + assert config["templates_config_context"] is False + assert config["create_hostgroups"] is True + + +def test_load_config_file(): + """Test that load_config properly loads values from config file""" + mock_config = DEFAULT_CONFIG.copy() + mock_config["templates_config_context"] = True + mock_config["sync_vms"] = True + + with patch('modules.config.load_config_file', return_value=mock_config), \ + patch('modules.config.load_env_variable', return_value=None): + config = load_config() + assert config["templates_config_context"] is True + assert config["sync_vms"] is True + # Unchanged values should remain as defaults + assert config["create_journal"] is False + + +def test_load_env_variables(): + """Test that load_config properly loads values from environment variables""" + # Mock env variable loading to return values for specific keys + def mock_load_env(key): + if key == "sync_vms": + return True + if key == "create_journal": + return True + return None + + with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \ + patch('modules.config.load_env_variable', side_effect=mock_load_env): + config = load_config() + assert config["sync_vms"] is True + assert config["create_journal"] is True + # Unchanged values should remain as defaults + assert config["templates_config_context"] is False + + +def test_env_vars_override_config_file(): + """Test that environment variables override values from config file""" + mock_config = DEFAULT_CONFIG.copy() + mock_config["templates_config_context"] = True + mock_config["sync_vms"] = False + + # Mock env variable that will override the config file value + def mock_load_env(key): + if key == "sync_vms": + return True + return None + + with patch('modules.config.load_config_file', return_value=mock_config), \ + 
patch('modules.config.load_env_variable', side_effect=mock_load_env): + config = load_config() + # This should be overridden by the env var + assert config["sync_vms"] is True + # This should remain from the config file + assert config["templates_config_context"] is True + + +def test_load_config_file_function(): + """Test the load_config_file function directly""" + # Test when the file exists + with patch('pathlib.Path.exists', return_value=True), \ + patch('importlib.util.spec_from_file_location') as mock_spec: + # Setup the mock module with attributes + mock_module = MagicMock() + mock_module.templates_config_context = True + mock_module.sync_vms = True + + # Setup the mock spec + mock_spec_instance = MagicMock() + mock_spec.return_value = mock_spec_instance + mock_spec_instance.loader.exec_module = lambda x: None + + # Patch module_from_spec to return our mock module + with patch('importlib.util.module_from_spec', return_value=mock_module): + config = load_config_file(DEFAULT_CONFIG.copy()) + assert config["templates_config_context"] is True + assert config["sync_vms"] is True + + +def test_load_config_file_not_found(): + """Test load_config_file when the config file doesn't exist""" + with patch('pathlib.Path.exists', return_value=False): + result = load_config_file(DEFAULT_CONFIG.copy()) + # Should return a dict equal to DEFAULT_CONFIG, not a new object + assert result == DEFAULT_CONFIG + + +def test_load_env_variable_function(): + """Test the load_env_variable function directly""" + # Create a real environment variable for testing with correct prefix and uppercase + test_var = "NBZX_TEMPLATES_CONFIG_CONTEXT" + original_env = os.environ.get(test_var, None) + try: + # Set the environment variable with the proper prefix and case + os.environ[test_var] = "True" + + # Test that it's properly read (using lowercase in the function call) + value = load_env_variable("templates_config_context") + assert value == "True" + + # Test when the environment variable doesn't exist + value = load_env_variable("nonexistent_variable") + assert value is None + finally: + # Clean up - restore original environment + if original_env is not None: + os.environ[test_var] = original_env + else: + os.environ.pop(test_var, None) + + +def test_load_config_file_exception_handling(): + """Test that load_config_file handles exceptions gracefully""" + # This test requires modifying the load_config_file function to handle exceptions + # For now, we're just checking that an exception is raised + with patch('pathlib.Path.exists', return_value=True), \ + patch('importlib.util.spec_from_file_location', side_effect=Exception("Import error")): + # Since the current implementation doesn't handle exceptions, we should + # expect an exception to be raised + try: + load_config_file(DEFAULT_CONFIG.copy()) + assert False, "An exception should have been raised" + except Exception: # pylint: disable=broad-except + # This is expected + pass diff --git a/tests/test_device_deletion.py b/tests/test_device_deletion.py new file mode 100644 index 0000000..392ba1a --- /dev/null +++ b/tests/test_device_deletion.py @@ -0,0 +1,166 @@ +"""Tests for device deletion functionality in the PhysicalDevice class.""" +import unittest +from unittest.mock import MagicMock, patch +from zabbix_utils import APIRequestError +from modules.device import PhysicalDevice +from modules.exceptions import SyncExternalError + + +class TestDeviceDeletion(unittest.TestCase): + """Test class for device deletion functionality.""" + + def setUp(self): + """Set up test 
fixtures.""" + # Create mock NetBox device + self.mock_nb_device = MagicMock() + self.mock_nb_device.id = 123 + self.mock_nb_device.name = "test-device" + self.mock_nb_device.status.label = "Decommissioning" + self.mock_nb_device.custom_fields = {"zabbix_hostid": "456"} + self.mock_nb_device.config_context = {} + + # Set up a primary IP + primary_ip = MagicMock() + primary_ip.address = "192.168.1.1/24" + self.mock_nb_device.primary_ip = primary_ip + + # Create mock Zabbix API + self.mock_zabbix = MagicMock() + self.mock_zabbix.version = "6.0" + + # Set up mock host.get response + self.mock_zabbix.host.get.return_value = [{"hostid": "456"}] + + # Mock NetBox journal class + self.mock_nb_journal = MagicMock() + + # Create logger mock + self.mock_logger = MagicMock() + + # Create PhysicalDevice instance with mocks + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + self.device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + journal=True, + logger=self.mock_logger + ) + + def test_cleanup_successful_deletion(self): + """Test successful device deletion from Zabbix.""" + # Setup + self.mock_zabbix.host.get.return_value = [{"hostid": "456"}] + self.mock_zabbix.host.delete.return_value = {"hostids": ["456"]} + + # Execute + self.device.cleanup() + + # Verify + self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[]) + self.mock_zabbix.host.delete.assert_called_once_with('456') + self.mock_nb_device.save.assert_called_once() + self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"]) + self.mock_logger.info.assert_called_with(f"Host {self.device.name}: " + "Deleted host from Zabbix.") + + def test_cleanup_device_already_deleted(self): + """Test cleanup when device is already deleted from Zabbix.""" + # Setup + self.mock_zabbix.host.get.return_value = [] # Empty list means host not found + + # Execute + self.device.cleanup() + + # Verify + self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[]) + self.mock_zabbix.host.delete.assert_not_called() + self.mock_nb_device.save.assert_called_once() + self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"]) + self.mock_logger.info.assert_called_with( + f"Host {self.device.name}: was already deleted from Zabbix. 
Removed link in NetBox.") + + def test_cleanup_api_error(self): + """Test cleanup when Zabbix API returns an error.""" + # Setup + self.mock_zabbix.host.get.return_value = [{"hostid": "456"}] + self.mock_zabbix.host.delete.side_effect = APIRequestError("API Error") + + # Execute and verify + with self.assertRaises(SyncExternalError): + self.device.cleanup() + + # Verify correct calls were made + self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[]) + self.mock_zabbix.host.delete.assert_called_once_with('456') + self.mock_nb_device.save.assert_not_called() + self.mock_logger.error.assert_called() + + def test_zeroize_cf(self): + """Test _zeroize_cf method that clears the custom field.""" + # Execute + self.device._zeroize_cf() # pylint: disable=protected-access + + # Verify + self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"]) + self.mock_nb_device.save.assert_called_once() + + def test_create_journal_entry(self): + """Test create_journal_entry method.""" + # Setup + test_message = "Test journal entry" + + # Execute + result = self.device.create_journal_entry("info", test_message) + + # Verify + self.assertTrue(result) + self.mock_nb_journal.create.assert_called_once() + journal_entry = self.mock_nb_journal.create.call_args[0][0] + self.assertEqual(journal_entry["assigned_object_type"], "dcim.device") + self.assertEqual(journal_entry["assigned_object_id"], 123) + self.assertEqual(journal_entry["kind"], "info") + self.assertEqual(journal_entry["comments"], test_message) + + def test_create_journal_entry_invalid_severity(self): + """Test create_journal_entry with invalid severity.""" + # Execute + result = self.device.create_journal_entry("invalid", "Test message") + + # Verify + self.assertFalse(result) + self.mock_nb_journal.create.assert_not_called() + self.mock_logger.warning.assert_called() + + def test_create_journal_entry_when_disabled(self): + """Test create_journal_entry when journaling is disabled.""" + # Setup - create device with journal=False + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + journal=False, # Disable journaling + logger=self.mock_logger + ) + + # Execute + result = device.create_journal_entry("info", "Test message") + + # Verify + self.assertFalse(result) + self.mock_nb_journal.create.assert_not_called() + + def test_cleanup_updates_journal(self): + """Test that cleanup method creates a journal entry.""" + # Setup + self.mock_zabbix.host.get.return_value = [{"hostid": "456"}] + + # Execute + with patch.object(self.device, 'create_journal_entry') as mock_journal_entry: + self.device.cleanup() + + # Verify + mock_journal_entry.assert_called_once_with("warning", "Deleted host from Zabbix") diff --git a/tests/test_interface.py b/tests/test_interface.py new file mode 100644 index 0000000..ff55218 --- /dev/null +++ b/tests/test_interface.py @@ -0,0 +1,247 @@ +"""Tests for the ZabbixInterface class in the interface module.""" +import unittest +from modules.interface import ZabbixInterface +from modules.exceptions import InterfaceConfigError + + +class TestZabbixInterface(unittest.TestCase): + """Test class for ZabbixInterface functionality.""" + + def setUp(self): + """Set up test fixtures.""" + self.test_ip = "192.168.1.1" + self.empty_context = {} + self.default_interface = ZabbixInterface(self.empty_context, self.test_ip) + + # Create some test contexts for different scenarios + 
self.snmpv2_context = { + "zabbix": { + "interface_type": 2, + "interface_port": "161", + "snmp": { + "version": 2, + "community": "public", + "bulk": 1 + } + } + } + + self.snmpv3_context = { + "zabbix": { + "interface_type": 2, + "snmp": { + "version": 3, + "securityname": "snmpuser", + "securitylevel": "authPriv", + "authprotocol": "SHA", + "authpassphrase": "authpass123", + "privprotocol": "AES", + "privpassphrase": "privpass123", + "contextname": "context1" + } + } + } + + self.agent_context = { + "zabbix": { + "interface_type": 1, + "interface_port": "10050" + } + } + + def test_init(self): + """Test initialization of ZabbixInterface.""" + interface = ZabbixInterface(self.empty_context, self.test_ip) + + # Check basic properties + self.assertEqual(interface.ip, self.test_ip) + self.assertEqual(interface.context, self.empty_context) + self.assertEqual(interface.interface["ip"], self.test_ip) + self.assertEqual(interface.interface["main"], "1") + self.assertEqual(interface.interface["useip"], "1") + self.assertEqual(interface.interface["dns"], "") + + def test_get_context_empty(self): + """Test get_context with empty context.""" + interface = ZabbixInterface(self.empty_context, self.test_ip) + result = interface.get_context() + self.assertFalse(result) + + def test_get_context_with_interface_type(self): + """Test get_context with interface_type but no port.""" + context = {"zabbix": {"interface_type": 2}} + interface = ZabbixInterface(context, self.test_ip) + + # Should set type and default port + result = interface.get_context() + self.assertTrue(result) + self.assertEqual(interface.interface["type"], 2) + self.assertEqual(interface.interface["port"], "161") # Default port for SNMP + + def test_get_context_with_interface_type_and_port(self): + """Test get_context with both interface_type and port.""" + context = {"zabbix": {"interface_type": 1, "interface_port": "12345"}} + interface = ZabbixInterface(context, self.test_ip) + + # Should set type and specified port + result = interface.get_context() + self.assertTrue(result) + self.assertEqual(interface.interface["type"], 1) + self.assertEqual(interface.interface["port"], "12345") + + def test_set_default_port(self): + """Test _set_default_port for different interface types.""" + interface = ZabbixInterface(self.empty_context, self.test_ip) + + # Test for agent type (1) + interface.interface["type"] = 1 + interface._set_default_port() # pylint: disable=protected-access + self.assertEqual(interface.interface["port"], "10050") + + # Test for SNMP type (2) + interface.interface["type"] = 2 + interface._set_default_port() # pylint: disable=protected-access + self.assertEqual(interface.interface["port"], "161") + + # Test for IPMI type (3) + interface.interface["type"] = 3 + interface._set_default_port() # pylint: disable=protected-access + self.assertEqual(interface.interface["port"], "623") + + # Test for JMX type (4) + interface.interface["type"] = 4 + interface._set_default_port() # pylint: disable=protected-access + self.assertEqual(interface.interface["port"], "12345") + + # Test for unsupported type + interface.interface["type"] = 99 + result = interface._set_default_port() # pylint: disable=protected-access + self.assertFalse(result) + + def test_set_snmp_v2(self): + """Test set_snmp with SNMPv2 configuration.""" + interface = ZabbixInterface(self.snmpv2_context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp + interface.set_snmp() + + # Check SNMP details + 
self.assertEqual(interface.interface["details"]["version"], "2") + self.assertEqual(interface.interface["details"]["community"], "public") + self.assertEqual(interface.interface["details"]["bulk"], "1") + + def test_set_snmp_v3(self): + """Test set_snmp with SNMPv3 configuration.""" + interface = ZabbixInterface(self.snmpv3_context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp + interface.set_snmp() + + # Check SNMP details + self.assertEqual(interface.interface["details"]["version"], "3") + self.assertEqual(interface.interface["details"]["securityname"], "snmpuser") + self.assertEqual(interface.interface["details"]["securitylevel"], "authPriv") + self.assertEqual(interface.interface["details"]["authprotocol"], "SHA") + self.assertEqual(interface.interface["details"]["authpassphrase"], "authpass123") + self.assertEqual(interface.interface["details"]["privprotocol"], "AES") + self.assertEqual(interface.interface["details"]["privpassphrase"], "privpass123") + self.assertEqual(interface.interface["details"]["contextname"], "context1") + + def test_set_snmp_no_snmp_config(self): + """Test set_snmp with missing SNMP configuration.""" + # Create context with interface type but no SNMP config + context = {"zabbix": {"interface_type": 2}} + interface = ZabbixInterface(context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp - should raise exception + with self.assertRaises(InterfaceConfigError): + interface.set_snmp() + + def test_set_snmp_unsupported_version(self): + """Test set_snmp with unsupported SNMP version.""" + # Create context with invalid SNMP version + context = { + "zabbix": { + "interface_type": 2, + "snmp": { + "version": 4 # Invalid version + } + } + } + interface = ZabbixInterface(context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp - should raise exception + with self.assertRaises(InterfaceConfigError): + interface.set_snmp() + + def test_set_snmp_no_version(self): + """Test set_snmp with missing SNMP version.""" + # Create context without SNMP version + context = { + "zabbix": { + "interface_type": 2, + "snmp": { + "community": "public" # No version specified + } + } + } + interface = ZabbixInterface(context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp - should raise exception + with self.assertRaises(InterfaceConfigError): + interface.set_snmp() + + def test_set_snmp_non_snmp_interface(self): + """Test set_snmp with non-SNMP interface type.""" + interface = ZabbixInterface(self.agent_context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp - should raise exception + with self.assertRaises(InterfaceConfigError): + interface.set_snmp() + + def test_set_default_snmp(self): + """Test set_default_snmp method.""" + interface = ZabbixInterface(self.empty_context, self.test_ip) + interface.set_default_snmp() + + # Check interface properties + self.assertEqual(interface.interface["type"], "2") + self.assertEqual(interface.interface["port"], "161") + self.assertEqual(interface.interface["details"]["version"], "2") + self.assertEqual(interface.interface["details"]["community"], "{$SNMP_COMMUNITY}") + self.assertEqual(interface.interface["details"]["bulk"], "1") + + def test_set_default_agent(self): + """Test set_default_agent method.""" + interface = ZabbixInterface(self.empty_context, self.test_ip) + interface.set_default_agent() + + # Check interface properties + 
self.assertEqual(interface.interface["type"], "1") + self.assertEqual(interface.interface["port"], "10050") + + def test_snmpv2_no_community(self): + """Test SNMPv2 with no community string specified.""" + # Create context with SNMPv2 but no community + context = { + "zabbix": { + "interface_type": 2, + "snmp": { + "version": 2 + } + } + } + interface = ZabbixInterface(context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp + interface.set_snmp() + + # Should use default community string + self.assertEqual(interface.interface["details"]["community"], "{$SNMP_COMMUNITY}") diff --git a/tests/test_physical_device.py b/tests/test_physical_device.py new file mode 100644 index 0000000..1b79ad8 --- /dev/null +++ b/tests/test_physical_device.py @@ -0,0 +1,429 @@ +"""Tests for the PhysicalDevice class in the device module.""" +import unittest +from unittest.mock import MagicMock, patch +from modules.device import PhysicalDevice +from modules.exceptions import TemplateError, SyncInventoryError + + +class TestPhysicalDevice(unittest.TestCase): + """Test class for PhysicalDevice functionality.""" + + def setUp(self): + """Set up test fixtures.""" + # Create mock NetBox device + self.mock_nb_device = MagicMock() + self.mock_nb_device.id = 123 + self.mock_nb_device.name = "test-device" + self.mock_nb_device.status.label = "Active" + self.mock_nb_device.custom_fields = {"zabbix_hostid": None} + self.mock_nb_device.config_context = {} + + # Set up a primary IP + primary_ip = MagicMock() + primary_ip.address = "192.168.1.1/24" + self.mock_nb_device.primary_ip = primary_ip + + # Create mock Zabbix API + self.mock_zabbix = MagicMock() + self.mock_zabbix.version = "6.0" + + # Mock NetBox journal class + self.mock_nb_journal = MagicMock() + + # Create logger mock + self.mock_logger = MagicMock() + + # Create PhysicalDevice instance with mocks + with patch('modules.device.config', + {"device_cf": "zabbix_hostid", + "template_cf": "zabbix_template", + "templates_config_context": False, + "templates_config_context_overrule": False, + "traverse_regions": False, + "traverse_site_groups": False, + "inventory_mode": "disabled", + "inventory_sync": False, + "device_inventory_map": {} + }): + self.device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + journal=True, + logger=self.mock_logger + ) + + def test_init(self): + """Test the initialization of the PhysicalDevice class.""" + # Check that basic properties are set correctly + self.assertEqual(self.device.name, "test-device") + self.assertEqual(self.device.id, 123) + self.assertEqual(self.device.status, "Active") + self.assertEqual(self.device.ip, "192.168.1.1") + self.assertEqual(self.device.cidr, "192.168.1.1/24") + + def test_init_no_primary_ip(self): + """Test initialization when device has no primary IP.""" + # Set primary_ip to None + self.mock_nb_device.primary_ip = None + + # Creating device should raise SyncInventoryError + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + with self.assertRaises(SyncInventoryError): + PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + def test_set_basics_with_special_characters(self): + """Test _setBasics when device name contains special characters.""" + # Set name with special characters that + # will actually trigger the special character detection + self.mock_nb_device.name = "test-devïce" + + # We need to patch the search function to 
simulate finding special characters + with patch('modules.device.search') as mock_search, \ + patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + # Make the search function return True to simulate special characters + mock_search.return_value = True + + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # With the mocked search function, the name should be changed to NETBOX_ID format + self.assertEqual(device.name, f"NETBOX_ID{self.mock_nb_device.id}") + # And visible_name should be set to the original name + self.assertEqual(device.visible_name, "test-devïce") + # use_visible_name flag should be set + self.assertTrue(device.use_visible_name) + + def test_get_templates_context(self): + """Test get_templates_context with valid config.""" + # Set up config_context with valid template data + self.mock_nb_device.config_context = { + "zabbix": { + "templates": ["Template1", "Template2"] + } + } + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Test that templates are returned correctly + templates = device.get_templates_context() + self.assertEqual(templates, ["Template1", "Template2"]) + + def test_get_templates_context_with_string(self): + """Test get_templates_context with a string instead of list.""" + # Set up config_context with a string template + self.mock_nb_device.config_context = { + "zabbix": { + "templates": "Template1" + } + } + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Test that template is wrapped in a list + templates = device.get_templates_context() + self.assertEqual(templates, ["Template1"]) + + def test_get_templates_context_no_zabbix_key(self): + """Test get_templates_context when zabbix key is missing.""" + # Set up config_context without zabbix key + self.mock_nb_device.config_context = {} + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Test that TemplateError is raised + with self.assertRaises(TemplateError): + device.get_templates_context() + + def test_get_templates_context_no_templates_key(self): + """Test get_templates_context when templates key is missing.""" + # Set up config_context without templates key + self.mock_nb_device.config_context = {"zabbix": {}} + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Test that TemplateError is raised + with self.assertRaises(TemplateError): + device.get_templates_context() + + def test_set_template_with_config_context(self): + """Test set_template with templates_config_context=True.""" + # Set up config_context with templates + self.mock_nb_device.config_context = { + "zabbix": { + "templates": ["Template1"] + } + } + + # Mock get_templates_context to return expected templates + with patch.object(PhysicalDevice, 
'get_templates_context', return_value=["Template1"]): + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call set_template with prefer_config_context=True + result = device.set_template(prefer_config_context=True, overrule_custom=False) + + # Check result and template names + self.assertTrue(result) + self.assertEqual(device.zbx_template_names, ["Template1"]) + + def test_set_inventory_disabled_mode(self): + """Test set_inventory with inventory_mode=disabled.""" + # Configure with disabled inventory mode + config_patch = { + "device_cf": "zabbix_hostid", + "inventory_mode": "disabled", + "inventory_sync": False + } + + with patch('modules.device.config', config_patch): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call set_inventory with the config patch still active + with patch('modules.device.config', config_patch): + result = device.set_inventory({}) + + # Check result + self.assertTrue(result) + # Default value for disabled inventory + self.assertEqual(device.inventory_mode, -1) + + def test_set_inventory_manual_mode(self): + """Test set_inventory with inventory_mode=manual.""" + # Configure with manual inventory mode + config_patch = { + "device_cf": "zabbix_hostid", + "inventory_mode": "manual", + "inventory_sync": False + } + + with patch('modules.device.config', config_patch): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call set_inventory with the config patch still active + with patch('modules.device.config', config_patch): + result = device.set_inventory({}) + + # Check result + self.assertTrue(result) + self.assertEqual(device.inventory_mode, 0) # Manual mode + + def test_set_inventory_automatic_mode(self): + """Test set_inventory with inventory_mode=automatic.""" + # Configure with automatic inventory mode + config_patch = { + "device_cf": "zabbix_hostid", + "inventory_mode": "automatic", + "inventory_sync": False + } + + with patch('modules.device.config', config_patch): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call set_inventory with the config patch still active + with patch('modules.device.config', config_patch): + result = device.set_inventory({}) + + # Check result + self.assertTrue(result) + self.assertEqual(device.inventory_mode, 1) # Automatic mode + + def test_set_inventory_with_inventory_sync(self): + """Test set_inventory with inventory_sync=True.""" + # Configure with inventory sync enabled + config_patch = { + "device_cf": "zabbix_hostid", + "inventory_mode": "manual", + "inventory_sync": True, + "device_inventory_map": { + "name": "name", + "serial": "serialno_a" + } + } + + with patch('modules.device.config', config_patch): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Create a mock device with the required attributes + mock_device_data = { + "name": "test-device", + "serial": "ABC123" + } + + # Call set_inventory with the config patch still active + with patch('modules.device.config', config_patch): + result = device.set_inventory(mock_device_data) + + # Check result + self.assertTrue(result) + 
self.assertEqual(device.inventory_mode, 0) # Manual mode + self.assertEqual(device.inventory, { + "name": "test-device", + "serialno_a": "ABC123" + }) + + def test_iscluster_true(self): + """Test isCluster when device is part of a cluster.""" + # Set up virtual_chassis + self.mock_nb_device.virtual_chassis = MagicMock() + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Check isCluster result + self.assertTrue(device.isCluster()) + + def test_is_cluster_false(self): + """Test isCluster when device is not part of a cluster.""" + # Set virtual_chassis to None + self.mock_nb_device.virtual_chassis = None + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Check isCluster result + self.assertFalse(device.isCluster()) + + + def test_promote_master_device_primary(self): + """Test promoteMasterDevice when device is primary in cluster.""" + # Set up virtual chassis with master device + mock_vc = MagicMock() + mock_vc.name = "virtual-chassis-1" + mock_master = MagicMock() + mock_master.id = self.mock_nb_device.id # Set master ID to match the current device + mock_vc.master = mock_master + self.mock_nb_device.virtual_chassis = mock_vc + + # Create device with the updated mock + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call promoteMasterDevice and check the result + result = device.promoteMasterDevice() + + # Should return True for primary device + self.assertTrue(result) + # Device name should be updated to virtual chassis name + self.assertEqual(device.name, "virtual-chassis-1") + + + def test_promote_master_device_secondary(self): + """Test promoteMasterDevice when device is secondary in cluster.""" + # Set up virtual chassis with a different master device + mock_vc = MagicMock() + mock_vc.name = "virtual-chassis-1" + mock_master = MagicMock() + mock_master.id = self.mock_nb_device.id + 1 # Different ID than the current device + mock_vc.master = mock_master + self.mock_nb_device.virtual_chassis = mock_vc + + # Create device with the updated mock + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call promoteMasterDevice and check the result + result = device.promoteMasterDevice() + + # Should return False for secondary device + self.assertFalse(result) + # Device name should not be modified + self.assertEqual(device.name, "test-device") diff --git a/tests/test_tools.py b/tests/test_tools.py new file mode 100644 index 0000000..3e6ae24 --- /dev/null +++ b/tests/test_tools.py @@ -0,0 +1,62 @@ +from modules.tools import sanatize_log_output + +def test_sanatize_log_output_secrets(): + data = { + "macros": [ + {"macro": "{$SECRET}", "type": "1", "value": "supersecret"}, + {"macro": "{$PLAIN}", "type": "0", "value": "notsecret"}, + ] + } + sanitized = sanatize_log_output(data) + assert sanitized["macros"][0]["value"] == "********" + assert sanitized["macros"][1]["value"] == "notsecret" + +def test_sanatize_log_output_interface_secrets(): + data = { + "interfaceid": 123, + "details": { + "authpassphrase": "supersecret", + 
"privpassphrase": "anothersecret", + "securityname": "sensitiveuser", + "community": "public", + "other": "normalvalue" + } + } + sanitized = sanatize_log_output(data) + # Sensitive fields should be sanitized + assert sanitized["details"]["authpassphrase"] == "********" + assert sanitized["details"]["privpassphrase"] == "********" + assert sanitized["details"]["securityname"] == "********" + # Non-sensitive fields should remain + assert sanitized["details"]["community"] == "********" + assert sanitized["details"]["other"] == "normalvalue" + # interfaceid should be removed + assert "interfaceid" not in sanitized + +def test_sanatize_log_output_interface_macros(): + data = { + "interfaceid": 123, + "details": { + "authpassphrase": "{$SECRET_MACRO}", + "privpassphrase": "{$SECRET_MACRO}", + "securityname": "{$USER_MACRO}", + "community": "{$SNNMP_COMMUNITY}", + } + } + sanitized = sanatize_log_output(data) + # Macro values should not be sanitized + assert sanitized["details"]["authpassphrase"] == "{$SECRET_MACRO}" + assert sanitized["details"]["privpassphrase"] == "{$SECRET_MACRO}" + assert sanitized["details"]["securityname"] == "{$USER_MACRO}" + assert sanitized["details"]["community"] == "{$SNNMP_COMMUNITY}" + assert "interfaceid" not in sanitized + +def test_sanatize_log_output_plain_data(): + data = {"foo": "bar", "baz": 123} + sanitized = sanatize_log_output(data) + assert sanitized == data + +def test_sanatize_log_output_non_dict(): + data = [1, 2, 3] + sanitized = sanatize_log_output(data) + assert sanitized == data diff --git a/tests/test_usermacros.py b/tests/test_usermacros.py new file mode 100644 index 0000000..28305af --- /dev/null +++ b/tests/test_usermacros.py @@ -0,0 +1,125 @@ +import unittest +from unittest.mock import MagicMock, patch +from modules.device import PhysicalDevice +from modules.usermacros import ZabbixUsermacros + +class DummyNB: + def __init__(self, name="dummy", config_context=None, **kwargs): + self.name = name + self.config_context = config_context or {} + for k, v in kwargs.items(): + setattr(self, k, v) + + def __getitem__(self, key): + # Allow dict-style access for test compatibility + if hasattr(self, key): + return getattr(self, key) + if key in self.config_context: + return self.config_context[key] + raise KeyError(key) + +class TestUsermacroSync(unittest.TestCase): + def setUp(self): + self.nb = DummyNB(serial="1234") + self.logger = MagicMock() + self.usermacro_map = {"serial": "{$HW_SERIAL}"} + + @patch("modules.device.config", {"usermacro_sync": False}) + def test_usermacro_sync_false(self): + device = PhysicalDevice.__new__(PhysicalDevice) + device.nb = self.nb + device.logger = self.logger + device.name = "dummy" + device._usermacro_map = MagicMock(return_value=self.usermacro_map) + # call set_usermacros + result = device.set_usermacros() + self.assertEqual(device.usermacros, []) + self.assertTrue(result is True or result is None) + + @patch("modules.device.config", {"usermacro_sync": True}) + def test_usermacro_sync_true(self): + device = PhysicalDevice.__new__(PhysicalDevice) + device.nb = self.nb + device.logger = self.logger + device.name = "dummy" + device._usermacro_map = MagicMock(return_value=self.usermacro_map) + result = device.set_usermacros() + self.assertIsInstance(device.usermacros, list) + self.assertGreater(len(device.usermacros), 0) + + @patch("modules.device.config", {"usermacro_sync": "full"}) + def test_usermacro_sync_full(self): + device = PhysicalDevice.__new__(PhysicalDevice) + device.nb = self.nb + device.logger = 
self.logger + device.name = "dummy" + device._usermacro_map = MagicMock(return_value=self.usermacro_map) + result = device.set_usermacros() + self.assertIsInstance(device.usermacros, list) + self.assertGreater(len(device.usermacros), 0) + +class TestZabbixUsermacros(unittest.TestCase): + def setUp(self): + self.nb = DummyNB() + self.logger = MagicMock() + + def test_validate_macro_valid(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + self.assertTrue(macros.validate_macro("{$TEST_MACRO}")) + self.assertTrue(macros.validate_macro("{$A1_2.3}")) + self.assertTrue(macros.validate_macro("{$FOO:bar}")) + + def test_validate_macro_invalid(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + self.assertFalse(macros.validate_macro("$TEST_MACRO")) + self.assertFalse(macros.validate_macro("{TEST_MACRO}")) + self.assertFalse(macros.validate_macro("{$test}")) # lower-case not allowed + self.assertFalse(macros.validate_macro("")) + + def test_render_macro_dict(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + macro = macros.render_macro("{$FOO}", {"value": "bar", "type": "secret", "description": "desc"}) + self.assertEqual(macro["macro"], "{$FOO}") + self.assertEqual(macro["value"], "bar") + self.assertEqual(macro["type"], "1") + self.assertEqual(macro["description"], "desc") + + def test_render_macro_dict_missing_value(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + result = macros.render_macro("{$FOO}", {"type": "text"}) + self.assertFalse(result) + self.logger.warning.assert_called() + + def test_render_macro_str(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + macro = macros.render_macro("{$FOO}", "bar") + self.assertEqual(macro["macro"], "{$FOO}") + self.assertEqual(macro["value"], "bar") + self.assertEqual(macro["type"], "0") + self.assertEqual(macro["description"], "") + + def test_render_macro_invalid_name(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + result = macros.render_macro("FOO", "bar") + self.assertFalse(result) + self.logger.error.assert_called() + + def test_generate_from_map(self): + nb = DummyNB(memory="bar", role="baz") + usermacro_map = {"memory": "{$FOO}", "role": "{$BAR}"} + macros = ZabbixUsermacros(nb, usermacro_map, True, logger=self.logger) + result = macros.generate() + self.assertEqual(len(result), 2) + self.assertEqual(result[0]["macro"], "{$FOO}") + self.assertEqual(result[1]["macro"], "{$BAR}") + + def test_generate_from_config_context(self): + config_context = {"zabbix": {"usermacros": {"{$FOO}": {"value": "bar"}}}} + nb = DummyNB(config_context=config_context) + macros = ZabbixUsermacros(nb, {}, True, logger=self.logger) + result = macros.generate() + self.assertEqual(len(result), 1) + self.assertEqual(result[0]["macro"], "{$FOO}") + +if __name__ == "__main__": + unittest.main()