diff --git a/.github/workflows/publish-image.yml b/.github/workflows/publish-image.yml index e6d07fe..4531303 100644 --- a/.github/workflows/publish-image.yml +++ b/.github/workflows/publish-image.yml @@ -1,43 +1,57 @@ -name: Publish Docker image to GHCR on a new version +name: Build and Push Docker Image on: push: branches: - main +permissions: + contents: read + packages: write -env: - REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }} +on: + release: + types: [published] + pull_request: + types: [opened, synchronize] jobs: test_quality: uses: ./.github/workflows/quality.yml - build_and_publish: + build: runs-on: ubuntu-latest steps: - - name: Checkout sources - uses: actions/checkout@v4 - - name: Log in to the container registry - uses: docker/login-action@v3 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GHCR_PAT }} - - name: Extract metadata (tags, labels) - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - tags: | - type=semver,pattern={{ version }} - type=ref,event=branch - type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }} - type=sha - - name: Build and push Docker image - uses: docker/build-push-action@v5 - with: - context: . - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 + + - name: Login to GitHub Container Registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96 + with: + images: ghcr.io/${{ github.repository }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Build and push Docker image + uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 + with: + context: . + file: ./Dockerfile + push: true + platforms: linux/amd64,linux/arm64 + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + annotations: | + index:org.opencontainers.image.description=Python script to synchronise NetBox devices to Zabbix. diff --git a/.gitignore b/.gitignore index 27761cd..0693f26 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ *.log .venv -/config.py +config.py +Pipfile +Pipfile.lock # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] diff --git a/Dockerfile b/Dockerfile index fa8d9c4..198dbe5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,13 @@ # syntax=docker/dockerfile:1 FROM python:3.12-alpine -RUN mkdir -p /opt/netbox-zabbix -COPY . /opt/netbox-zabbix +RUN mkdir -p /opt/netbox-zabbix && chown -R 1000:1000 /opt/netbox-zabbix + +USER 1000:1000 +COPY --chown=1000:1000 . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi +USER root RUN pip install -r ./requirements.txt +USER 1000:1000 ENTRYPOINT ["python"] CMD ["/opt/netbox-zabbix/netbox_zabbix_sync.py", "-v"] diff --git a/README.md b/README.md index b482e7d..62a6673 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ Currently compatible with Zabbix 7.0. 
Zabbix 7.2 is unfortunately not supported To pull the latest stable version to your local cache, use the following docker pull command: -```sh +```bash docker pull ghcr.io/thenetworkguy/netbox-zabbix-sync:main ``` @@ -16,7 +16,7 @@ Make sure to specify the needed environment variables for the script to work (see [here](#set-environment-variables)) on the command line or use an [env file](https://docs.docker.com/reference/cli/docker/container/run/#env). -```sh +```bash docker run -d -t -i -e ZABBIX_HOST='https://zabbix.local' \ -e ZABBIX_TOKEN='othersecrettoken' \ -e NETBOX_HOST='https://netbox.local' \ @@ -31,7 +31,7 @@ The image uses the default `config.py` for it's configuration, you can use a volume mount in the docker run command to override with your own config file if needed (see [config file](#config-file)): -```sh +```bash docker run -d -t -i -v $(pwd)/config.py:/opt/netbox-zabbix/config.py ... ``` @@ -39,7 +39,7 @@ docker run -d -t -i -v $(pwd)/config.py:/opt/netbox-zabbix/config.py ... ### Cloning the repository -```sh +```bash git clone https://github.com/TheNetworkGuy/netbox-zabbix-sync.git ``` @@ -73,19 +73,19 @@ cp config.py.example config.py Set the following environment variables: -```sh -export ZABBIX_HOST="https://zabbix.local" -export ZABBIX_USER="username" -export ZABBIX_PASS="Password" -export NETBOX_HOST="https://netbox.local" -export NETBOX_TOKEN="secrettoken" +```bash +ZABBIX_HOST="https://zabbix.local" +ZABBIX_USER="username" +ZABBIX_PASS="Password" +NETBOX_HOST="https://netbox.local" +NETBOX_TOKEN="secrettoken" ``` Or, you can use a Zabbix API token to login instead of using a username and password. In that case `ZABBIX_USER` and `ZABBIX_PASS` will be ignored. -```sh -export ZABBIX_TOKEN=othersecrettoken +```bash +ZABBIX_TOKEN=othersecrettoken ``` If you are using custom SSL certificates for NetBox and/or Zabbix, you can set @@ -190,9 +190,9 @@ used: | cluster | VM cluster name | | cluster_type | VM cluster type | -You can specify the value sperated by a "/" like so: +You can specify the value seperated by a "/" like so: -``` +```python hostgroup_format = "tenant/site/dev_location/role" ``` @@ -239,7 +239,7 @@ have a relationship with a tenant. - Device_role: PDU - Site: HQ-AMS -``` +```python hostgroup_format = "site/tenant/device_role" ``` @@ -252,7 +252,7 @@ generated for both hosts: The same logic applies to custom fields being used in the HG format: -``` +```python hostgroup_format = "site/mycustomfieldname" ``` @@ -299,15 +299,18 @@ You can set the inventory mode to "disabled", "manual" or "automatic" with the [Zabbix Manual](https://www.zabbix.com/documentation/current/en/manual/config/hosts/inventory#building-inventory) for more information about the modes. -Use the `inventory_map` variable to map which NetBox properties are used in +Use the `device_inventory_map` variable to map which NetBox properties are used in which Zabbix Inventory fields. For nested properties, you can use the '/' seperator. For example, the following map will assign the custom field 'mycustomfield' to the 'alias' Zabbix inventory field: -``` +For Virtual Machines, use `vm_inventory_map`. + +```python inventory_sync = True inventory_mode = "manual" -inventory_map = { "custom_fields/mycustomfield/name": "alias"} +device_inventory_map = {"custom_fields/mycustomfield/name": "alias"} +vm_inventory_map = {"custom_fields/mycustomfield/name": "alias"} ``` See `config.py.example` for an extensive example map. Any Zabix Inventory fields @@ -328,14 +331,14 @@ sticking to the custom field. 
You can change the behaviour in the config file. By default this setting is
false but you can set it to true to use config context:

-```
+```python
templates_config_context = True
```

After that make sure that for each host there is at least one template defined
in the config context in this format:

-```
+```json
{
    "zabbix": {
        "templates": [
@@ -353,10 +356,196 @@
added benefit of overwriting the template should a device in NetBox have a
device specific context defined. In this case the device specific context
template(s) will take priority over the device type custom field template.

-```
+```python
templates_config_context_overrule = True
```

+### Tags
+
+This script can sync host tags to your Zabbix hosts for use in filtering,
+SLA calculations and event correlation.
+
+Tags can be synced from the following sources:
+
+1. NetBox device/vm tags
+2. NetBox config context
+3. NetBox fields
+
+Syncing tags will override any tags that were set manually on the host,
+making NetBox the single source of truth for managing tags.
+
+To enable syncing, turn on `tag_sync` in the config file.
+By default, this script will modify tag names and tag values to lowercase.
+You can change this behaviour by setting `tag_lower` to False.
+
+```python
+tag_sync = True
+tag_lower = True
+```
+
+#### Device tags
+
+As NetBox doesn't follow the tag/value pattern for tags, we will need a tag
+name set to register the NetBox tags.
+
+By default the tag name is "NetBox", but you can change this to whatever you want.
+The value for the tag can be chosen from 'name', 'display' or 'slug'.
+
+```python
+tag_name = 'NetBox'
+tag_value = 'name'
+```
+
+#### Config context
+
+You can supply custom tags via config context by adding the following:
+
+```json
+{
+    "zabbix": {
+        "tags": [
+            {
+                "MyTagName": "MyTagValue"
+            },
+            {
+                "environment": "production"
+            }
+        ]
+    }
+}
+```
+
+This will allow you to assign tags based on the config context rules.
+
+#### NetBox Field
+
+NetBox fields can also be used as input for tags, just like inventory and usermacros.
+To enable syncing from fields, make sure to configure a `device_tag_map` and/or a `vm_tag_map`.
+
+```python
+device_tag_map = {"site/name": "site",
+                  "rack/name": "rack",
+                  "platform/name": "target"}
+
+vm_tag_map = {"site/name": "site",
+              "cluster/name": "cluster",
+              "platform/name": "target"}
+```
+
+To turn off field syncing, set the maps to empty dictionaries:
+
+```python
+device_tag_map = {}
+vm_tag_map = {}
+```
+
+
+### Usermacros
+
+You can choose to use NetBox as a source for Host usermacros by
+enabling the following option in the configuration file:
+
+```python
+usermacro_sync = True
+```
+
+Please be advised that enabling this option will _clear_ any usermacros
+manually set on the managed hosts and override them with the usermacros
+from NetBox.
+
+There are two NetBox sources that can be used to populate usermacros:
+
+1. NetBox config context
+2. NetBox fields
+
+#### Config context
+
+By defining a dictionary `usermacros` within the `zabbix` key in
+config context, you can dynamically assign usermacro values based on
+anything that you can target with
+[config contexts](https://netboxlabs.com/docs/netbox/en/stable/features/context-data/)
+within NetBox.
+
+Through this method, it is possible to define the following types of usermacros:
+
+1. Text
+2. Secret
+3. Vault
+
+The default macro type is text if no `type` and `value` have been set.
+It is also possible to create usermacros with +[context](https://www.zabbix.com/documentation/7.0/en/manual/config/macros/user_macros_context). + +Examples: + +```json +{ + "zabbix": { + "usermacros": { + "{$USER_MACRO}": "test value", + "{$CONTEXT_MACRO:\"test\"}": "test value", + "{$CONTEXT_REGEX_MACRO:regex:\".*\"}": "test value", + "{$SECRET_MACRO}": { + "type": "secret", + "value": "PaSsPhRaSe" + }, + "{$VAULT_MACRO}": { + "type": "vault", + "value": "secret/vmware:password" + }, + "{$USER_MACRO2}": { + "type": "text", + "value": "another test value" + } + } + } +} + +``` + +Please be aware that secret usermacros are only synced _once_ by default. +This is the default behaviour because Zabbix API won't return the value of +secrets so the script cannot compare the values with the ones set in NetBox. + +If you update a secret usermacro value, just remove the value from the host +in Zabbix and the new value will be synced during the next run. + +Alternatively, you can set the following option in the config file: + +```python +usermacro_sync = "full" +``` + +This will force a full usermacro sync on every run on hosts that have secret usermacros set. +That way, you will know for sure the secret values are always up to date. + +Keep in mind that NetBox (and the log output of this script) will show your secrets +in plain text. If true secrecy is required, consider switching to +[vault](https://www.zabbix.com/documentation/current/en/manual/config/macros/secret_macros#vault-secret) +usermacros. + +#### Netbox Fields + +To use NetBox fields as a source for usermacros, you will need to set up usermacro maps +for devices and/or virtual machines in the configuration file. +This method only supports `text` type usermacros. + +For example: + +```python +usermacro_sync = True +device_usermacro_map = {"serial": "{$HW_SERIAL}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}"} +vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}"} +``` + + + ## Permissions ### NetBox @@ -393,9 +582,11 @@ python3 netbox_zabbix_sync.py ### Flags -| Flag | Option | Description | -| ---- | ------- | ---------------------- | -| -v | verbose | Log with debugging on. | +| Flag | Option | Description | +| ---- | --------- | ------------------------------------- | +| -v | verbose | Log with info on. | +| -vv | debug | Log with debugging on. | +| -vvv | debug-all | Log with debugging on for all modules | ## Config context @@ -525,9 +716,13 @@ environment. For example, you could: } ``` -I would recommend using macros for sensitive data such as community strings +I would recommend using usermacros for sensitive data such as community strings since the data in NetBox is plain-text. > **_NOTE:_** Not all SNMP data is required for a working configuration. > [The following parameters are allowed](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed")but > are not all required, depending on your environment. + + + + diff --git a/config.py.example b/config.py.example index 1d83223..e4082e6 100644 --- a/config.py.example +++ b/config.py.example @@ -80,19 +80,74 @@ inventory_sync = False # For nested properties, you can use the '/' seperator. 
# For example, the following map will assign the custom field 'mycustomfield' to the 'alias' Zabbix inventory field: # -# inventory_map = { "custom_fields/mycustomfield/name": "alias"} +# device_inventory_map = { "custom_fields/mycustomfield/name": "alias"} # -# The following map should provide some nice defaults: -inventory_map = { "asset_tag": "asset_tag", - "virtual_chassis/name": "chassis", - "status/label": "deployment_status", - "location/name": "location", - "latitude": "location_lat", - "longitude": "location_lon", - "comments": "notes", - "name": "name", - "rack/name": "site_rack", - "serial": "serialno_a", - "device_type/model": "type", - "device_type/manufacturer/name": "vendor", - "oob_ip/address": "oob_ip" } +# The following maps should provide some nice defaults: +device_inventory_map = { "asset_tag": "asset_tag", + "virtual_chassis/name": "chassis", + "status/label": "deployment_status", + "location/name": "location", + "latitude": "location_lat", + "longitude": "location_lon", + "comments": "notes", + "name": "name", + "rack/name": "site_rack", + "serial": "serialno_a", + "device_type/model": "type", + "device_type/manufacturer/name": "vendor", + "oob_ip/address": "oob_ip" } + +# We also support inventory mapping on Virtual Machines. +vm_inventory_map = { "status/label": "deployment_status", + "comments": "notes", + "name": "name" } + +# To allow syncing of usermacros from NetBox, set to True. +# this will enable both field mapping and config context usermacros. +# +# If set to "full", it will force the update of secret usermacros every run. +# Please see the README.md for more information. +usermacro_sync = False + +# device usermacro_map to map NetBox fields to usermacros. +device_usermacro_map = {"serial": "{$HW_SERIAL}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}"} + +# virtual machine usermacro_map to map NetBox fields to usermacros. +vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}"} + +# To sync host tags to Zabbix, set to True. +tag_sync = False + +# Setting tag_lower to True will lower capital letters ain tag names and values +# This is more inline with the Zabbix way of working with tags. +# +# You can however set this to False to ensure capital letters are synced to Zabbix tags. +tag_lower = True + +# We can sync NetBox device/VM tags to Zabbix, but as NetBox tags don't follow the key/value +# pattern, we need to specify a tag name to register the NetBox tags in Zabbix. +# +# +# +# If tag_name is set to False, we won't sync NetBox device/VM tags to Zabbix. +tag_name = 'NetBox' + +# We can choose to use 'name', 'slug' or 'display' NetBox tag properties as a value in Zabbix. +# 'name'is used by default. +tag_value = "name" + +# device tag_map to map NetBox fields to host tags. +device_tag_map = {"site/name": "site", + "rack/name": "rack", + "platform/name": "target"} + +# Virtual machine tag_map to map NetBox fields to host tags. 
+vm_tag_map = {"site/name": "site", + "cluster/name": "cluster", + "platform/name": "target"} diff --git a/modules/device.py b/modules/device.py index aa15a06..8bea73d 100644 --- a/modules/device.py +++ b/modules/device.py @@ -1,28 +1,40 @@ -#!/usr/bin/env python3 -# pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation, too-many-lines +# pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation, too-many-lines, too-many-public-methods, duplicate-code """ Device specific handeling for NetBox to Zabbix """ -from re import search + +from copy import deepcopy from logging import getLogger +from re import search + from zabbix_utils import APIRequestError -from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalError, - InterfaceConfigError, JournalError) -from modules.interface import ZabbixInterface + +from modules.exceptions import ( + InterfaceConfigError, + JournalError, + SyncExternalError, + SyncInventoryError, + TemplateError, +) from modules.hostgroups import Hostgroup +from modules.interface import ZabbixInterface +from modules.tags import ZabbixTags +from modules.tools import field_mapper, remove_duplicates +from modules.usermacros import ZabbixUsermacros from modules.config import load_config config = load_config() - -class PhysicalDevice(): +class PhysicalDevice: # pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments """ Represents Network device. INPUT: (NetBox device class, ZabbixAPI class, journal flag, NB journal class) """ - def __init__(self, nb, zabbix, nb_journal_class, nb_version, journal=None, logger=None): + def __init__( + self, nb, zabbix, nb_journal_class, nb_version, journal=None, logger=None + ): self.nb = nb self.id = nb.id self.name = nb.name @@ -43,6 +55,8 @@ class PhysicalDevice(): self.nb_journals = nb_journal_class self.inventory_mode = -1 self.inventory = {} + self.usermacros = {} + self.tags = {} self.logger = logger if logger else getLogger(__name__) self._setBasics() @@ -52,6 +66,18 @@ class PhysicalDevice(): def __str__(self): return self.__repr__() + def _inventory_map(self): + """Use device inventory maps""" + return config["device_inventory_map"] + + def _usermacro_map(self): + """Use device inventory maps""" + return config["device_usermacro_map"] + + def _tag_map(self): + """Use device host tag maps""" + return config["device_tag_map"] + def _setBasics(self): """ Sets basic information like IP address. @@ -62,7 +88,7 @@ class PhysicalDevice(): self.ip = self.cidr.split("/")[0] else: e = f"Host {self.name}: no primary IP." - self.logger.info(e) + self.logger.warning(e) raise SyncInventoryError(e) # Check if device has custom field for ZBX ID @@ -76,30 +102,38 @@ class PhysicalDevice(): # Validate hostname format. odd_character_list = ["ä", "ö", "ü", "Ä", "Ö", "Ü", "ß"] self.use_visible_name = False - if (any(letter in self.name for letter in odd_character_list) or - bool(search('[\u0400-\u04FF]', self.name))): + if any(letter in self.name for letter in odd_character_list) or bool( + search("[\u0400-\u04ff]", self.name) + ): self.name = f"NETBOX_ID{self.id}" self.visible_name = self.nb.name self.use_visible_name = True - self.logger.info(f"Host {self.visible_name} contains special characters. " - f"Using {self.name} as name for the NetBox object " - f"and using {self.visible_name} as visible name in Zabbix.") + self.logger.info( + f"Host {self.visible_name} contains special characters. 
" + f"Using {self.name} as name for the NetBox object " + f"and using {self.visible_name} as visible name in Zabbix." + ) else: pass def set_hostgroup(self, hg_format, nb_site_groups, nb_regions): """Set the hostgroup for this device""" # Create new Hostgroup instance - hg = Hostgroup("dev", self.nb, self.nb_api_version, logger=self.logger, - nested_sitegroup_flag=config["traverse_site_groups"], - nested_region_flag=config["traverse_regions"], - nb_groups=nb_site_groups, - nb_regions=nb_regions) + hg = Hostgroup( + "dev", + self.nb, + self.nb_api_version, + logger=self.logger, + nested_sitegroup_flag=traverse_site_groups, + nested_region_flag=traverse_regions, + nb_groups=nb_site_groups, + nb_regions=nb_regions, + ) # Generate hostgroup based on hostgroup format self.hostgroup = hg.generate(hg_format) def set_template(self, prefer_config_context, overrule_custom): - """ Set Template """ + """Set Template""" self.zbx_template_names = None # Gather templates ONLY from the device specific context if prefer_config_context: @@ -123,7 +157,7 @@ class PhysicalDevice(): return True def get_templates_cf(self): - """ Get template from custom field """ + """Get template from custom field""" # Get Zabbix templates from the device type device_type_cfs = self.nb.device_type.custom_fields # Check if the ZBX Template CF is present @@ -131,22 +165,29 @@ class PhysicalDevice(): # Set value to template return [device_type_cfs[config["template_cf"]]] # Custom field not found, return error - e = (f'Custom field {config["template_cf"]} not ' - f"found for {self.nb.device_type.manufacturer.name}" - f" - {self.nb.device_type.display}.") + e = ( + f"Custom field {template_cf} not " + f"found for {self.nb.device_type.manufacturer.name}" + f" - {self.nb.device_type.display}." + ) + self.logger.warning(e) raise TemplateError(e) def get_templates_context(self): - """ Get Zabbix templates from the device context """ + """Get Zabbix templates from the device context""" if "zabbix" not in self.config_context: - e = (f"Host {self.name}: Key 'zabbix' not found in config " - "context for template lookup") + e = ( + f"Host {self.name}: Key 'zabbix' not found in config " + "context for template lookup" + ) raise TemplateError(e) if "templates" not in self.config_context["zabbix"]: - e = (f"Host {self.name}: Key 'templates' not found in config " - "context 'zabbix' for template lookup") + e = ( + f"Host {self.name}: Key 'templates' not found in config " + "context 'zabbix' for template lookup" + ) raise TemplateError(e) # Check if format is list or string. if isinstance(self.config_context["zabbix"]["templates"], str): @@ -154,7 +195,7 @@ class PhysicalDevice(): return self.config_context["zabbix"]["templates"] def set_inventory(self, nbdevice): - """ Set host inventory """ + """Set host inventory""" # Set inventory mode. Default is disabled (see class init function). if config["inventory_mode"] == "disabled": if config["inventory_sync"]: @@ -167,37 +208,17 @@ class PhysicalDevice(): elif config["inventory_mode"] == "automatic": self.inventory_mode = 1 else: - self.logger.error(f"Host {self.name}: Specified value for inventory mode in " - f'config is not valid. Got value {config["inventory_mode"]}') + self.logger.error( + f"Host {self.name}: Specified value for inventory mode in" + f" config is not valid. 
Got value {config['inventory_mode']}" + ) return False self.inventory = {} if config["inventory_sync"] and self.inventory_mode in [0, 1]: self.logger.debug(f"Host {self.name}: Starting inventory mapper") - # Let's build an inventory dict for each property in the inventory_map - for nb_inv_field, zbx_inv_field in config["inventory_map"].items(): - field_list = nb_inv_field.split("/") # convert str to list based on delimiter - # start at the base of the dict... - value = nbdevice - # ... and step through the dict till we find the needed value - for item in field_list: - value = value[item] if value else None - # Check if the result is usable and expected - # We want to apply any int or float 0 values, - # even if python thinks those are empty. - if ((value and isinstance(value, int | float | str)) or - (isinstance(value, int | float) and int(value) == 0)): - self.inventory[zbx_inv_field] = str(value) - elif not value: - # empty value should just be an empty string for API compatibility - self.logger.debug(f"Host {self.name}: NetBox inventory lookup for " - f"'{nb_inv_field}' returned an empty value") - self.inventory[zbx_inv_field] = "" - else: - # Value is not a string or numeral, probably not what the user expected. - self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" - " returned an unexpected type: it will be skipped.") - self.logger.debug(f"Host {self.name}: Inventory mapping complete. " - f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") + self.inventory = field_mapper( + self.name, self._inventory_map(), nbdevice, self.logger + ) return True def isCluster(self): @@ -211,13 +232,17 @@ class PhysicalDevice(): Returns chassis master ID. """ if not self.isCluster(): - e = (f"Unable to proces {self.name} for cluster calculation: " - f"not part of a cluster.") + e = ( + f"Unable to proces {self.name} for cluster calculation: " + f"not part of a cluster." + ) self.logger.warning(e) raise SyncInventoryError(e) if not self.nb.virtual_chassis.master: - e = (f"{self.name} is part of a NetBox virtual chassis which does " - "not have a master configured. Skipping for this reason.") + e = ( + f"{self.name} is part of a NetBox virtual chassis which does " + "not have a master configured. Skipping for this reason." + ) self.logger.error(e) raise SyncInventoryError(e) return self.nb.virtual_chassis.master.id @@ -230,9 +255,11 @@ class PhysicalDevice(): """ masterid = self.getClusterMaster() if masterid == self.id: - self.logger.debug(f"Host {self.name} is primary cluster member. " - f"Modifying hostname from {self.name} to " + - f"{self.nb.virtual_chassis.name}.") + self.logger.debug( + f"Host {self.name} is primary cluster member. " + f"Modifying hostname from {self.name} to " + + f"{self.nb.virtual_chassis.name}." 
+ ) self.name = self.nb.virtual_chassis.name return True self.logger.debug(f"Host {self.name} is non-primary cluster member.") @@ -257,18 +284,24 @@ class PhysicalDevice(): # Go through all templates found in Zabbix for zbx_template in templates: # If the template names match - if zbx_template['name'] == nb_template: + if zbx_template["name"] == nb_template: # Set match variable to true, add template details # to class variable and return debug log template_match = True - self.zbx_templates.append({"templateid": zbx_template['templateid'], - "name": zbx_template['name']}) + self.zbx_templates.append( + { + "templateid": zbx_template["templateid"], + "name": zbx_template["name"], + } + ) e = f"Host {self.name}: found template {zbx_template['name']}" self.logger.debug(e) # Return error should the template not be found in Zabbix if not template_match: - e = (f"Unable to find template {nb_template} " - f"for host {self.name} in Zabbix. Skipping host...") + e = ( + f"Unable to find template {nb_template} " + f"for host {self.name} in Zabbix. Skipping host..." + ) self.logger.warning(e) raise SyncInventoryError(e) @@ -280,8 +313,8 @@ class PhysicalDevice(): """ # Go through all groups for group in groups: - if group['name'] == self.hostgroup: - self.group_id = group['groupid'] + if group["name"] == self.hostgroup: + self.group_id = group["groupid"] e = f"Host {self.name}: matched group {group['name']}" self.logger.debug(e) return True @@ -295,10 +328,13 @@ class PhysicalDevice(): if self.zabbix_id: try: # Check if the Zabbix host exists in Zabbix - zbx_host = bool(self.zabbix.host.get(filter={'hostid': self.zabbix_id}, - output=[])) - e = (f"Host {self.name}: was already deleted from Zabbix." - " Removed link in NetBox.") + zbx_host = bool( + self.zabbix.host.get(filter={"hostid": self.zabbix_id}, output=[]) + ) + e = ( + f"Host {self.name}: was already deleted from Zabbix." + " Removed link in NetBox." 
+ ) if zbx_host: # Delete host should it exists self.zabbix.host.delete(self.zabbix_id) @@ -323,9 +359,9 @@ class PhysicalDevice(): """ # Validate the hostname or visible name field if not self.use_visible_name: - zbx_filter = {'host': self.name} + zbx_filter = {"host": self.name} else: - zbx_filter = {'name': self.visible_name} + zbx_filter = {"name": self.visible_name} host = self.zabbix.host.get(filter=zbx_filter, output=[]) return bool(host) @@ -351,6 +387,43 @@ class PhysicalDevice(): self.logger.warning(message) raise SyncInventoryError(message) from e + def set_usermacros(self): + """ + Generates Usermacros + """ + macros = ZabbixUsermacros( + self.nb, + self._usermacro_map(), + usermacro_sync, + logger=self.logger, + host=self.name, + ) + if macros.sync is False: + self.usermacros = [] + + self.usermacros = macros.generate() + return True + + def set_tags(self): + """ + Generates Host Tags + """ + tags = ZabbixTags( + self.nb, + self._tag_map(), + tag_sync, + tag_lower, + tag_name=tag_name, + tag_value=tag_value, + logger=self.logger, + host=self.name, + ) + if tags.sync is False: + self.tags = [] + + self.tags = tags.generate() + return True + def setProxy(self, proxy_list): """ Sets proxy or proxy group if this @@ -361,14 +434,16 @@ class PhysicalDevice(): # check if the key Zabbix is defined in the config context if "zabbix" not in self.nb.config_context: return False - if ("proxy" in self.nb.config_context["zabbix"] and - not self.nb.config_context["zabbix"]["proxy"]): + if ( + "proxy" in self.nb.config_context["zabbix"] + and not self.nb.config_context["zabbix"]["proxy"] + ): return False # Proxy group takes priority over a proxy due # to it being HA and therefore being more reliable # Includes proxy group fix since Zabbix <= 6 should ignore this proxy_types = ["proxy"] - if str(self.zabbix.version).startswith('7'): + if str(self.zabbix.version).startswith("7"): # Only insert groups in front of list for Zabbix7 proxy_types.insert(0, "proxy_group") for proxy_type in proxy_types: @@ -382,15 +457,23 @@ class PhysicalDevice(): continue # If the proxy name matches if proxy["name"] == proxy_name: - self.logger.debug(f"Host {self.name}: using {proxy['type']}" - f" {proxy_name}") + self.logger.debug( + f"Host {self.name}: using {proxy['type']}" f" {proxy_name}" + ) self.zbxproxy = proxy return True - self.logger.warning(f"Host {self.name}: unable to find proxy {proxy_name}") + self.logger.warning( + f"Host {self.name}: unable to find proxy {proxy_name}" + ) return False - def createInZabbix(self, groups, templates, proxies, - description="Host added by NetBox sync script."): + def createInZabbix( + self, + groups, + templates, + proxies, + description="Host added by NetBox sync script.", + ): """ Creates Zabbix host object with parameters from NetBox object. """ @@ -398,14 +481,16 @@ class PhysicalDevice(): if not self._zabbixHostnameExists(): # Set group and template ID's for host if not self.setZabbixGroupID(groups): - e = (f"Unable to find group '{self.hostgroup}' " - f"for host {self.name} in Zabbix.") + e = ( + f"Unable to find group '{self.hostgroup}' " + f"for host {self.name} in Zabbix." 
+ ) self.logger.warning(e) raise SyncInventoryError(e) self.zbxTemplatePrepper(templates) templateids = [] for template in self.zbx_templates: - templateids.append({'templateid': template['templateid']}) + templateids.append({"templateid": template["templateid"]}) # Set interface, group and template configuration interfaces = self.setInterfaceDetails() groups = [{"groupid": self.group_id}] @@ -421,13 +506,15 @@ class PhysicalDevice(): "templates": templateids, "description": description, "inventory_mode": self.inventory_mode, - "inventory": self.inventory + "inventory": self.inventory, + "macros": self.usermacros, + "tags": self.tags, } # If a Zabbix proxy or Zabbix Proxy group has been defined if self.zbxproxy: # If a lower version than 7 is used, we can assume that # the proxy is a normal proxy and not a proxy group - if not str(self.zabbix.version).startswith('7'): + if not str(self.zabbix.version).startswith("7"): create_data["proxy_hostid"] = self.zbxproxy["id"] else: # Configure either a proxy or proxy group @@ -438,9 +525,9 @@ class PhysicalDevice(): host = self.zabbix.host.create(**create_data) self.zabbix_id = host["hostids"][0] except APIRequestError as e: - e = f"Host {self.name}: Couldn't create. Zabbix returned {str(e)}." - self.logger.error(e) - raise SyncExternalError(e) from None + msg = f"Host {self.name}: Couldn't create. Zabbix returned {str(e)}." + self.logger.error(msg) + raise SyncExternalError(msg) from e # Set NetBox custom field to hostID value. self.nb.custom_fields[config["device_cf"]] = int(self.zabbix_id) self.nb.save() @@ -448,8 +535,9 @@ class PhysicalDevice(): self.logger.info(msg) self.create_journal_entry("success", msg) else: - e = f"Host {self.name}: Unable to add to Zabbix. Host already present." - self.logger.warning(e) + self.logger.error( + f"Host {self.name}: Unable to add to Zabbix. Host already present." + ) def createZabbixHostgroup(self, hostgroups): """ @@ -458,8 +546,8 @@ class PhysicalDevice(): """ final_data = [] # Check if the hostgroup is in a nested format and check each parent - for pos in range(len(self.hostgroup.split('/'))): - zabbix_hg = self.hostgroup.rsplit('/', pos)[0] + for pos in range(len(self.hostgroup.split("/"))): + zabbix_hg = self.hostgroup.rsplit("/", pos)[0] if self.lookupZabbixHostgroup(hostgroups, zabbix_hg): # Hostgroup already exists continue @@ -470,7 +558,9 @@ class PhysicalDevice(): e = f"Hostgroup '{zabbix_hg}': created in Zabbix." self.logger.info(e) # Add group to final data - final_data.append({'groupid': groupid["groupids"][0], 'name': zabbix_hg}) + final_data.append( + {"groupid": groupid["groupids"][0], "name": zabbix_hg} + ) except APIRequestError as e: msg = f"Hostgroup '{zabbix_hg}': unable to create. Zabbix returned {str(e)}." self.logger.error(msg) @@ -497,20 +587,24 @@ class PhysicalDevice(): try: self.zabbix.host.update(hostid=self.zabbix_id, **kwargs) except APIRequestError as e: - e = (f"Host {self.name}: Unable to update. " - f"Zabbix returned the following error: {str(e)}.") + e = ( + f"Host {self.name}: Unable to update. " + f"Zabbix returned the following error: {str(e)}." 
+ ) self.logger.error(e) raise SyncExternalError(e) from None self.logger.info(f"Updated host {self.name} with data {kwargs}.") self.create_journal_entry("info", "Updated host in Zabbix with latest NB data.") - def ConsistencyCheck(self, groups, templates, proxies, proxy_power, create_hostgroups): + def ConsistencyCheck( + self, groups, templates, proxies, proxy_power, create_hostgroups + ): # pylint: disable=too-many-branches, too-many-statements """ Checks if Zabbix object is still valid with NetBox parameters. """ # If group is found or if the hostgroup is nested - if not self.setZabbixGroupID(groups) or len(self.hostgroup.split('/')) > 1: + if not self.setZabbixGroupID(groups) or len(self.hostgroup.split("/")) > 1: if create_hostgroups: # Script is allowed to create a new hostgroup new_groups = self.createZabbixHostgroup(groups) @@ -521,47 +615,59 @@ class PhysicalDevice(): if not self.group_id: # Function returns true / false but also sets GroupID if not self.setZabbixGroupID(groups) and not create_hostgroups: - e = (f"Host {self.name}: different hostgroup is required but " - "unable to create hostgroup without generation permission.") + e = ( + f"Host {self.name}: different hostgroup is required but " + "unable to create hostgroup without generation permission." + ) self.logger.warning(e) raise SyncInventoryError(e) # Prepare templates and proxy config self.zbxTemplatePrepper(templates) self.setProxy(proxies) # Get host object from Zabbix - host = self.zabbix.host.get(filter={'hostid': self.zabbix_id}, - selectInterfaces=['type', 'ip', - 'port', 'details', - 'interfaceid'], - selectGroups=["groupid"], - selectHostGroups=["groupid"], - selectParentTemplates=["templateid"], - selectInventory=list(config["inventory_map"].values())) + host = self.zabbix.host.get( + filter={"hostid": self.zabbix_id}, + selectInterfaces=["type", "ip", "port", "details", "interfaceid"], + selectGroups=["groupid"], + selectHostGroups=["groupid"], + selectParentTemplates=["templateid"], + selectInventory=list(self._inventory_map().values()), + selectMacros=["macro", "value", "type", "description"], + selectTags=["tag", "value"], + ) if len(host) > 1: - e = (f"Got {len(host)} results for Zabbix hosts " - f"with ID {self.zabbix_id} - hostname {self.name}.") + e = ( + f"Got {len(host)} results for Zabbix hosts " + f"with ID {self.zabbix_id} - hostname {self.name}." + ) self.logger.error(e) raise SyncInventoryError(e) if len(host) == 0: - e = (f"Host {self.name}: No Zabbix host found. " - f"This is likely the result of a deleted Zabbix host " - f"without zeroing the ID field in NetBox.") + e = ( + f"Host {self.name}: No Zabbix host found. " + f"This is likely the result of a deleted Zabbix host " + f"without zeroing the ID field in NetBox." + ) self.logger.error(e) raise SyncInventoryError(e) host = host[0] if host["host"] == self.name: self.logger.debug(f"Host {self.name}: hostname in-sync.") else: - self.logger.warning(f"Host {self.name}: hostname OUT of sync. " - f"Received value: {host['host']}") + self.logger.warning( + f"Host {self.name}: hostname OUT of sync. " + f"Received value: {host['host']}" + ) self.updateZabbixHost(host=self.name) # Execute check depending on wether the name is special or not if self.use_visible_name: if host["name"] == self.visible_name: self.logger.debug(f"Host {self.name}: visible name in-sync.") else: - self.logger.warning(f"Host {self.name}: visible name OUT of sync." - f" Received value: {host['name']}") + self.logger.warning( + f"Host {self.name}: visible name OUT of sync." 
+ f" Received value: {host['name']}" + ) self.updateZabbixHost(name=self.visible_name) # Check if the templates are in-sync @@ -570,24 +676,24 @@ class PhysicalDevice(): # Prepare Templates for API parsing templateids = [] for template in self.zbx_templates: - templateids.append({'templateid': template['templateid']}) + templateids.append({"templateid": template["templateid"]}) # Update Zabbix with NB templates and clear any old / lost templates - self.updateZabbixHost(templates_clear=host["parentTemplates"], - templates=templateids) + self.updateZabbixHost( + templates_clear=host["parentTemplates"], templates=templateids + ) else: self.logger.debug(f"Host {self.name}: template(s) in-sync.") # Check if Zabbix version is 6 or higher. Issue #93 group_dictname = "hostgroups" - if str(self.zabbix.version).startswith(('6', '5')): + if str(self.zabbix.version).startswith(("6", "5")): group_dictname = "groups" for group in host[group_dictname]: if group["groupid"] == self.group_id: self.logger.debug(f"Host {self.name}: hostgroup in-sync.") break - else: self.logger.warning(f"Host {self.name}: hostgroup OUT of sync.") - self.updateZabbixHost(groups={'groupid': self.group_id}) + self.updateZabbixHost(groups={"groupid": self.group_id}) if int(host["status"]) == self.zabbix_state: self.logger.debug(f"Host {self.name}: status in-sync.") @@ -607,13 +713,15 @@ class PhysicalDevice(): else: self.logger.warning(f"Host {self.name}: proxy OUT of sync.") # Zabbix <= 6 patch - if not str(self.zabbix.version).startswith('7'): - self.updateZabbixHost(proxy_hostid=self.zbxproxy['id']) + if not str(self.zabbix.version).startswith("7"): + self.updateZabbixHost(proxy_hostid=self.zbxproxy["id"]) # Zabbix 7+ else: # Prepare data structure for updating either proxy or group - update_data = {self.zbxproxy["idtype"]: self.zbxproxy["id"], - "monitored_by": self.zbxproxy['monitored_by']} + update_data = { + self.zbxproxy["idtype"]: self.zbxproxy["id"], + "monitored_by": self.zbxproxy["monitored_by"], + } self.updateZabbixHost(**update_data) else: # No proxy is defined in NetBox @@ -625,8 +733,10 @@ class PhysicalDevice(): proxy_set = True if proxy_power and proxy_set: # Zabbix <= 6 fix - self.logger.warning(f"Host {self.name}: no proxy is configured in NetBox " - "but is configured in Zabbix. Removing proxy config in Zabbix") + self.logger.warning( + f"Host {self.name}: no proxy is configured in NetBox " + "but is configured in Zabbix. Removing proxy config in Zabbix" + ) if "proxy_hostid" in host and bool(host["proxy_hostid"]): self.updateZabbixHost(proxy_hostid=0) # Zabbix 7 proxy @@ -638,29 +748,60 @@ class PhysicalDevice(): # Checks if a proxy has been defined in Zabbix and if proxy_power config has been set if proxy_set and not proxy_power: # Display error message - self.logger.error(f"Host {self.name} is configured " - f"with proxy in Zabbix but not in NetBox. The" - " -p flag was ommited: no " - "changes have been made.") + self.logger.error( + f"Host {self.name} is configured " + f"with proxy in Zabbix but not in NetBox. The" + " -p flag was ommited: no " + "changes have been made." 
+            )
        if not proxy_set:
            self.logger.debug(f"Host {self.name}: proxy in-sync.")
        # Check host inventory mode
-        if str(host['inventory_mode']) == str(self.inventory_mode):
+        if str(host["inventory_mode"]) == str(self.inventory_mode):
            self.logger.debug(f"Host {self.name}: inventory_mode in-sync.")
        else:
            self.logger.warning(f"Host {self.name}: inventory_mode OUT of sync.")
            self.updateZabbixHost(inventory_mode=str(self.inventory_mode))
        if config["inventory_sync"] and self.inventory_mode in [0, 1]:
            # Check host inventory mapping
-            if host['inventory'] == self.inventory:
+            if host["inventory"] == self.inventory:
                self.logger.debug(f"Host {self.name}: inventory in-sync.")
            else:
                self.logger.warning(f"Host {self.name}: inventory OUT of sync.")
                self.updateZabbixHost(inventory=self.inventory)
+        # Check host usermacros
+        if usermacro_sync:
+            macros_filtered = []
+            # Do not re-sync secret usermacros unless sync is set to 'full'
+            if str(usermacro_sync).lower() != "full":
+                for m in deepcopy(self.usermacros):
+                    if m["type"] == str(1):
+                        # Remove the value as the api doesn't return it
+                        # this will allow us to only update usermacros that don't exist
+                        m.pop("value")
+                    macros_filtered.append(m)
+            if host["macros"] == self.usermacros or host["macros"] == macros_filtered:
+                self.logger.debug(f"Host {self.name}: usermacros in-sync.")
+            else:
+                self.logger.warning(f"Host {self.name}: usermacros OUT of sync.")
+                self.updateZabbixHost(macros=self.usermacros)
+
+        # Check host tags
+        if tag_sync:
+            if remove_duplicates(host["tags"], sortkey="tag") == self.tags:
+                self.logger.debug(f"Host {self.name}: tags in-sync.")
+            else:
+                self.logger.warning(f"Host {self.name}: tags OUT of sync.")
+                self.updateZabbixHost(tags=self.tags)
+
        # If only 1 interface has been found
        # pylint: disable=too-many-nested-blocks
-        if len(host['interfaces']) == 1:
+        if len(host["interfaces"]) == 1:
            updates = {}
            # Go through each key / item and check if it matches Zabbix
            for key, item in self.setInterfaceDetails()[0].items():
@@ -696,12 +837,14 @@ class PhysicalDevice():
                self.logger.warning(f"Host {self.name}: Interface OUT of sync.")
                if "type" in updates:
                    # Changing interface type not supported. Raise exception.
-                    e = (f"Host {self.name}: changing interface type to "
-                         f"{str(updates['type'])} is not supported.")
+                    e = (
+                        f"Host {self.name}: changing interface type to "
+                        f"{str(updates['type'])} is not supported."
+                    )
                    self.logger.error(e)
                    raise InterfaceConfigError(e)
                # Set interfaceID for Zabbix config
-                updates["interfaceid"] = host["interfaces"][0]['interfaceid']
+                updates["interfaceid"] = host["interfaces"][0]["interfaceid"]
                try:
                    # API call to Zabbix
                    self.zabbix.hostinterface.update(updates)
@@ -717,9 +860,11 @@ class PhysicalDevice():
                e = f"Host {self.name}: interface in-sync."
                self.logger.debug(e)
        else:
-            e = (f"Host {self.name} has unsupported interface configuration."
-                 f" Host has total of {len(host['interfaces'])} interfaces. "
-                 "Manual interfention required.")
+            e = (
+                f"Host {self.name} has unsupported interface configuration."
+                f" Host has total of {len(host['interfaces'])} interfaces. "
+                "Manual intervention required."
+            )
            self.logger.error(e)
            raise SyncInventoryError(e)
@@ -731,20 +876,25 @@ class PhysicalDevice():
        if self.journal:
            # Check if the severity is valid
            if severity not in ["info", "success", "warning", "danger"]:
-                self.logger.warning(f"Value {severity} not valid for NB journal entries.")
+                self.logger.warning(
+                    f"Value {severity} not valid for NB journal entries."
+ ) return False - journal = {"assigned_object_type": "dcim.device", - "assigned_object_id": self.id, - "kind": severity, - "comments": message - } + journal = { + "assigned_object_type": "dcim.device", + "assigned_object_id": self.id, + "kind": severity, + "comments": message, + } try: self.nb_journals.create(journal) self.logger.debug(f"Host {self.name}: Created journal entry in NetBox") return True except JournalError(e) as e: - self.logger.warning("Unable to create journal entry for " - f"{self.name}: NB returned {e}") + self.logger.warning( + "Unable to create journal entry for " + f"{self.name}: NB returned {e}" + ) return False return False @@ -767,10 +917,15 @@ class PhysicalDevice(): # and add this NB template to the list of successfull templates tmpls_from_zabbix.pop(pos) succesfull_templates.append(nb_tmpl) - self.logger.debug(f"Host {self.name}: template " - f"{nb_tmpl['name']} is present in Zabbix.") + self.logger.debug( + f"Host {self.name}: template " + f"{nb_tmpl['name']} is present in Zabbix." + ) break - if len(succesfull_templates) == len(self.zbx_templates) and len(tmpls_from_zabbix) == 0: + if ( + len(succesfull_templates) == len(self.zbx_templates) + and len(tmpls_from_zabbix) == 0 + ): # All of the NetBox templates have been confirmed as successfull # and the ZBX template list is empty. This means that # all of the templates match. diff --git a/modules/exceptions.py b/modules/exceptions.py index 856433a..ddac2b0 100644 --- a/modules/exceptions.py +++ b/modules/exceptions.py @@ -2,32 +2,47 @@ """ All custom exceptions used for Exception generation """ + + class SyncError(Exception): - """ Class SyncError """ + """Class SyncError""" + class JournalError(Exception): - """ Class SyncError """ + """Class SyncError""" + class SyncExternalError(SyncError): - """ Class SyncExternalError """ + """Class SyncExternalError""" + class SyncInventoryError(SyncError): - """ Class SyncInventoryError """ + """Class SyncInventoryError""" + class SyncDuplicateError(SyncError): - """ Class SyncDuplicateError """ + """Class SyncDuplicateError""" + class EnvironmentVarError(SyncError): - """ Class EnvironmentVarError """ + """Class EnvironmentVarError""" + class InterfaceConfigError(SyncError): - """ Class InterfaceConfigError """ + """Class InterfaceConfigError""" + class ProxyConfigError(SyncError): - """ Class ProxyConfigError """ + """Class ProxyConfigError""" + class HostgroupError(SyncError): - """ Class HostgroupError """ + """Class HostgroupError""" + class TemplateError(SyncError): - """ Class TemplateError """ + """Class TemplateError""" + + +class UsermacroError(SyncError): + """Class UsermacroError""" diff --git a/modules/hostgroups.py b/modules/hostgroups.py index 6e2db75..d1350bd 100644 --- a/modules/hostgroups.py +++ b/modules/hostgroups.py @@ -1,14 +1,27 @@ """Module for all hostgroup related code""" + from logging import getLogger + from modules.exceptions import HostgroupError from modules.tools import build_path -class Hostgroup(): + +class Hostgroup: """Hostgroup class for devices and VM's Takes type (vm or dev) and NB object""" - def __init__(self, obj_type, nb_obj, version, logger=None, #pylint: disable=too-many-arguments, too-many-positional-arguments - nested_sitegroup_flag=False, nested_region_flag=False, - nb_regions=None, nb_groups=None): + + # pylint: disable=too-many-arguments, disable=too-many-positional-arguments + def __init__( + self, + obj_type, + nb_obj, + version, + logger=None, + nested_sitegroup_flag=False, + nested_region_flag=False, + 
nb_regions=None, + nb_groups=None, + ): self.logger = logger if logger else getLogger(__name__) if obj_type not in ("vm", "dev"): msg = f"Unable to create hostgroup with type {type}" @@ -19,8 +32,9 @@ class Hostgroup(): self.name = self.nb.name self.nb_version = version # Used for nested data objects - self.set_nesting(nested_sitegroup_flag, nested_region_flag, - nb_groups, nb_regions) + self.set_nesting( + nested_sitegroup_flag, nested_region_flag, nb_groups, nb_regions + ) self._set_format_options() def __str__(self): @@ -49,20 +63,28 @@ class Hostgroup(): format_options["site_group"] = None if self.nb.site: if self.nb.site.region: - format_options["region"] = self.generate_parents("region", - str(self.nb.site.region)) + format_options["region"] = self.generate_parents( + "region", str(self.nb.site.region) + ) if self.nb.site.group: - format_options["site_group"] = self.generate_parents("site_group", - str(self.nb.site.group)) + format_options["site_group"] = self.generate_parents( + "site_group", str(self.nb.site.group) + ) format_options["role"] = role format_options["site"] = self.nb.site.name if self.nb.site else None format_options["tenant"] = str(self.nb.tenant) if self.nb.tenant else None - format_options["tenant_group"] = str(self.nb.tenant.group) if self.nb.tenant else None - format_options["platform"] = self.nb.platform.name if self.nb.platform else None + format_options["tenant_group"] = ( + str(self.nb.tenant.group) if self.nb.tenant else None + ) + format_options["platform"] = ( + self.nb.platform.name if self.nb.platform else None + ) # Variables only applicable for devices if self.type == "dev": format_options["manufacturer"] = self.nb.device_type.manufacturer.name - format_options["location"] = str(self.nb.location) if self.nb.location else None + format_options["location"] = ( + str(self.nb.location) if self.nb.location else None + ) # Variables only applicable for VM's if self.type == "vm": # Check if a cluster is configured. Could also be configured in a site. @@ -72,17 +94,22 @@ class Hostgroup(): self.format_options = format_options - def set_nesting(self, nested_sitegroup_flag, nested_region_flag, - nb_groups, nb_regions): + def set_nesting( + self, nested_sitegroup_flag, nested_region_flag, nb_groups, nb_regions + ): """Set nesting options for this Hostgroup""" - self.nested_objects = {"site_group": {"flag": nested_sitegroup_flag, "data": nb_groups}, - "region": {"flag": nested_region_flag, "data": nb_regions}} + self.nested_objects = { + "site_group": {"flag": nested_sitegroup_flag, "data": nb_groups}, + "region": {"flag": nested_region_flag, "data": nb_regions}, + } def generate(self, hg_format=None): """Generate hostgroup based on a provided format""" # Set format to default in case its not specified if not hg_format: - hg_format = "site/manufacturer/role" if self.type == "dev" else "cluster/role" + hg_format = ( + "site/manufacturer/role" if self.type == "dev" else "cluster/role" + ) # Split all given names hg_output = [] hg_items = hg_format.split("/") @@ -93,8 +120,10 @@ class Hostgroup(): cf_data = self.custom_field_lookup(hg_item) # CF does not exist if not cf_data["result"]: - msg = (f"Unable to generate hostgroup for host {self.name}. " - f"Item type {hg_item} not supported.") + msg = ( + f"Unable to generate hostgroup for host {self.name}. " + f"Item type {hg_item} not supported." 
+ ) self.logger.error(msg) raise HostgroupError(msg) # CF data is populated @@ -109,10 +138,12 @@ class Hostgroup(): # Check if the hostgroup is populated with at least one item. if bool(hg_output): return "/".join(hg_output) - msg = (f"Unable to generate hostgroup for host {self.name}." - " Not enough valid items. This is most likely" - " due to the use of custom fields that are empty" - " or an invalid hostgroup format.") + msg = ( + f"Unable to generate hostgroup for host {self.name}." + " Not enough valid items. This is most likely" + " due to the use of custom fields that are empty" + " or an invalid hostgroup format." + ) self.logger.error(msg) raise HostgroupError(msg) @@ -157,7 +188,9 @@ class Hostgroup(): return child_object # If the nested flag is True, perform parent calculation if self.nested_objects[nest_type]["flag"]: - final_nested_object = build_path(child_object, self.nested_objects[nest_type]["data"]) + final_nested_object = build_path( + child_object, self.nested_objects[nest_type]["data"] + ) return "/".join(final_nested_object) # Nesting is not allowed for this object. Return child_object return child_object diff --git a/modules/interface.py b/modules/interface.py index e4413c6..1bd1e37 100644 --- a/modules/interface.py +++ b/modules/interface.py @@ -4,7 +4,8 @@ All of the Zabbix interface related configuration """ from modules.exceptions import InterfaceConfigError -class ZabbixInterface(): + +class ZabbixInterface: """Class that represents a Zabbix interface.""" def __init__(self, context, ip): @@ -15,21 +16,16 @@ class ZabbixInterface(): def _set_default_port(self): """Sets default TCP / UDP port for different interface types""" - interface_mapping = { - 1: 10050, - 2: 161, - 3: 623, - 4: 12345 - } + interface_mapping = {1: 10050, 2: 161, 3: 623, 4: 12345} # Check if interface type is listed in mapper. - if self.interface['type'] not in interface_mapping: + if self.interface["type"] not in interface_mapping: return False # Set default port to interface - self.interface["port"] = str(interface_mapping[self.interface['type']]) + self.interface["port"] = str(interface_mapping[self.interface["type"]]) return True def get_context(self): - """ check if NetBox custom context has been defined. """ + """check if NetBox custom context has been defined.""" if "zabbix" in self.context: zabbix = self.context["zabbix"] if "interface_type" in zabbix: @@ -43,7 +39,7 @@ class ZabbixInterface(): return False def set_snmp(self): - """ Check if interface is type SNMP """ + """Check if interface is type SNMP""" # pylint: disable=too-many-branches if self.interface["type"] == 2: # Checks if SNMP settings are defined in NetBox @@ -63,7 +59,7 @@ class ZabbixInterface(): e = "SNMP version option is not defined." 
raise InterfaceConfigError(e) # If version 1 or 2 is used, get community string - if self.interface["details"]["version"] in ['1','2']: + if self.interface["details"]["version"] in ["1", "2"]: if "community" in snmp: # Set SNMP community to confix context value community = snmp["community"] @@ -73,10 +69,16 @@ class ZabbixInterface(): self.interface["details"]["community"] = str(community) # If version 3 has been used, get all # SNMPv3 NetBox related configs - elif self.interface["details"]["version"] == '3': - items = ["securityname", "securitylevel", "authpassphrase", - "privpassphrase", "authprotocol", "privprotocol", - "contextname"] + elif self.interface["details"]["version"] == "3": + items = [ + "securityname", + "securitylevel", + "authpassphrase", + "privpassphrase", + "authprotocol", + "privprotocol", + "contextname", + ] for key, item in snmp.items(): if key in items: self.interface["details"][key] = str(item) @@ -91,13 +93,15 @@ class ZabbixInterface(): raise InterfaceConfigError(e) def set_default_snmp(self): - """ Set default config to SNMPv2, port 161 and community macro. """ + """Set default config to SNMPv2, port 161 and community macro.""" self.interface = self.skelet self.interface["type"] = "2" self.interface["port"] = "161" - self.interface["details"] = {"version": "2", - "community": "{$SNMP_COMMUNITY}", - "bulk": "1"} + self.interface["details"] = { + "version": "2", + "community": "{$SNMP_COMMUNITY}", + "bulk": "1", + } def set_default_agent(self): """Sets interface to Zabbix agent defaults""" diff --git a/modules/logging.py b/modules/logging.py new file mode 100644 index 0000000..c36c2c1 --- /dev/null +++ b/modules/logging.py @@ -0,0 +1,40 @@ +""" +Logging module for Netbox-Zabbix-sync +""" + +import logging +from os import path + +logger = logging.getLogger("NetBox-Zabbix-sync") + + +def get_logger(): + """ + Return the logger for Netbox Zabbix Sync + """ + return logger + + +def setup_logger(): + """ + Prepare a logger with stream and file handlers + """ + # Set logging + lgout = logging.StreamHandler() + lgfile = logging.FileHandler( + path.join(path.dirname(path.realpath(__file__)), "sync.log") + ) + + logging.basicConfig( + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + level=logging.WARNING, + handlers=[lgout, lgfile], + ) + + +def set_log_levels(root_level, own_level): + """ + Configure log levels for root and Netbox-Zabbix-sync logger + """ + logging.getLogger().setLevel(root_level) + logger.setLevel(own_level) diff --git a/modules/tags.py b/modules/tags.py new file mode 100644 index 0000000..441ebe2 --- /dev/null +++ b/modules/tags.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, logging-fstring-interpolation +""" +All of the Zabbix Usermacro related configuration +""" +from logging import getLogger + +from modules.tools import field_mapper, remove_duplicates + + +class ZabbixTags: + """Class that represents a Zabbix interface.""" + + def __init__( + self, + nb, + tag_map, + tag_sync, + tag_lower=True, + tag_name=None, + tag_value=None, + logger=None, + host=None, + ): + self.nb = nb + self.name = host if host else nb.name + self.tag_map = tag_map + self.logger = logger if logger else getLogger(__name__) + self.tags = {} + self.lower = tag_lower + self.tag_name = tag_name + self.tag_value = tag_value + self.tag_sync = tag_sync + self.sync = False + self._set_config() + + def __repr__(self): + return self.name + + def __str__(self): + return 
self.__repr__() + + def _set_config(self): + """ + Setup class + """ + if self.tag_sync: + self.sync = True + + return True + + def validate_tag(self, tag_name): + """ + Validates tag name + """ + if tag_name and isinstance(tag_name, str) and len(tag_name) <= 256: + return True + return False + + def validate_value(self, tag_value): + """ + Validates tag value + """ + if tag_value and isinstance(tag_value, str) and len(tag_value) <= 256: + return True + return False + + def render_tag(self, tag_name, tag_value): + """ + Renders a tag + """ + tag = {} + if self.validate_tag(tag_name): + if self.lower: + tag["tag"] = tag_name.lower() + else: + tag["tag"] = tag_name + else: + self.logger.warning(f"Tag {tag_name} is not a valid tag name, skipping.") + return False + + if self.validate_value(tag_value): + if self.lower: + tag["value"] = tag_value.lower() + else: + tag["value"] = tag_value + else: + self.logger.warning( + f"Tag {tag_name} has an invalid value: '{tag_value}', skipping." + ) + return False + return tag + + def generate(self): + """ + Generate full set of Usermacros + """ + # pylint: disable=too-many-branches + tags = [] + # Parse the field mapper for tags + if self.tag_map: + self.logger.debug(f"Host {self.nb.name}: Starting tag mapper") + field_tags = field_mapper(self.nb.name, self.tag_map, self.nb, self.logger) + for tag, value in field_tags.items(): + t = self.render_tag(tag, value) + if t: + tags.append(t) + + # Parse NetBox config context for tags + if ( + "zabbix" in self.nb.config_context + and "tags" in self.nb.config_context["zabbix"] + and isinstance(self.nb.config_context["zabbix"]["tags"], list) + ): + for tag in self.nb.config_context["zabbix"]["tags"]: + if isinstance(tag, dict): + for tagname, value in tag.items(): + t = self.render_tag(tagname, value) + if t: + tags.append(t) + + # Pull in NetBox device tags if tag_name is set + if self.tag_name and isinstance(self.tag_name, str): + for tag in self.nb.tags: + if self.tag_value.lower() in ["display", "name", "slug"]: + value = tag[self.tag_value] + else: + value = tag["name"] + t = self.render_tag(self.tag_name, value) + if t: + tags.append(t) + + return remove_duplicates(tags, sortkey="tag") diff --git a/modules/tools.py b/modules/tools.py index f722524..791025d 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -1,11 +1,14 @@ """A collection of tools used by several classes""" + + def convert_recordset(recordset): - """ Converts netbox RedcordSet to list of dicts. """ + """Converts netbox RedcordSet to list of dicts.""" recordlist = [] for record in recordset: recordlist.append(record.__dict__) return recordlist + def build_path(endpoint, list_of_dicts): """ Builds a path list of related parent/child items. @@ -13,16 +16,17 @@ def build_path(endpoint, list_of_dicts): be used in hostgroups. 
""" item_path = [] - itemlist = [i for i in list_of_dicts if i['name'] == endpoint] + itemlist = [i for i in list_of_dicts if i["name"] == endpoint] item = itemlist[0] if len(itemlist) == 1 else None - item_path.append(item['name']) - while item['_depth'] > 0: - itemlist = [i for i in list_of_dicts if i['name'] == str(item['parent'])] + item_path.append(item["name"]) + while item["_depth"] > 0: + itemlist = [i for i in list_of_dicts if i["name"] == str(item["parent"])] item = itemlist[0] if len(itemlist) == 1 else None - item_path.append(item['name']) + item_path.append(item["name"]) item_path.reverse() return item_path + def proxy_prepper(proxy_list, proxy_group_list): """ Function that takes 2 lists and converts them using a @@ -42,3 +46,56 @@ def proxy_prepper(proxy_list, proxy_group_list): group["monitored_by"] = 2 output.append(group) return output + + +def field_mapper(host, mapper, nbdevice, logger): + """ + Maps NetBox field data to Zabbix properties. + Used for Inventory, Usermacros and Tag mappings. + """ + data = {} + # Let's build an dict for each property in the map + for nb_field, zbx_field in mapper.items(): + field_list = nb_field.split("/") # convert str to list based on delimiter + # start at the base of the dict... + value = nbdevice + # ... and step through the dict till we find the needed value + for item in field_list: + value = value[item] if value else None + # Check if the result is usable and expected + # We want to apply any int or float 0 values, + # even if python thinks those are empty. + if (value and isinstance(value, int | float | str)) or ( + isinstance(value, int | float) and int(value) == 0 + ): + data[zbx_field] = str(value) + elif not value: + # empty value should just be an empty string for API compatibility + logger.debug( + f"Host {host}: NetBox lookup for " + f"'{nb_field}' returned an empty value" + ) + data[zbx_field] = "" + else: + # Value is not a string or numeral, probably not what the user expected. + logger.error( + f"Host {host}: Lookup for '{nb_field}'" + " returned an unexpected type: it will be skipped." + ) + logger.debug( + f"Host {host}: Field mapping complete. 
" + f"Mapped {len(list(filter(None, data.values())))} field(s)" + ) + return data + + +def remove_duplicates(input_list, sortkey=None): + """ + Removes duplicate entries from a list and sorts the list + """ + output_list = [] + if isinstance(input_list, list): + output_list = [dict(t) for t in {tuple(d.items()) for d in input_list}] + if sortkey and isinstance(sortkey, str): + output_list.sort(key=lambda x: x[sortkey]) + return output_list diff --git a/modules/usermacros.py b/modules/usermacros.py new file mode 100644 index 0000000..c1d783b --- /dev/null +++ b/modules/usermacros.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, logging-fstring-interpolation +""" +All of the Zabbix Usermacro related configuration +""" +from logging import getLogger +from re import match + +from modules.tools import field_mapper + + +class ZabbixUsermacros: + """Class that represents a Zabbix interface.""" + + def __init__(self, nb, usermacro_map, usermacro_sync, logger=None, host=None): + self.nb = nb + self.name = host if host else nb.name + self.usermacro_map = usermacro_map + self.logger = logger if logger else getLogger(__name__) + self.usermacros = {} + self.usermacro_sync = usermacro_sync + self.sync = False + self.force_sync = False + self._set_config() + + def __repr__(self): + return self.name + + def __str__(self): + return self.__repr__() + + def _set_config(self): + """ + Setup class + """ + if str(self.usermacro_sync).lower() == "full": + self.sync = True + self.force_sync = True + elif self.usermacro_sync: + self.sync = True + return True + + def validate_macro(self, macro_name): + """ + Validates usermacro name + """ + pattern = r"\{\$[A-Z0-9\._]*(\:.*)?\}" + return match(pattern, macro_name) + + def render_macro(self, macro_name, macro_properties): + """ + Renders a full usermacro from partial input + """ + macro = {} + macrotypes = {"text": 0, "secret": 1, "vault": 2} + if self.validate_macro(macro_name): + macro["macro"] = str(macro_name) + if isinstance(macro_properties, dict): + if not "value" in macro_properties: + self.logger.warning(f"Usermacro {macro_name} has no value, skipping.") + return False + macro["value"] = macro_properties["value"] + + if ( + "type" in macro_properties + and macro_properties["type"].lower() in macrotypes + ): + macro["type"] = str(macrotypes[macro_properties["type"]]) + else: + macro["type"] = str(0) + + if "description" in macro_properties and isinstance( + macro_properties["description"], str + ): + macro["description"] = macro_properties["description"] + else: + macro["description"] = "" + + elif isinstance(macro_properties, str) and macro_properties: + macro["value"] = macro_properties + macro["type"] = str(0) + macro["description"] = "" + + else: + self.logger.warning(f"Usermacro {macro_name} has no value, skipping.") + return False + else: + self.logger.error( + f"Usermacro {macro_name} is not a valid usermacro name, skipping." 
+            )
+            return False
+        return macro
+
+    def generate(self):
+        """
+        Generate full set of Usermacros
+        """
+        macros = []
+        # Parse the field mapper for usermacros
+        if self.usermacro_map:
+            self.logger.debug(f"Host {self.nb.name}: Starting usermacro mapper")
+            field_macros = field_mapper(
+                self.nb.name, self.usermacro_map, self.nb, self.logger
+            )
+            for macro, value in field_macros.items():
+                m = self.render_macro(macro, value)
+                if m:
+                    macros.append(m)
+        # Parse NetBox config context for usermacros
+        if (
+            "zabbix" in self.nb.config_context
+            and "usermacros" in self.nb.config_context["zabbix"]
+        ):
+            for macro, properties in self.nb.config_context["zabbix"][
+                "usermacros"
+            ].items():
+                m = self.render_macro(macro, properties)
+                if m:
+                    macros.append(m)
+        return macros
diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py
index 80dadc0..acabd1d 100644
--- a/modules/virtual_machine.py
+++ b/modules/virtual_machine.py
@@ -1,9 +1,10 @@
-#!/usr/bin/env python3
 # pylint: disable=duplicate-code
 """Module that hosts all functions for virtual machine processing"""
 from modules.device import PhysicalDevice
+from modules.exceptions import InterfaceConfigError, SyncInventoryError, TemplateError
 from modules.hostgroups import Hostgroup
 from modules.interface import ZabbixInterface
+
 from modules.exceptions import (TemplateError, InterfaceConfigError,
                                 SyncInventoryError)
 from modules.config import load_config
@@ -13,24 +14,42 @@ config = load_config()
 class VirtualMachine(PhysicalDevice):
     """Model for virtual machines"""
+
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.hostgroup = None
         self.zbx_template_names = None
+    def _inventory_map(self):
+        """use VM inventory maps"""
+        return config["vm_inventory_map"]
+
+    def _usermacro_map(self):
+        """use VM usermacro maps"""
+        return config["vm_usermacro_map"]
+
+    def _tag_map(self):
+        """use VM tag maps"""
+        return config["vm_tag_map"]
+
     def set_hostgroup(self, hg_format, nb_site_groups, nb_regions):
         """Set the hostgroup for this device"""
         # Create new Hostgroup instance
-        hg = Hostgroup("vm", self.nb, self.nb_api_version, logger=self.logger,
-                       nested_sitegroup_flag=config["traverse_site_groups"],
-                       nested_region_flag=config["traverse_regions"],
-                       nb_groups=nb_site_groups,
-                       nb_regions=nb_regions)
+        hg = Hostgroup(
+            "vm",
+            self.nb,
+            self.nb_api_version,
+            logger=self.logger,
+            nested_sitegroup_flag=config["traverse_site_groups"],
+            nested_region_flag=config["traverse_regions"],
+            nb_groups=nb_site_groups,
+            nb_regions=nb_regions,
+        )
         # Generate hostgroup based on hostgroup format
         self.hostgroup = hg.generate(hg_format)
     def set_vm_template(self):
-        """ Set Template for VMs. Overwrites default class
+        """Set Template for VMs. 
Overwrites default class to skip a lookup of custom fields.""" # Gather templates ONLY from the device specific context try: diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 6129f92..ec14b4e 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -2,38 +2,27 @@ # pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation """NetBox to Zabbix sync script.""" -import logging import argparse +import logging import ssl -from os import environ, path, sys +from os import environ, sys + from pynetbox import api from pynetbox.core.query import RequestError as NBRequestError from requests.exceptions import ConnectionError as RequestsConnectionError -from zabbix_utils import ZabbixAPI, APIRequestError, ProcessingError +from zabbix_utils import APIRequestError, ProcessingError, ZabbixAPI from modules.config import load_config from modules.device import PhysicalDevice -from modules.virtual_machine import VirtualMachine -from modules.tools import convert_recordset, proxy_prepper from modules.exceptions import EnvironmentVarError, HostgroupError, SyncError +from modules.logging import get_logger, set_log_levels, setup_logger +from modules.tools import convert_recordset, proxy_prepper +from modules.virtual_machine import VirtualMachine config = load_config() -# Set logging -log_format = logging.Formatter('%(asctime)s - %(name)s - ' - '%(levelname)s - %(message)s') -lgout = logging.StreamHandler() -lgout.setFormatter(log_format) -lgout.setLevel(logging.DEBUG) -lgfile = logging.FileHandler(path.join(path.dirname( - path.realpath(__file__)), "sync.log")) -lgfile.setFormatter(log_format) -lgfile.setLevel(logging.DEBUG) - -logger = logging.getLogger("NetBox-Zabbix-sync") -logger.addHandler(lgout) -logger.addHandler(lgfile) -logger.setLevel(logging.WARNING) +setup_logger() +logger = get_logger() def main(arguments): @@ -41,7 +30,14 @@ def main(arguments): # pylint: disable=too-many-branches, too-many-statements # set environment variables if arguments.verbose: - logger.setLevel(logging.DEBUG) + set_log_levels(logging.WARNING, logging.INFO) + if arguments.debug: + set_log_levels(logging.WARNING, logging.DEBUG) + if arguments.debug_all: + set_log_levels(logging.DEBUG, logging.DEBUG) + if arguments.quiet: + set_log_levels(logging.ERROR, logging.ERROR) + env_vars = ["ZABBIX_HOST", "NETBOX_HOST", "NETBOX_TOKEN"] if "ZABBIX_TOKEN" in environ: env_vars.append("ZABBIX_TOKEN") @@ -69,15 +65,26 @@ def main(arguments): netbox = api(netbox_host, token=netbox_token, threading=True) # Check if the provided Hostgroup layout is valid hg_objects = config["hostgroup_format"].split("/") - allowed_objects = ["location", "role", "manufacturer", "region", - "site", "site_group", "tenant", "tenant_group"] + allowed_objects = [ + "location", + "role", + "manufacturer", + "region", + "site", + "site_group", + "tenant", + "tenant_group", + ] # Create API call to get all custom fields which are on the device objects try: - device_cfs = list(netbox.extras.custom_fields.filter( - type="text", content_type_id=23)) + device_cfs = list( + netbox.extras.custom_fields.filter(type="text", content_type_id=23) + ) except RequestsConnectionError: - logger.error(f"Unable to connect to NetBox with URL {netbox_host}." - " Please check the URL and status of NetBox.") + logger.error( + f"Unable to connect to NetBox with URL {netbox_host}." + " Please check the URL and status of NetBox." 
+ ) sys.exit(1) except NBRequestError as e: logger.error(f"NetBox error: {e}") @@ -86,8 +93,10 @@ def main(arguments): allowed_objects.append(cf.name) for hg_object in hg_objects: if hg_object not in allowed_objects: - e = (f"Hostgroup item {hg_object} is not valid. Make sure you" - " use valid items and seperate them with '/'.") + e = ( + f"Hostgroup item {hg_object} is not valid. Make sure you" + " use valid items and seperate them with '/'." + ) logger.error(e) raise HostgroupError(e) # Set Zabbix API @@ -99,18 +108,18 @@ def main(arguments): ssl_ctx.load_verify_locations(environ["REQUESTS_CA_BUNDLE"]) if not zabbix_token: - zabbix = ZabbixAPI(zabbix_host, user=zabbix_user, - password=zabbix_pass, ssl_context=ssl_ctx) - else: zabbix = ZabbixAPI( - zabbix_host, token=zabbix_token, ssl_context=ssl_ctx) + zabbix_host, user=zabbix_user, password=zabbix_pass, ssl_context=ssl_ctx + ) + else: + zabbix = ZabbixAPI(zabbix_host, token=zabbix_token, ssl_context=ssl_ctx) zabbix.check_auth() except (APIRequestError, ProcessingError) as e: e = f"Zabbix returned the following error: {str(e)}" logger.error(e) sys.exit(1) # Set API parameter mapping based on API version - if not str(zabbix.version).startswith('7'): + if not str(zabbix.version).startswith("7"): proxy_name = "host" else: proxy_name = "name" @@ -123,18 +132,17 @@ def main(arguments): netbox_site_groups = convert_recordset((netbox.dcim.site_groups.all())) netbox_regions = convert_recordset(netbox.dcim.regions.all()) netbox_journals = netbox.extras.journal_entries - zabbix_groups = zabbix.hostgroup.get(output=['groupid', 'name']) - zabbix_templates = zabbix.template.get(output=['templateid', 'name']) - zabbix_proxies = zabbix.proxy.get(output=['proxyid', proxy_name]) + zabbix_groups = zabbix.hostgroup.get(output=["groupid", "name"]) + zabbix_templates = zabbix.template.get(output=["templateid", "name"]) + zabbix_proxies = zabbix.proxy.get(output=["proxyid", proxy_name]) # Set empty list for proxy processing Zabbix <= 6 zabbix_proxygroups = [] - if str(zabbix.version).startswith('7'): - zabbix_proxygroups = zabbix.proxygroup.get( - output=["proxy_groupid", "name"]) + if str(zabbix.version).startswith("7"): + zabbix_proxygroups = zabbix.proxygroup.get(output=["proxy_groupid", "name"]) # Sanitize proxy data if proxy_name == "host": for proxy in zabbix_proxies: - proxy['name'] = proxy.pop('host') + proxy["name"] = proxy.pop("host") # Prepare list of all proxy and proxy_groups zabbix_proxy_list = proxy_prepper(zabbix_proxies, zabbix_proxygroups) @@ -156,27 +164,36 @@ def main(arguments): # Check if a valid hostgroup has been found for this VM. if not vm.hostgroup: continue + vm.set_inventory(nb_vm) + vm.set_usermacros() + vm.set_tags() # Checks if device is in cleanup state if vm.status in config["zabbix_device_removal"]: if vm.zabbix_id: # Delete device from Zabbix # and remove hostID from NetBox. vm.cleanup() - logger.info(f"VM {vm.name}: cleanup complete") + logger.debug(f"VM {vm.name}: cleanup complete") continue # Device has been added to NetBox # but is not in Activate state - logger.info(f"VM {vm.name}: skipping since this VM is " - f"not in the active state.") + logger.info( + f"VM {vm.name}: skipping since this VM is " + f"not in the active state." 
+ ) continue # Check if the VM is in the disabled state if vm.status in config["zabbix_device_disable"]: vm.zabbix_state = 1 # Check if VM is already in Zabbix if vm.zabbix_id: - vm.ConsistencyCheck(zabbix_groups, zabbix_templates, - zabbix_proxy_list, config["full_proxy_sync"], - config["create_hostgroups"]) + vm.ConsistencyCheck( + zabbix_groups, + zabbix_templates, + zabbix_proxy_list, + config["full_proxy_sync"], + config["create_hostgroups"], + ) continue # Add hostgroup is config is set if config["create_hostgroups"]: @@ -187,8 +204,7 @@ def main(arguments): # Add new hostgroups to zabbix group list zabbix_groups.append(group) # Add VM to Zabbix - vm.createInZabbix(zabbix_groups, zabbix_templates, - zabbix_proxy_list) + vm.createInZabbix(zabbix_groups, zabbix_templates, zabbix_proxy_list) except SyncError: pass @@ -209,19 +225,22 @@ def main(arguments): if not device.hostgroup: continue device.set_inventory(nb_device) + device.set_usermacros() + device.set_tags() # Checks if device is part of cluster. # Requires clustering variable if device.isCluster() and config["clustering"]: # Check if device is primary or secondary if device.promoteMasterDevice(): - e = (f"Device {device.name}: is " - f"part of cluster and primary.") + e = f"Device {device.name}: is " f"part of cluster and primary." logger.info(e) else: # Device is secondary in cluster. # Don't continue with this device. - e = (f"Device {device.name}: is part of cluster " - f"but not primary. Skipping this host...") + e = ( + f"Device {device.name}: is part of cluster " + f"but not primary. Skipping this host..." + ) logger.info(e) continue # Checks if device is in cleanup state @@ -234,17 +253,23 @@ def main(arguments): continue # Device has been added to NetBox # but is not in Activate state - logger.info(f"Device {device.name}: skipping since this device is " - f"not in the active state.") + logger.info( + f"Device {device.name}: skipping since this device is " + f"not in the active state." + ) continue # Check if the device is in the disabled state if device.status in config["zabbix_device_disable"]: device.zabbix_state = 1 # Check if device is already in Zabbix if device.zabbix_id: - device.ConsistencyCheck(zabbix_groups, zabbix_templates, - zabbix_proxy_list, config["full_proxy_sync"], - config["create_hostgroups"]) + device.ConsistencyCheck( + zabbix_groups, + zabbix_templates, + zabbix_proxy_list, + config["full_proxy_sync"], + config["create_hostgroups"], + ) continue # Add hostgroup is config is set if config["create_hostgroups"]: @@ -255,17 +280,27 @@ def main(arguments): # Add new hostgroups to zabbix group list zabbix_groups.append(group) # Add device to Zabbix - device.createInZabbix(zabbix_groups, zabbix_templates, - zabbix_proxy_list) + device.createInZabbix(zabbix_groups, zabbix_templates, zabbix_proxy_list) except SyncError: pass if __name__ == "__main__": parser = argparse.ArgumentParser( - description='A script to sync Zabbix with NetBox device data.' + description="A script to sync Zabbix with NetBox device data." 
)
-    parser.add_argument("-v", "--verbose", help="Turn on debugging.",
-                        action="store_true")
+    parser.add_argument(
+        "-v", "--verbose", help="Turn on verbose output.", action="store_true"
+    )
+    parser.add_argument(
+        "-vv", "--debug", help="Turn on debugging.", action="store_true"
+    )
+    parser.add_argument(
+        "-vvv",
+        "--debug-all",
+        help="Turn on debugging for all modules.",
+        action="store_true",
+    )
+    parser.add_argument("-q", "--quiet", help="Turn off warnings.", action="store_true")
     args = parser.parse_args()
     main(args)
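
Note on the new usermacro and tag modules: both `ZabbixUsermacros.generate()` and `ZabbixTags.generate()` read per-host overrides from the `zabbix` key of the NetBox config context. The sketch below illustrates the structure those methods expect; the macro names, tag names and values are illustrative assumptions and are not part of this diff.

```python
# Illustrative sketch only: an example NetBox config context as consumed by
# ZabbixUsermacros.generate() and ZabbixTags.generate() introduced above.
# All names and values here are assumptions, not taken from the diff.
example_config_context = {
    "zabbix": {
        # A macro value can be a plain string (rendered as a text macro) or a
        # dict carrying "value", an optional "type" (text/secret/vault) and an
        # optional "description".
        "usermacros": {
            "{$SNMP_COMMUNITY}": "public",
            "{$API_SECRET}": {"value": "s3cr3t", "type": "secret"},
        },
        # Tags are a list of dicts; each key/value pair becomes one Zabbix tag.
        "tags": [
            {"environment": "production"},
            {"owner": "network-team"},
        ],
    }
}
```

During a sync run these entries are picked up by the new `set_usermacros()` and `set_tags()` calls in `netbox_zabbix_sync.py`, in addition to anything produced by the `usermacro_map` and `tag_map` field mappers.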