From c76e36ad3849df5c4085acf612e23b3bd2dd1d79 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 19 Dec 2024 16:26:18 +0100 Subject: [PATCH 01/93] Split inventory from the device module and started working on vm inventory support --- Pipfile | 13 +++ Pipfile.lock | 188 +++++++++++++++++++++++++++++++++++++ modules/device.py | 99 +++++++++---------- modules/inventory.py | 81 ++++++++++++++++ modules/tools.py | 1 + modules/virtual_machine.py | 7 ++ netbox_zabbix_sync.py | 2 + 7 files changed, 344 insertions(+), 47 deletions(-) create mode 100644 Pipfile create mode 100644 Pipfile.lock create mode 100644 modules/inventory.py diff --git a/Pipfile b/Pipfile new file mode 100644 index 0000000..bd0a2ba --- /dev/null +++ b/Pipfile @@ -0,0 +1,13 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] +pynetbox = "*" +zabbix-utils = "*" + +[dev-packages] + +[requires] +python_version = "3.11" diff --git a/Pipfile.lock b/Pipfile.lock new file mode 100644 index 0000000..4be3d95 --- /dev/null +++ b/Pipfile.lock @@ -0,0 +1,188 @@ +{ + "_meta": { + "hash": { + "sha256": "6c35ac0ebf3610e4591484dfd9246af60fc4679b2d0d39193818d62961b2703c" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.11" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "certifi": { + "hashes": [ + "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", + "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" + ], + "markers": "python_version >= '3.6'", + "version": "==2024.8.30" + }, + "charset-normalizer": { + "hashes": [ + "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621", + "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6", + "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8", + "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912", + "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c", + "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b", + "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d", + "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d", + "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95", + "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e", + "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", + "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", + "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab", + "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be", + "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", + "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907", + "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0", + "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2", + "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62", + "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62", + "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", + "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc", + "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284", + "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca", + 
"sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455", + "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858", + "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b", + "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594", + "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", + "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db", + "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", + "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea", + "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6", + "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", + "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749", + "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", + "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd", + "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99", + "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242", + "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee", + "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129", + "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2", + "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51", + "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee", + "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8", + "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", + "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613", + "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742", + "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe", + "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3", + "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5", + "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631", + "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7", + "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15", + "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c", + "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea", + "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417", + "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", + "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", + "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca", + "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa", + "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99", + "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149", + "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41", + "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574", + "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0", + "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f", + "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", + "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654", + "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3", + 
"sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19", + "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", + "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578", + "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", + "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1", + "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51", + "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719", + "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236", + "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a", + "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c", + "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade", + "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944", + "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc", + "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6", + "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6", + "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27", + "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6", + "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2", + "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12", + "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf", + "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", + "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7", + "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf", + "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", + "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b", + "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", + "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03", + "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4", + "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", + "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365", + "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a", + "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748", + "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b", + "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", + "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482" + ], + "markers": "python_full_version >= '3.7.0'", + "version": "==3.4.0" + }, + "idna": { + "hashes": [ + "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", + "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" + ], + "markers": "python_version >= '3.6'", + "version": "==3.10" + }, + "packaging": { + "hashes": [ + "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", + "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f" + ], + "markers": "python_version >= '3.8'", + "version": "==24.2" + }, + "pynetbox": { + "hashes": [ + "sha256:3f82b5964ca77a608aef6cc2fc48a3961f7667fbbdbb60646655373e3dae00c3", + "sha256:f42ce4df6ce97765df91bb4cc0c0e315683d15135265270d78f595114dd20e2b" + ], + "index": "pypi", + "version": "==7.4.1" + }, + "requests": { + "hashes": [ + 
"sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", + "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" + ], + "markers": "python_version >= '3.8'", + "version": "==2.32.3" + }, + "urllib3": { + "hashes": [ + "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", + "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" + ], + "markers": "python_version >= '3.8'", + "version": "==2.2.3" + }, + "zabbix-utils": { + "hashes": [ + "sha256:1eb918096dcf1980a975ff72e4449b5d72c605f79842595dedd0f4ceba3b1225", + "sha256:3c4a98a24c101d89fd938ebe0ad6c9aaa391ac901f2afb75ae682eea88fb77af" + ], + "index": "pypi", + "version": "==2.0.2" + } + }, + "develop": {} +} diff --git a/modules/device.py b/modules/device.py index 07554d0..97206ce 100644 --- a/modules/device.py +++ b/modules/device.py @@ -11,14 +11,15 @@ from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalE InterfaceConfigError, JournalError) from modules.interface import ZabbixInterface from modules.hostgroups import Hostgroup +from modules.inventory import Inventory + try: from config import ( template_cf, device_cf, traverse_site_groups, traverse_regions, inventory_sync, - inventory_mode, - inventory_map + device_inventory_map ) except ModuleNotFoundError: print("Configuration file config.py not found in main directory." @@ -162,51 +163,55 @@ class PhysicalDevice(): return self.config_context["zabbix"]["templates"] def set_inventory(self, nbdevice): - """ Set host inventory """ - # Set inventory mode. Default is disabled (see class init function). - if inventory_mode == "disabled": - if inventory_sync: - self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. " - "Inventory sync is enabled in config but inventory mode is disabled.") - return True - if inventory_mode == "manual": - self.inventory_mode = 0 - elif inventory_mode == "automatic": - self.inventory_mode = 1 - else: - self.logger.error(f"Host {self.name}: Specified value for inventory mode in" - f" config is not valid. Got value {inventory_mode}") - return False - self.inventory = {} - if inventory_sync and self.inventory_mode in [0,1]: - self.logger.debug(f"Host {self.name}: Starting inventory mapper") - # Let's build an inventory dict for each property in the inventory_map - for nb_inv_field, zbx_inv_field in inventory_map.items(): - field_list = nb_inv_field.split("/") # convert str to list based on delimiter - # start at the base of the dict... - value = nbdevice - # ... and step through the dict till we find the needed value - for item in field_list: - value = value[item] if value else None - # Check if the result is usable and expected - # We want to apply any int or float 0 values, - # even if python thinks those are empty. - if ((value and isinstance(value, int | float | str )) or - (isinstance(value, int | float) and int(value) ==0)): - self.inventory[zbx_inv_field] = str(value) - elif not value: - # empty value should just be an empty string for API compatibility - self.logger.debug(f"Host {self.name}: NetBox inventory lookup for " - f"'{nb_inv_field}' returned an empty value") - self.inventory[zbx_inv_field] = "" - else: - # Value is not a string or numeral, probably not what the user expected. - self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" - " returned an unexpected type: it will be skipped.") - self.logger.debug(f"Host {self.name}: Inventory mapping complete. 
" - f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") - return True + """ Set inventory """ + Inventory.set_inventory(self, nbdevice) +# def set_inventory(self, nbdevice): +# """ Set host inventory """ +# # Set inventory mode. Default is disabled (see class init function). +# if inventory_mode == "disabled": +# if inventory_sync: +# self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. " +# "Inventory sync is enabled in config but inventory mode is disabled.") +# return True +# if inventory_mode == "manual": +# self.inventory_mode = 0 +# elif inventory_mode == "automatic": +# self.inventory_mode = 1 +# else: +# self.logger.error(f"Host {self.name}: Specified value for inventory mode in" +# f" config is not valid. Got value {inventory_mode}") +# return False +# self.inventory = {} +# if inventory_sync and self.inventory_mode in [0,1]: +# self.logger.debug(f"Host {self.name}: Starting inventory mapper") +# # Let's build an inventory dict for each property in the inventory_map +# for nb_inv_field, zbx_inv_field in inventory_map.items(): +# field_list = nb_inv_field.split("/") # convert str to list based on delimiter +# # start at the base of the dict... +# value = nbdevice +# # ... and step through the dict till we find the needed value +# for item in field_list: +# value = value[item] if value else None +# # Check if the result is usable and expected +# # We want to apply any int or float 0 values, +# # even if python thinks those are empty. +# if ((value and isinstance(value, int | float | str )) or +# (isinstance(value, int | float) and int(value) ==0)): +# self.inventory[zbx_inv_field] = str(value) +# elif not value: +# # empty value should just be an empty string for API compatibility +# self.logger.debug(f"Host {self.name}: NetBox inventory lookup for " +# f"'{nb_inv_field}' returned an empty value") +# self.inventory[zbx_inv_field] = "" +# else: +# # Value is not a string or numeral, probably not what the user expected. +# self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" +# " returned an unexpected type: it will be skipped.") +# self.logger.debug(f"Host {self.name}: Inventory mapping complete. " +# f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") +# return True +# def isCluster(self): """ Checks if device is part of cluster. @@ -541,7 +546,7 @@ class PhysicalDevice(): 'interfaceid'], selectGroups=["groupid"], selectParentTemplates=["templateid"], - selectInventory=list(inventory_map.values())) + selectInventory=list(device_inventory_map.values())) if len(host) > 1: e = (f"Got {len(host)} results for Zabbix hosts " f"with ID {self.zabbix_id} - hostname {self.name}.") diff --git a/modules/inventory.py b/modules/inventory.py new file mode 100644 index 0000000..7c7cd78 --- /dev/null +++ b/modules/inventory.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +# pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation, too-many-lines +""" +Device specific handeling for NetBox to Zabbix +""" +from pprint import pprint +from logging import getLogger +from zabbix_utils import APIRequestError +from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalError, + InterfaceConfigError, JournalError) +try: + from config import ( + inventory_sync, + inventory_mode, + device_inventory_map, + vm_inventory_map + ) +except ModuleNotFoundError: + print("Configuration file config.py not found in main directory." 
+ "Please create the file or rename the config.py.example file to config.py.") + sys.exit(0) + +class Inventory(): + # pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments + """ + Represents Network device. + INPUT: (NetBox device class, ZabbixAPI class, journal flag, NB journal class) + """ + +# def __init__(self, nb, logger=None): +# self.nb = nb + + def set_inventory(self, nbobject): + if hasattr(nbobject, 'device_type'): + inventory_map = device_inventory_map + else: + inventory_map = vm_inventory_map + """ Set host inventory """ + # Set inventory mode. Default is disabled (see class init function). + if inventory_mode == "disabled": + if inventory_sync: + self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. " + "Inventory sync is enabled in config but inventory mode is disabled.") + return True + if inventory_mode == "manual": + self.inventory_mode = 0 + elif inventory_mode == "automatic": + self.inventory_mode = 1 + else: + self.logger.error(f"Host {self.name}: Specified value for inventory mode in" + f" config is not valid. Got value {inventory_mode}") + return False + self.inventory = {} + if inventory_sync and self.inventory_mode in [0,1]: + self.logger.debug(f"Host {self.name}: Starting inventory mapper") + # Let's build an inventory dict for each property in the inventory_map + for nb_inv_field, zbx_inv_field in inventory_map.items(): + field_list = nb_inv_field.split("/") # convert str to list based on delimiter + # start at the base of the dict... + value = nbobject + # ... and step through the dict till we find the needed value + for item in field_list: + value = value[item] if value else None + # Check if the result is usable and expected + # We want to apply any int or float 0 values, + # even if python thinks those are empty. + if ((value and isinstance(value, int | float | str )) or + (isinstance(value, int | float) and int(value) ==0)): + self.inventory[zbx_inv_field] = str(value) + elif not value: + # empty value should just be an empty string for API compatibility + self.logger.debug(f"Host {self.name}: NetBox inventory lookup for " + f"'{nb_inv_field}' returned an empty value") + self.inventory[zbx_inv_field] = "" + else: + # Value is not a string or numeral, probably not what the user expected. + self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" + " returned an unexpected type: it will be skipped.") + self.logger.debug(f"Host {self.name}: Inventory mapping complete. 
" + f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") +# return True diff --git a/modules/tools.py b/modules/tools.py index f722524..5e09265 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -42,3 +42,4 @@ def proxy_prepper(proxy_list, proxy_group_list): group["monitored_by"] = 2 output.append(group) return output + diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index 331a463..27069e6 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -6,9 +6,12 @@ from os import sys from modules.device import PhysicalDevice from modules.hostgroups import Hostgroup from modules.interface import ZabbixInterface +from modules.inventory import Inventory from modules.exceptions import TemplateError, InterfaceConfigError, SyncInventoryError try: from config import ( + inventory_sync, + vm_inventory_map, traverse_site_groups, traverse_regions ) @@ -35,6 +38,10 @@ class VirtualMachine(PhysicalDevice): # Generate hostgroup based on hostgroup format self.hostgroup = hg.generate(hg_format) + def set_inventory(self, nbvm): + """ Set inventory """ + Inventory.set_inventory(self, nbvm) + def set_vm_template(self): """ Set Template for VMs. Overwrites default class to skip a lookup of custom fields.""" diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 935b55e..12c6960 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -5,6 +5,7 @@ import logging import argparse import ssl +from pprint import pprint from os import environ, path, sys from pynetbox import api from pynetbox.core.query import RequestError as NBRequestError @@ -171,6 +172,7 @@ def main(arguments): # Check if a valid hostgroup has been found for this VM. if not vm.hostgroup: continue + vm.set_inventory(nb_vm) # Checks if device is in cleanup state if vm.status in zabbix_device_removal: if vm.zabbix_id: From 8272e34c129373050317abb3096e3d33f7a38e38 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 12 Feb 2025 11:20:45 +0100 Subject: [PATCH 02/93] removed pipenv artefacts --- Pipfile | 13 ---- Pipfile.lock | 188 --------------------------------------------------- 2 files changed, 201 deletions(-) delete mode 100644 Pipfile delete mode 100644 Pipfile.lock diff --git a/Pipfile b/Pipfile deleted file mode 100644 index bd0a2ba..0000000 --- a/Pipfile +++ /dev/null @@ -1,13 +0,0 @@ -[[source]] -url = "https://pypi.org/simple" -verify_ssl = true -name = "pypi" - -[packages] -pynetbox = "*" -zabbix-utils = "*" - -[dev-packages] - -[requires] -python_version = "3.11" diff --git a/Pipfile.lock b/Pipfile.lock deleted file mode 100644 index 4be3d95..0000000 --- a/Pipfile.lock +++ /dev/null @@ -1,188 +0,0 @@ -{ - "_meta": { - "hash": { - "sha256": "6c35ac0ebf3610e4591484dfd9246af60fc4679b2d0d39193818d62961b2703c" - }, - "pipfile-spec": 6, - "requires": { - "python_version": "3.11" - }, - "sources": [ - { - "name": "pypi", - "url": "https://pypi.org/simple", - "verify_ssl": true - } - ] - }, - "default": { - "certifi": { - "hashes": [ - "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", - "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" - ], - "markers": "python_version >= '3.6'", - "version": "==2024.8.30" - }, - "charset-normalizer": { - "hashes": [ - "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621", - "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6", - "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8", - 
"sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912", - "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c", - "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b", - "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d", - "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d", - "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95", - "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e", - "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", - "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", - "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab", - "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be", - "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", - "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907", - "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0", - "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2", - "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62", - "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62", - "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", - "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc", - "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284", - "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca", - "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455", - "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858", - "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b", - "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594", - "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", - "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db", - "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", - "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea", - "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6", - "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", - "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749", - "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", - "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd", - "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99", - "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242", - "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee", - "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129", - "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2", - "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51", - "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee", - "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8", - "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", - "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613", - "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742", - "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe", - 
"sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3", - "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5", - "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631", - "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7", - "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15", - "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c", - "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea", - "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417", - "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", - "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", - "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca", - "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa", - "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99", - "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149", - "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41", - "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574", - "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0", - "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f", - "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", - "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654", - "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3", - "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19", - "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", - "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578", - "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", - "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1", - "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51", - "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719", - "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236", - "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a", - "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c", - "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade", - "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944", - "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc", - "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6", - "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6", - "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27", - "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6", - "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2", - "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12", - "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf", - "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", - "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7", - "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf", - "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", - "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b", - 
"sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", - "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03", - "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4", - "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", - "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365", - "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a", - "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748", - "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b", - "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", - "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482" - ], - "markers": "python_full_version >= '3.7.0'", - "version": "==3.4.0" - }, - "idna": { - "hashes": [ - "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", - "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" - ], - "markers": "python_version >= '3.6'", - "version": "==3.10" - }, - "packaging": { - "hashes": [ - "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", - "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f" - ], - "markers": "python_version >= '3.8'", - "version": "==24.2" - }, - "pynetbox": { - "hashes": [ - "sha256:3f82b5964ca77a608aef6cc2fc48a3961f7667fbbdbb60646655373e3dae00c3", - "sha256:f42ce4df6ce97765df91bb4cc0c0e315683d15135265270d78f595114dd20e2b" - ], - "index": "pypi", - "version": "==7.4.1" - }, - "requests": { - "hashes": [ - "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", - "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" - ], - "markers": "python_version >= '3.8'", - "version": "==2.32.3" - }, - "urllib3": { - "hashes": [ - "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", - "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" - ], - "markers": "python_version >= '3.8'", - "version": "==2.2.3" - }, - "zabbix-utils": { - "hashes": [ - "sha256:1eb918096dcf1980a975ff72e4449b5d72c605f79842595dedd0f4ceba3b1225", - "sha256:3c4a98a24c101d89fd938ebe0ad6c9aaa391ac901f2afb75ae682eea88fb77af" - ], - "index": "pypi", - "version": "==2.0.2" - } - }, - "develop": {} -} From 4c91c660a8bb55b4b9a52b548ad7fc0a2770f599 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 12 Feb 2025 11:22:27 +0100 Subject: [PATCH 03/93] removed newline --- modules/tools.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/tools.py b/modules/tools.py index 5e09265..f722524 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -42,4 +42,3 @@ def proxy_prepper(proxy_list, proxy_group_list): group["monitored_by"] = 2 output.append(group) return output - From ba2f77a640d8396c7ebc0535baa186c130d318c7 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 12 Feb 2025 11:25:27 +0100 Subject: [PATCH 04/93] Added Pipfile ignore --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index c3069c9..2a3448b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ *.log .venv config.py +Pipfile +Pipfile.lock # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] From c7d3dab27ca762fb2e59b92157ad3d54097b5e93 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 12 Feb 2025 12:30:28 +0100 Subject: [PATCH 05/93] reverted module split, switched to class inheretance instead. Updated config example. 
--- config.py.example | 39 ++++++++------ modules/device.py | 106 +++++++++++++++++++------------------ modules/inventory.py | 81 ---------------------------- modules/virtual_machine.py | 12 +++-- 4 files changed, 87 insertions(+), 151 deletions(-) delete mode 100644 modules/inventory.py diff --git a/config.py.example b/config.py.example index 1d83223..7f8861e 100644 --- a/config.py.example +++ b/config.py.example @@ -80,19 +80,28 @@ inventory_sync = False # For nested properties, you can use the '/' seperator. # For example, the following map will assign the custom field 'mycustomfield' to the 'alias' Zabbix inventory field: # -# inventory_map = { "custom_fields/mycustomfield/name": "alias"} +# device_inventory_map = { "custom_fields/mycustomfield/name": "alias"} # -# The following map should provide some nice defaults: -inventory_map = { "asset_tag": "asset_tag", - "virtual_chassis/name": "chassis", - "status/label": "deployment_status", - "location/name": "location", - "latitude": "location_lat", - "longitude": "location_lon", - "comments": "notes", - "name": "name", - "rack/name": "site_rack", - "serial": "serialno_a", - "device_type/model": "type", - "device_type/manufacturer/name": "vendor", - "oob_ip/address": "oob_ip" } +# The following maps should provide some nice defaults: +device_inventory_map = { "asset_tag": "asset_tag", + "virtual_chassis/name": "chassis", + "status/label": "deployment_status", + "location/name": "location", + "latitude": "location_lat", + "longitude": "location_lon", + "comments": "notes", + "name": "name", + "rack/name": "site_rack", + "serial": "serialno_a", + "device_type/model": "type", + "device_type/manufacturer/name": "vendor", + "oob_ip/address": "oob_ip" } + +# We also support inventory mapping on Virtual Machines. +vm_inventory_map = { "asset_tag": "asset_tag", + "status/label": "deployment_status", + "location/name": "location", + "latitude": "location_lat", + "longitude": "location_lon", + "comments": "notes", + "name": "name" } diff --git a/modules/device.py b/modules/device.py index 97206ce..ae1488c 100644 --- a/modules/device.py +++ b/modules/device.py @@ -11,7 +11,6 @@ from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalE InterfaceConfigError, JournalError) from modules.interface import ZabbixInterface from modules.hostgroups import Hostgroup -from modules.inventory import Inventory try: from config import ( @@ -19,6 +18,7 @@ try: traverse_site_groups, traverse_regions, inventory_sync, + inventory_mode, device_inventory_map ) except ModuleNotFoundError: @@ -63,6 +63,10 @@ class PhysicalDevice(): def __str__(self): return self.__repr__() + def _inventory_map(self): + """ Use device inventory maps """ + return device_inventory_map + def _setBasics(self): """ Sets basic information like IP address. @@ -162,56 +166,56 @@ class PhysicalDevice(): return [self.config_context["zabbix"]["templates"]] return self.config_context["zabbix"]["templates"] - def set_inventory(self, nbdevice): - """ Set inventory """ - Inventory.set_inventory(self, nbdevice) - # def set_inventory(self, nbdevice): -# """ Set host inventory """ -# # Set inventory mode. Default is disabled (see class init function). -# if inventory_mode == "disabled": -# if inventory_sync: -# self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. 
" -# "Inventory sync is enabled in config but inventory mode is disabled.") -# return True -# if inventory_mode == "manual": -# self.inventory_mode = 0 -# elif inventory_mode == "automatic": -# self.inventory_mode = 1 -# else: -# self.logger.error(f"Host {self.name}: Specified value for inventory mode in" -# f" config is not valid. Got value {inventory_mode}") -# return False -# self.inventory = {} -# if inventory_sync and self.inventory_mode in [0,1]: -# self.logger.debug(f"Host {self.name}: Starting inventory mapper") -# # Let's build an inventory dict for each property in the inventory_map -# for nb_inv_field, zbx_inv_field in inventory_map.items(): -# field_list = nb_inv_field.split("/") # convert str to list based on delimiter -# # start at the base of the dict... -# value = nbdevice -# # ... and step through the dict till we find the needed value -# for item in field_list: -# value = value[item] if value else None -# # Check if the result is usable and expected -# # We want to apply any int or float 0 values, -# # even if python thinks those are empty. -# if ((value and isinstance(value, int | float | str )) or -# (isinstance(value, int | float) and int(value) ==0)): -# self.inventory[zbx_inv_field] = str(value) -# elif not value: -# # empty value should just be an empty string for API compatibility -# self.logger.debug(f"Host {self.name}: NetBox inventory lookup for " -# f"'{nb_inv_field}' returned an empty value") -# self.inventory[zbx_inv_field] = "" -# else: -# # Value is not a string or numeral, probably not what the user expected. -# self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" -# " returned an unexpected type: it will be skipped.") -# self.logger.debug(f"Host {self.name}: Inventory mapping complete. " -# f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") -# return True -# +# """ Set inventory """ +# Inventory.set_inventory(self, nbdevice) + + def set_inventory(self, nbdevice): + """ Set host inventory """ + # Set inventory mode. Default is disabled (see class init function). + if inventory_mode == "disabled": + if inventory_sync: + self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. " + "Inventory sync is enabled in config but inventory mode is disabled.") + return True + if inventory_mode == "manual": + self.inventory_mode = 0 + elif inventory_mode == "automatic": + self.inventory_mode = 1 + else: + self.logger.error(f"Host {self.name}: Specified value for inventory mode in" + f" config is not valid. Got value {inventory_mode}") + return False + self.inventory = {} + if inventory_sync and self.inventory_mode in [0,1]: + self.logger.debug(f"Host {self.name}: Starting inventory mapper") + # Let's build an inventory dict for each property in the inventory_map + for nb_inv_field, zbx_inv_field in self._inventory_map().items(): + field_list = nb_inv_field.split("/") # convert str to list based on delimiter + # start at the base of the dict... + value = nbdevice + # ... and step through the dict till we find the needed value + for item in field_list: + value = value[item] if value else None + # Check if the result is usable and expected + # We want to apply any int or float 0 values, + # even if python thinks those are empty. 
+ if ((value and isinstance(value, int | float | str )) or + (isinstance(value, int | float) and int(value) ==0)): + self.inventory[zbx_inv_field] = str(value) + elif not value: + # empty value should just be an empty string for API compatibility + self.logger.debug(f"Host {self.name}: NetBox inventory lookup for " + f"'{nb_inv_field}' returned an empty value") + self.inventory[zbx_inv_field] = "" + else: + # Value is not a string or numeral, probably not what the user expected. + self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" + " returned an unexpected type: it will be skipped.") + self.logger.debug(f"Host {self.name}: Inventory mapping complete. " + f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") + return True + def isCluster(self): """ Checks if device is part of cluster. @@ -546,7 +550,7 @@ class PhysicalDevice(): 'interfaceid'], selectGroups=["groupid"], selectParentTemplates=["templateid"], - selectInventory=list(device_inventory_map.values())) + selectInventory=list(self._inventory_map().values())) if len(host) > 1: e = (f"Got {len(host)} results for Zabbix hosts " f"with ID {self.zabbix_id} - hostname {self.name}.") diff --git a/modules/inventory.py b/modules/inventory.py deleted file mode 100644 index 7c7cd78..0000000 --- a/modules/inventory.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python3 -# pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation, too-many-lines -""" -Device specific handeling for NetBox to Zabbix -""" -from pprint import pprint -from logging import getLogger -from zabbix_utils import APIRequestError -from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalError, - InterfaceConfigError, JournalError) -try: - from config import ( - inventory_sync, - inventory_mode, - device_inventory_map, - vm_inventory_map - ) -except ModuleNotFoundError: - print("Configuration file config.py not found in main directory." - "Please create the file or rename the config.py.example file to config.py.") - sys.exit(0) - -class Inventory(): - # pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments - """ - Represents Network device. - INPUT: (NetBox device class, ZabbixAPI class, journal flag, NB journal class) - """ - -# def __init__(self, nb, logger=None): -# self.nb = nb - - def set_inventory(self, nbobject): - if hasattr(nbobject, 'device_type'): - inventory_map = device_inventory_map - else: - inventory_map = vm_inventory_map - """ Set host inventory """ - # Set inventory mode. Default is disabled (see class init function). - if inventory_mode == "disabled": - if inventory_sync: - self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. " - "Inventory sync is enabled in config but inventory mode is disabled.") - return True - if inventory_mode == "manual": - self.inventory_mode = 0 - elif inventory_mode == "automatic": - self.inventory_mode = 1 - else: - self.logger.error(f"Host {self.name}: Specified value for inventory mode in" - f" config is not valid. Got value {inventory_mode}") - return False - self.inventory = {} - if inventory_sync and self.inventory_mode in [0,1]: - self.logger.debug(f"Host {self.name}: Starting inventory mapper") - # Let's build an inventory dict for each property in the inventory_map - for nb_inv_field, zbx_inv_field in inventory_map.items(): - field_list = nb_inv_field.split("/") # convert str to list based on delimiter - # start at the base of the dict... 
- value = nbobject - # ... and step through the dict till we find the needed value - for item in field_list: - value = value[item] if value else None - # Check if the result is usable and expected - # We want to apply any int or float 0 values, - # even if python thinks those are empty. - if ((value and isinstance(value, int | float | str )) or - (isinstance(value, int | float) and int(value) ==0)): - self.inventory[zbx_inv_field] = str(value) - elif not value: - # empty value should just be an empty string for API compatibility - self.logger.debug(f"Host {self.name}: NetBox inventory lookup for " - f"'{nb_inv_field}' returned an empty value") - self.inventory[zbx_inv_field] = "" - else: - # Value is not a string or numeral, probably not what the user expected. - self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" - " returned an unexpected type: it will be skipped.") - self.logger.debug(f"Host {self.name}: Inventory mapping complete. " - f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") -# return True diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index 27069e6..353a245 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -6,11 +6,11 @@ from os import sys from modules.device import PhysicalDevice from modules.hostgroups import Hostgroup from modules.interface import ZabbixInterface -from modules.inventory import Inventory from modules.exceptions import TemplateError, InterfaceConfigError, SyncInventoryError try: from config import ( inventory_sync, + inventory_mode, vm_inventory_map, traverse_site_groups, traverse_regions @@ -27,6 +27,10 @@ class VirtualMachine(PhysicalDevice): self.hostgroup = None self.zbx_template_names = None + def _inventory_map(self): + """ use VM inventory maps """ + return vm_inventory_map + def set_hostgroup(self, hg_format, nb_site_groups, nb_regions): """Set the hostgroup for this device""" # Create new Hostgroup instance @@ -38,9 +42,9 @@ class VirtualMachine(PhysicalDevice): # Generate hostgroup based on hostgroup format self.hostgroup = hg.generate(hg_format) - def set_inventory(self, nbvm): - """ Set inventory """ - Inventory.set_inventory(self, nbvm) +# def set_inventory(self, nbvm): +# """ Set inventory """ +# Inventory.set_inventory(self, nbvm) def set_vm_template(self): """ Set Template for VMs. Overwrites default class From 1157ed9e640c826a689ad3bfe13f03cf921ea8cf Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 12 Feb 2025 12:32:42 +0100 Subject: [PATCH 06/93] cleanup --- modules/device.py | 4 ---- modules/virtual_machine.py | 4 ---- 2 files changed, 8 deletions(-) diff --git a/modules/device.py b/modules/device.py index ae1488c..349db0a 100644 --- a/modules/device.py +++ b/modules/device.py @@ -166,10 +166,6 @@ class PhysicalDevice(): return [self.config_context["zabbix"]["templates"]] return self.config_context["zabbix"]["templates"] -# def set_inventory(self, nbdevice): -# """ Set inventory """ -# Inventory.set_inventory(self, nbdevice) - def set_inventory(self, nbdevice): """ Set host inventory """ # Set inventory mode. Default is disabled (see class init function). 
diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index 353a245..b8fa1a1 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -42,10 +42,6 @@ class VirtualMachine(PhysicalDevice): # Generate hostgroup based on hostgroup format self.hostgroup = hg.generate(hg_format) -# def set_inventory(self, nbvm): -# """ Set inventory """ -# Inventory.set_inventory(self, nbvm) - def set_vm_template(self): """ Set Template for VMs. Overwrites default class to skip a lookup of custom fields.""" From 5f78a2c7890b49cb720ffb4ff1de1bffc32a2168 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 12 Feb 2025 12:35:21 +0100 Subject: [PATCH 07/93] removed unsupported field from vm_inventory_map --- config.py.example | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/config.py.example b/config.py.example index 7f8861e..dcc307f 100644 --- a/config.py.example +++ b/config.py.example @@ -98,8 +98,7 @@ device_inventory_map = { "asset_tag": "asset_tag", "oob_ip/address": "oob_ip" } # We also support inventory mapping on Virtual Machines. -vm_inventory_map = { "asset_tag": "asset_tag", - "status/label": "deployment_status", +vm_inventory_map = { "status/label": "deployment_status", "location/name": "location", "latitude": "location_lat", "longitude": "location_lon", From b8bb3fb3f091c4da0d9b33507fff5594b1f5abc1 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 12 Feb 2025 12:36:27 +0100 Subject: [PATCH 08/93] removed unsupported fields from vm_inventory_map --- config.py.example | 3 --- 1 file changed, 3 deletions(-) diff --git a/config.py.example b/config.py.example index dcc307f..0a653d6 100644 --- a/config.py.example +++ b/config.py.example @@ -99,8 +99,5 @@ device_inventory_map = { "asset_tag": "asset_tag", # We also support inventory mapping on Virtual Machines. 
vm_inventory_map = { "status/label": "deployment_status", - "location/name": "location", - "latitude": "location_lat", - "longitude": "location_lon", "comments": "notes", "name": "name" } From c67180138eb60ee7a8fefd433bd836d3c2ff7032 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 12 Feb 2025 12:39:36 +0100 Subject: [PATCH 09/93] cleanup --- netbox_zabbix_sync.py | 1 - 1 file changed, 1 deletion(-) diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 12c6960..3eaea3f 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -5,7 +5,6 @@ import logging import argparse import ssl -from pprint import pprint from os import environ, path, sys from pynetbox import api from pynetbox.core.query import RequestError as NBRequestError From cebefd681e1c47829d849637ba68c6a7be0d05fb Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 12 Feb 2025 17:43:57 +0100 Subject: [PATCH 10/93] started work on macro support --- modules/device.py | 35 ++++++++++++++++++++++++++++---- modules/exceptions.py | 3 +++ modules/usermacros.py | 47 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+), 4 deletions(-) create mode 100644 modules/usermacros.py diff --git a/modules/device.py b/modules/device.py index 666fc15..dc12a28 100644 --- a/modules/device.py +++ b/modules/device.py @@ -8,9 +8,11 @@ from re import search from logging import getLogger from zabbix_utils import APIRequestError from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalError, - InterfaceConfigError, JournalError) + InterfaceConfigError, JournalError, UsermacroError) from modules.interface import ZabbixInterface +from modules.usermacros import ZabbixUsermacros from modules.hostgroups import Hostgroup +from pprint import pprint try: from config import ( @@ -19,7 +21,8 @@ try: traverse_regions, inventory_sync, inventory_mode, - device_inventory_map + device_inventory_map, + device_usermacro_map ) except ModuleNotFoundError: print("Configuration file config.py not found in main directory." @@ -54,6 +57,7 @@ class PhysicalDevice(): self.nb_journals = nb_journal_class self.inventory_mode = -1 self.inventory = {} + self.usermacros = {} self.logger = logger if logger else getLogger(__name__) self._setBasics() @@ -67,6 +71,10 @@ class PhysicalDevice(): """ Use device inventory maps """ return device_inventory_map + def _usermacro_map(self): + """ Use device inventory maps """ + return device_usermacro_map + def _setBasics(self): """ Sets basic information like IP address. 
@@ -363,6 +371,19 @@ class PhysicalDevice(): self.logger.warning(message) raise SyncInventoryError(message) from e + def setUsermacros(self): + try: + # Initiate interface class + macros = ZabbixUsermacros(self.nb.config_context, self._usermacro_map()) + if macros.sync == False: + return {} + else: + return [{'macro': '{$USERMACRO}', 'value': '123', 'type': 0, 'description': 'just a test'}] + except UsermacroError as e: + message = f"{self.name}: {e}" + self.logger.warning(message) + raise UsermacroError(message) from e + def setProxy(self, proxy_list): """ Sets proxy or proxy group if this @@ -423,6 +444,8 @@ class PhysicalDevice(): groups = [{"groupid": self.group_id}] # Set Zabbix proxy if defined self.setProxy(proxies) + # Set usermacros + self.usermacros = self.setUsermacros() # Set basic data for host creation create_data = {"host": self.name, "name": self.visible_name, @@ -432,7 +455,8 @@ class PhysicalDevice(): "templates": templateids, "description": description, "inventory_mode": self.inventory_mode, - "inventory": self.inventory + "inventory": self.inventory, + "macros": self.usermacros } # If a Zabbix proxy or Zabbix Proxy group has been defined if self.zbxproxy: @@ -547,7 +571,10 @@ class PhysicalDevice(): selectGroups=["groupid"], selectHostGroups=["groupid"], selectParentTemplates=["templateid"], - selectInventory=list(self._inventory_map().values())) + selectInventory=list(self._inventory_map().values()), + selectMacros=["macro","value","type","description"] + ) + pprint(host) if len(host) > 1: e = (f"Got {len(host)} results for Zabbix hosts " f"with ID {self.zabbix_id} - hostname {self.name}.") diff --git a/modules/exceptions.py b/modules/exceptions.py index 856433a..27a141c 100644 --- a/modules/exceptions.py +++ b/modules/exceptions.py @@ -31,3 +31,6 @@ class HostgroupError(SyncError): class TemplateError(SyncError): """ Class TemplateError """ + +class UsermacroError(SyncError): + """ Class UsermacroError """ diff --git a/modules/usermacros.py b/modules/usermacros.py new file mode 100644 index 0000000..9f53760 --- /dev/null +++ b/modules/usermacros.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +""" +All of the Zabbix Usermacro related configuration +""" +from logging import getLogger +from zabbix_utils import APIRequestError +from modules.exceptions import UsermacroError + + +from pprint import pprint + +try: + from config import ( + usermacro_sync, + ) +except ModuleNotFoundError: + print("Configuration file config.py not found in main directory." 
+ "Please create the file or rename the config.py.example file to config.py.") + sys.exit(0) + +class ZabbixUsermacros(): + """Class that represents a Zabbix interface.""" + + def __init__(self, context, usermacro_map, logger=None): + self.context = context + self.usermacro_map = usermacro_map + self.logger = logger if logger else getLogger(__name__) + self.usermacros = {} + self.sync = False + self.force_sync = False + self._setConfig() + + def __repr__(self): + return self.name + + def __str__(self): + return self.__repr__() + + def _setConfig(self): + if str(usermacro_sync) == "full": + self.sync = True + self.force_sync = True + elif usermacro_sync: + self.sync = True + return True + + From 6d4e250b236efe0a70594f6fd604d0a29cbf2d4a Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Fri, 14 Feb 2025 08:28:10 +0100 Subject: [PATCH 11/93] :sparkles: Working usermacros based on config context --- modules/device.py | 40 ++++++++++++++++++++++----------- modules/usermacros.py | 51 +++++++++++++++++++++++++++++++++++++++---- 2 files changed, 74 insertions(+), 17 deletions(-) diff --git a/modules/device.py b/modules/device.py index dc12a28..1a9e452 100644 --- a/modules/device.py +++ b/modules/device.py @@ -5,6 +5,7 @@ Device specific handeling for NetBox to Zabbix """ from os import sys from re import search +from copy import deepcopy from logging import getLogger from zabbix_utils import APIRequestError from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalError, @@ -22,6 +23,7 @@ try: inventory_sync, inventory_mode, device_inventory_map, + usermacro_sync, device_usermacro_map ) except ModuleNotFoundError: @@ -372,18 +374,13 @@ class PhysicalDevice(): raise SyncInventoryError(message) from e def setUsermacros(self): - try: - # Initiate interface class - macros = ZabbixUsermacros(self.nb.config_context, self._usermacro_map()) - if macros.sync == False: - return {} - else: - return [{'macro': '{$USERMACRO}', 'value': '123', 'type': 0, 'description': 'just a test'}] - except UsermacroError as e: - message = f"{self.name}: {e}" - self.logger.warning(message) - raise UsermacroError(message) from e - + # Initiate Usermacros class + macros = ZabbixUsermacros(self.nb.config_context, self._usermacro_map()) + if macros.sync == False: + return [] + else: + return macros.generate() + def setProxy(self, proxy_list): """ Sets proxy or proxy group if this @@ -574,7 +571,6 @@ class PhysicalDevice(): selectInventory=list(self._inventory_map().values()), selectMacros=["macro","value","type","description"] ) - pprint(host) if len(host) > 1: e = (f"Got {len(host)} results for Zabbix hosts " f"with ID {self.zabbix_id} - hostname {self.name}.") @@ -696,6 +692,24 @@ class PhysicalDevice(): self.logger.warning(f"Host {self.name}: inventory OUT of sync.") self.updateZabbixHost(inventory=self.inventory) + # Check host usermacros + if usermacro_sync: + macros_filtered = [] + self.usermacros = self.setUsermacros() + # Do not re-sync secret usermacros unless sync is set to 'full' + if not str(usermacro_sync).lower() == "full": + for m in deepcopy(self.usermacros): + if m['type'] == str(1): + # Remove the value as the api doesn't return it + # this will allow us to only update usermacros that don't exist + m.pop('value') + macros_filtered.append(m) + if host['macros'] == self.usermacros or host['macros'] == macros_filtered: + self.logger.debug(f"Host {self.name}: usermacros in-sync.") + else: + self.logger.warning(f"Host {self.name}: usermacros OUT of sync.") + 
self.updateZabbixHost(macros=self.usermacros) + # If only 1 interface has been found # pylint: disable=too-many-nested-blocks if len(host['interfaces']) == 1: diff --git a/modules/usermacros.py b/modules/usermacros.py index 9f53760..19d85a6 100644 --- a/modules/usermacros.py +++ b/modules/usermacros.py @@ -2,11 +2,11 @@ """ All of the Zabbix Usermacro related configuration """ +from re import match from logging import getLogger from zabbix_utils import APIRequestError from modules.exceptions import UsermacroError - from pprint import pprint try: @@ -37,11 +37,54 @@ class ZabbixUsermacros(): return self.__repr__() def _setConfig(self): - if str(usermacro_sync) == "full": + if str(usermacro_sync).lower() == "full": self.sync = True self.force_sync = True elif usermacro_sync: self.sync = True return True - - + + def validate_macro(self, macro_name): + pattern = '\{\$[A-Z0-9\._]*(\:.*)?\}' + return match(pattern, macro_name) + + def render_macro(self, macro_name, macro_properties): + macro={} + macrotypes={'text': 0, 'secret': 1, 'vault': 2} + if self.validate_macro(macro_name): + macro['macro'] = str(macro_name) + if isinstance(macro_properties, dict): + if not 'value' in macro_properties: + self.logger.error(f'Usermacro {macro_name} has no value, skipping.') + return False + else: + macro['value'] = macro_properties['value'] + + if 'type' in macro_properties and macro_properties['type'].lower() in macrotypes: + macro['type'] = str(macrotypes[macro_properties['type']]) + else: + macro['type'] = str(0) + + if 'description' in macro_properties and isinstance(macro_properties['description'], str): + macro['description'] = macro_properties['description'] + else: + macro['description'] = "" + + elif isinstance(macro_properties, str): + macro['value'] = macro_properties + macro['type'] = str(0) + macro['description'] = "" + else: + self.logger.error(f'Usermacro {macro_name} is not a valid usermacro name, skipping.') + return False + return macro + + def generate(self): + macros=[] + if "zabbix" in self.context and "usermacros" in self.context['zabbix']: + for macro, properties in self.context['zabbix']['usermacros'].items(): + m = self.render_macro(macro, properties) + pprint(m) + if m: + macros.append(m) + return macros From 1b831a2d399d834a71796160bf9d6c475238d38a Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Fri, 14 Feb 2025 09:46:55 +0100 Subject: [PATCH 12/93] Moved Inventory mapping logic to tools module --- modules/device.py | 52 ++++++++++++++++++++++++----------------------- modules/tools.py | 36 ++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 25 deletions(-) diff --git a/modules/device.py b/modules/device.py index 1a9e452..380bb56 100644 --- a/modules/device.py +++ b/modules/device.py @@ -13,6 +13,7 @@ from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalE from modules.interface import ZabbixInterface from modules.usermacros import ZabbixUsermacros from modules.hostgroups import Hostgroup +from modules.tools import field_mapper from pprint import pprint try: @@ -195,31 +196,32 @@ class PhysicalDevice(): self.inventory = {} if inventory_sync and self.inventory_mode in [0,1]: self.logger.debug(f"Host {self.name}: Starting inventory mapper") - # Let's build an inventory dict for each property in the inventory_map - for nb_inv_field, zbx_inv_field in self._inventory_map().items(): - field_list = nb_inv_field.split("/") # convert str to list based on delimiter - # start at the base of the dict... - value = nbdevice - # ... 
and step through the dict till we find the needed value - for item in field_list: - value = value[item] if value else None - # Check if the result is usable and expected - # We want to apply any int or float 0 values, - # even if python thinks those are empty. - if ((value and isinstance(value, int | float | str )) or - (isinstance(value, int | float) and int(value) ==0)): - self.inventory[zbx_inv_field] = str(value) - elif not value: - # empty value should just be an empty string for API compatibility - self.logger.debug(f"Host {self.name}: NetBox inventory lookup for " - f"'{nb_inv_field}' returned an empty value") - self.inventory[zbx_inv_field] = "" - else: - # Value is not a string or numeral, probably not what the user expected. - self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" - " returned an unexpected type: it will be skipped.") - self.logger.debug(f"Host {self.name}: Inventory mapping complete. " - f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") + self.inventory = field_mapper(self.name, self._inventory_map(), nbdevice, self.logger) +# # Let's build an inventory dict for each property in the inventory_map +# for nb_inv_field, zbx_inv_field in self._inventory_map().items(): +# field_list = nb_inv_field.split("/") # convert str to list based on delimiter +# # start at the base of the dict... +# value = nbdevice +# # ... and step through the dict till we find the needed value +# for item in field_list: +# value = value[item] if value else None +# # Check if the result is usable and expected +# # We want to apply any int or float 0 values, +# # even if python thinks those are empty. +# if ((value and isinstance(value, int | float | str )) or +# (isinstance(value, int | float) and int(value) ==0)): +# self.inventory[zbx_inv_field] = str(value) +# elif not value: +# # empty value should just be an empty string for API compatibility +# self.logger.debug(f"Host {self.name}: NetBox inventory lookup for " +# f"'{nb_inv_field}' returned an empty value") +# self.inventory[zbx_inv_field] = "" +# else: +# # Value is not a string or numeral, probably not what the user expected. +# self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" +# " returned an unexpected type: it will be skipped.") +# self.logger.debug(f"Host {self.name}: Inventory mapping complete. " +# f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") return True def isCluster(self): diff --git a/modules/tools.py b/modules/tools.py index f722524..1f197b6 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -1,3 +1,5 @@ +from logging import getLogger + """A collection of tools used by several classes""" def convert_recordset(recordset): """ Converts netbox RedcordSet to list of dicts. """ @@ -42,3 +44,37 @@ def proxy_prepper(proxy_list, proxy_group_list): group["monitored_by"] = 2 output.append(group) return output + + +def field_mapper(host, mapper, nbdevice, logger): + """ + Maps NetBox field data to Zabbix properties. + Used for Inventory, Usermacros and Tag mappings. + """ + data={} + # Let's build an dict for each property in the map + for nb_field, zbx_field in mapper.items(): + field_list = nb_field.split("/") # convert str to list based on delimiter + # start at the base of the dict... + value = nbdevice + # ... 
and step through the dict till we find the needed value + for item in field_list: + value = value[item] if value else None + # Check if the result is usable and expected + # We want to apply any int or float 0 values, + # even if python thinks those are empty. + if ((value and isinstance(value, int | float | str )) or + (isinstance(value, int | float) and int(value) ==0)): + data[zbx_field] = str(value) + elif not value: + # empty value should just be an empty string for API compatibility + logger.debug(f"Host {host}: NetBox lookup for " + f"'{nb_field}' returned an empty value") + data[zbx_field] = "" + else: + # Value is not a string or numeral, probably not what the user expected. + logger.error(f"Host {host}: Lookup for '{nb_field}'" + " returned an unexpected type: it will be skipped.") + logger.debug(f"Host {host}: Field mapping complete." + f"Mapped {len(list(filter(None, data.values())))} field(s)") + return data From eea7df660a70006d7761ff13715dcb1b8bb339e1 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Fri, 14 Feb 2025 15:18:26 +0100 Subject: [PATCH 13/93] Full usermacro support --- modules/device.py | 72 ++++++++++++------------------------ modules/tools.py | 8 ++-- modules/usermacros.py | 75 ++++++++++++++++++++++---------------- modules/virtual_machine.py | 2 - netbox_zabbix_sync.py | 4 +- 5 files changed, 73 insertions(+), 88 deletions(-) diff --git a/modules/device.py b/modules/device.py index 380bb56..76f07cf 100644 --- a/modules/device.py +++ b/modules/device.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation, too-many-lines +# pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation, too-many-lines, too-many-public-methods """ Device specific handeling for NetBox to Zabbix """ @@ -9,12 +9,11 @@ from copy import deepcopy from logging import getLogger from zabbix_utils import APIRequestError from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalError, - InterfaceConfigError, JournalError, UsermacroError) + InterfaceConfigError, JournalError) from modules.interface import ZabbixInterface from modules.usermacros import ZabbixUsermacros from modules.hostgroups import Hostgroup from modules.tools import field_mapper -from pprint import pprint try: from config import ( @@ -73,7 +72,7 @@ class PhysicalDevice(): def _inventory_map(self): """ Use device inventory maps """ return device_inventory_map - + def _usermacro_map(self): """ Use device inventory maps """ return device_usermacro_map @@ -197,31 +196,6 @@ class PhysicalDevice(): if inventory_sync and self.inventory_mode in [0,1]: self.logger.debug(f"Host {self.name}: Starting inventory mapper") self.inventory = field_mapper(self.name, self._inventory_map(), nbdevice, self.logger) -# # Let's build an inventory dict for each property in the inventory_map -# for nb_inv_field, zbx_inv_field in self._inventory_map().items(): -# field_list = nb_inv_field.split("/") # convert str to list based on delimiter -# # start at the base of the dict... -# value = nbdevice -# # ... and step through the dict till we find the needed value -# for item in field_list: -# value = value[item] if value else None -# # Check if the result is usable and expected -# # We want to apply any int or float 0 values, -# # even if python thinks those are empty. 
-# if ((value and isinstance(value, int | float | str )) or -# (isinstance(value, int | float) and int(value) ==0)): -# self.inventory[zbx_inv_field] = str(value) -# elif not value: -# # empty value should just be an empty string for API compatibility -# self.logger.debug(f"Host {self.name}: NetBox inventory lookup for " -# f"'{nb_inv_field}' returned an empty value") -# self.inventory[zbx_inv_field] = "" -# else: -# # Value is not a string or numeral, probably not what the user expected. -# self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" -# " returned an unexpected type: it will be skipped.") -# self.logger.debug(f"Host {self.name}: Inventory mapping complete. " -# f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") return True def isCluster(self): @@ -375,14 +349,19 @@ class PhysicalDevice(): self.logger.warning(message) raise SyncInventoryError(message) from e - def setUsermacros(self): - # Initiate Usermacros class - macros = ZabbixUsermacros(self.nb.config_context, self._usermacro_map()) - if macros.sync == False: - return [] - else: - return macros.generate() - + def set_usermacros(self): + """ + Generates Usermacros + """ + macros = ZabbixUsermacros(self.nb, self._usermacro_map(), + usermacro_sync, logger=self.logger, + host=self.name) + if macros.sync is False: + self.usermacros = [] + + self.usermacros = macros.generate() + return True + def setProxy(self, proxy_list): """ Sets proxy or proxy group if this @@ -443,8 +422,6 @@ class PhysicalDevice(): groups = [{"groupid": self.group_id}] # Set Zabbix proxy if defined self.setProxy(proxies) - # Set usermacros - self.usermacros = self.setUsermacros() # Set basic data for host creation create_data = {"host": self.name, "name": self.visible_name, @@ -571,7 +548,7 @@ class PhysicalDevice(): selectHostGroups=["groupid"], selectParentTemplates=["templateid"], selectInventory=list(self._inventory_map().values()), - selectMacros=["macro","value","type","description"] + selectMacros=["macro","value","type","description"] ) if len(host) > 1: e = (f"Got {len(host)} results for Zabbix hosts " @@ -621,9 +598,9 @@ class PhysicalDevice(): if group["groupid"] == self.group_id: self.logger.debug(f"Host {self.name}: hostgroup in-sync.") break - else: - self.logger.warning(f"Host {self.name}: hostgroup OUT of sync.") - self.updateZabbixHost(groups={'groupid': self.group_id}) + else: + self.logger.warning(f"Host {self.name}: hostgroup OUT of sync.") + self.updateZabbixHost(groups={'groupid': self.group_id}) if int(host["status"]) == self.zabbix_state: self.logger.debug(f"Host {self.name}: status in-sync.") @@ -697,14 +674,13 @@ class PhysicalDevice(): # Check host usermacros if usermacro_sync: macros_filtered = [] - self.usermacros = self.setUsermacros() # Do not re-sync secret usermacros unless sync is set to 'full' - if not str(usermacro_sync).lower() == "full": + if str(usermacro_sync).lower() != "full": for m in deepcopy(self.usermacros): if m['type'] == str(1): - # Remove the value as the api doesn't return it - # this will allow us to only update usermacros that don't exist - m.pop('value') + # Remove the value as the api doesn't return it + # this will allow us to only update usermacros that don't exist + m.pop('value') macros_filtered.append(m) if host['macros'] == self.usermacros or host['macros'] == macros_filtered: self.logger.debug(f"Host {self.name}: usermacros in-sync.") diff --git a/modules/tools.py b/modules/tools.py index 1f197b6..f32e802 100644 --- a/modules/tools.py +++ 
b/modules/tools.py @@ -1,6 +1,5 @@ -from logging import getLogger - """A collection of tools used by several classes""" + def convert_recordset(recordset): """ Converts netbox RedcordSet to list of dicts. """ recordlist = [] @@ -45,7 +44,6 @@ def proxy_prepper(proxy_list, proxy_group_list): output.append(group) return output - def field_mapper(host, mapper, nbdevice, logger): """ Maps NetBox field data to Zabbix properties. @@ -75,6 +73,6 @@ def field_mapper(host, mapper, nbdevice, logger): # Value is not a string or numeral, probably not what the user expected. logger.error(f"Host {host}: Lookup for '{nb_field}'" " returned an unexpected type: it will be skipped.") - logger.debug(f"Host {host}: Field mapping complete." - f"Mapped {len(list(filter(None, data.values())))} field(s)") + logger.debug(f"Host {host}: Field mapping complete. " + f"Mapped {len(list(filter(None, data.values())))} field(s)") return data diff --git a/modules/usermacros.py b/modules/usermacros.py index 19d85a6..71efbde 100644 --- a/modules/usermacros.py +++ b/modules/usermacros.py @@ -1,71 +1,71 @@ #!/usr/bin/env python3 +# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, logging-fstring-interpolation """ All of the Zabbix Usermacro related configuration """ from re import match from logging import getLogger -from zabbix_utils import APIRequestError -from modules.exceptions import UsermacroError - -from pprint import pprint - -try: - from config import ( - usermacro_sync, - ) -except ModuleNotFoundError: - print("Configuration file config.py not found in main directory." - "Please create the file or rename the config.py.example file to config.py.") - sys.exit(0) +from modules.tools import field_mapper class ZabbixUsermacros(): """Class that represents a Zabbix interface.""" - def __init__(self, context, usermacro_map, logger=None): - self.context = context + def __init__(self, nb, usermacro_map, usermacro_sync, logger=None, host=None): + self.nb = nb + self.name = host if host else nb.name self.usermacro_map = usermacro_map self.logger = logger if logger else getLogger(__name__) self.usermacros = {} + self.usermacro_sync = usermacro_sync self.sync = False self.force_sync = False - self._setConfig() + self._set_config() def __repr__(self): return self.name - + def __str__(self): return self.__repr__() - def _setConfig(self): - if str(usermacro_sync).lower() == "full": + def _set_config(self): + """ + Setup class + """ + if str(self.usermacro_sync).lower() == "full": self.sync = True self.force_sync = True - elif usermacro_sync: + elif self.usermacro_sync: self.sync = True return True def validate_macro(self, macro_name): - pattern = '\{\$[A-Z0-9\._]*(\:.*)?\}' + """ + Validates usermacro name + """ + pattern = r'\{\$[A-Z0-9\._]*(\:.*)?\}' return match(pattern, macro_name) def render_macro(self, macro_name, macro_properties): + """ + Renders a full usermacro from partial input + """ macro={} macrotypes={'text': 0, 'secret': 1, 'vault': 2} if self.validate_macro(macro_name): macro['macro'] = str(macro_name) if isinstance(macro_properties, dict): if not 'value' in macro_properties: - self.logger.error(f'Usermacro {macro_name} has no value, skipping.') - return False - else: - macro['value'] = macro_properties['value'] + self.logger.error(f'Usermacro {macro_name} has no value, skipping.') + return False + macro['value'] = macro_properties['value'] if 'type' in macro_properties and macro_properties['type'].lower() in macrotypes: macro['type'] = 
str(macrotypes[macro_properties['type']]) else: macro['type'] = str(0) - if 'description' in macro_properties and isinstance(macro_properties['description'], str): + if ('description' in macro_properties and + isinstance(macro_properties['description'], str)): macro['description'] = macro_properties['description'] else: macro['description'] = "" @@ -78,13 +78,24 @@ class ZabbixUsermacros(): self.logger.error(f'Usermacro {macro_name} is not a valid usermacro name, skipping.') return False return macro - + def generate(self): + """ + Generate full set of Usermacros + """ macros=[] - if "zabbix" in self.context and "usermacros" in self.context['zabbix']: - for macro, properties in self.context['zabbix']['usermacros'].items(): - m = self.render_macro(macro, properties) - pprint(m) + # Parse the field mapper for usermacros + if self.usermacro_map: + self.logger.debug(f"Host {self.nb.name}: Starting usermacro mapper") + field_macros = field_mapper(self.nb.name, self.usermacro_map, self.nb, self.logger) + for macro, value in field_macros.items(): + m = self.render_macro(macro, value) if m: - macros.append(m) + macros.append(m) + # Parse NetBox config context for usermacros + if "zabbix" in self.nb.config_context and "usermacros" in self.nb.config_context['zabbix']: + for macro, properties in self.nb.config_context['zabbix']['usermacros'].items(): + m = self.render_macro(macro, properties) + if m: + macros.append(m) return macros diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index b8fa1a1..d95bfc1 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -9,8 +9,6 @@ from modules.interface import ZabbixInterface from modules.exceptions import TemplateError, InterfaceConfigError, SyncInventoryError try: from config import ( - inventory_sync, - inventory_mode, vm_inventory_map, traverse_site_groups, traverse_regions diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 3eaea3f..5498edc 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -161,7 +161,7 @@ def main(arguments): try: vm = VirtualMachine(nb_vm, zabbix, netbox_journals, nb_version, create_journal, logger) - logger.debug(f"Host {vm.name}: started operations on VM.") + logger.debug(f"Host {vm.name}: Started operations on VM.") vm.set_vm_template() # Check if a valid template has been found for this VM. if not vm.zbx_template_names: @@ -172,6 +172,7 @@ def main(arguments): if not vm.hostgroup: continue vm.set_inventory(nb_vm) + vm.set_usermacros() # Checks if device is in cleanup state if vm.status in zabbix_device_removal: if vm.zabbix_id: @@ -225,6 +226,7 @@ def main(arguments): if not device.hostgroup: continue device.set_inventory(nb_device) + device.set_usermacros() # Checks if device is part of cluster. # Requires clustering variable if device.isCluster() and clustering: From 72558d3825260677c5dee526c8dc69eee014b770 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Fri, 14 Feb 2025 16:35:40 +0100 Subject: [PATCH 14/93] Updated docs for VM inventory --- README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 959a2fb..bdd850d 100644 --- a/README.md +++ b/README.md @@ -292,15 +292,18 @@ You can set the inventory mode to "disabled", "manual" or "automatic" with the [Zabbix Manual](https://www.zabbix.com/documentation/current/en/manual/config/hosts/inventory#building-inventory) for more information about the modes. 
-Use the `inventory_map` variable to map which NetBox properties are used in +Use the `device_inventory_map` variable to map which NetBox properties are used in which Zabbix Inventory fields. For nested properties, you can use the '/' seperator. For example, the following map will assign the custom field 'mycustomfield' to the 'alias' Zabbix inventory field: +For Virtual Machines, use `vm_inventory_map`. + ``` inventory_sync = True inventory_mode = "manual" -inventory_map = { "custom_fields/mycustomfield/name": "alias"} +device_inventory_map = {"custom_fields/mycustomfield/name": "alias"} +vm_inventory_map = {"custom_fields/mycustomfield/name": "alias"} ``` See `config.py.example` for an extensive example map. Any Zabix Inventory fields From 3d4e7803ccf35a7e91ed61c86564f9c730f02e5c Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Mon, 17 Feb 2025 12:48:26 +0100 Subject: [PATCH 15/93] Implemented vm_usermacro_map --- modules/virtual_machine.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index d95bfc1..5afdb18 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -10,6 +10,7 @@ from modules.exceptions import TemplateError, InterfaceConfigError, SyncInventor try: from config import ( vm_inventory_map, + vm_usermacro_map, traverse_site_groups, traverse_regions ) @@ -29,6 +30,10 @@ class VirtualMachine(PhysicalDevice): """ use VM inventory maps """ return vm_inventory_map + def _usermacro_map(self): + """ use VM inventory maps """ + return vm_usermacro_map + def set_hostgroup(self, hg_format, nb_site_groups, nb_regions): """Set the hostgroup for this device""" # Create new Hostgroup instance From f9453cc23cc2b03679155a70fe09abd2c72303cb Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Mon, 17 Feb 2025 12:54:11 +0100 Subject: [PATCH 16/93] Updated documentation for usermacro support --- README.md | 138 +++++++++++++++++++++++++++++++++++++++++----- config.py.example | 20 +++++++ 2 files changed, 143 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index bdd850d..612588f 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ A script to create, update and delete Zabbix hosts using NetBox device objects. To pull the latest stable version to your local cache, use the following docker pull command: -``` +```bash docker pull ghcr.io/thenetworkguy/netbox-zabbix-sync:main ``` @@ -15,7 +15,7 @@ Make sure to specify the needed environment variables for the script to work (see [here](#set-environment-variables)) on the command line or use an [env file](https://docs.docker.com/reference/cli/docker/container/run/#env). -``` +```bash docker run -d -t -i -e ZABBIX_HOST='https://zabbix.local' \ -e ZABBIX_TOKEN='othersecrettoken' \ -e NETBOX_HOST='https://netbox.local' \ @@ -30,7 +30,7 @@ The image uses the default `config.py` for it's configuration, you can use a volume mount in the docker run command to override with your own config file if needed (see [config file](#config-file)): -``` +```bash docker run -d -t -i -v $(pwd)/config.py:/opt/netbox-zabbix/config.py ... ``` @@ -38,7 +38,7 @@ docker run -d -t -i -v $(pwd)/config.py:/opt/netbox-zabbix/config.py ... 
### Cloning the repository -``` +```bash git clone https://github.com/TheNetworkGuy/netbox-zabbix-sync.git ``` @@ -66,7 +66,7 @@ cp config.py.example config.py Set the following environment variables: -``` +```bash ZABBIX_HOST="https://zabbix.local" ZABBIX_USER="username" ZABBIX_PASS="Password" @@ -77,7 +77,7 @@ NETBOX_TOKEN="secrettoken" Or, you can use a Zabbix API token to login instead of using a username and password. In that case `ZABBIX_USER` and `ZABBIX_PASS` will be ignored. -``` +```bash ZABBIX_TOKEN=othersecrettoken ``` @@ -183,9 +183,9 @@ used: | cluster | VM cluster name | | cluster_type | VM cluster type | -You can specify the value sperated by a "/" like so: +You can specify the value seperated by a "/" like so: -``` +```python hostgroup_format = "tenant/site/dev_location/role" ``` @@ -232,7 +232,7 @@ have a relationship with a tenant. - Device_role: PDU - Site: HQ-AMS -``` +```python hostgroup_format = "site/tenant/device_role" ``` @@ -245,7 +245,7 @@ generated for both hosts: The same logic applies to custom fields being used in the HG format: -``` +```python hostgroup_format = "site/mycustomfieldname" ``` @@ -299,7 +299,7 @@ seperator. For example, the following map will assign the custom field For Virtual Machines, use `vm_inventory_map`. -``` +```python inventory_sync = True inventory_mode = "manual" device_inventory_map = {"custom_fields/mycustomfield/name": "alias"} @@ -324,14 +324,14 @@ sticking to the custom field. You can change the behaviour in the config file. By default this setting is false but you can set it to true to use config context: -``` +```python templates_config_context = True ``` After that make sure that for each host there is at least one template defined in the config context in this format: -``` +```json { "zabbix": { "templates": [ @@ -349,10 +349,114 @@ added benefit of overwriting the template should a device in NetBox have a device specific context defined. In this case the device specific context template(s) will take priority over the device type custom field template. -``` +```python templates_config_context_overrule = True ``` +### Usermacros + +You can choose to use NetBox as a source for Host usermacros by +enabling the following option in the configuration file: + +``` +usermacro_sync = True +``` + +Please be advised that enabling this option will _clear_ any usermacros +manually set on the managed hosts and override them with the usermacros +from NetBox. + +There are two NetBox sources that can be used to populate usermacros: + +1. NetBox config context +2. NetBox fields + +#### Config context + +By defining a dictionary `usermacros` within the `zabbix` key in +config context, you can dynamically assign usermacro values based on +anything that you can target based on +[config contexts](https://netboxlabs.com/docs/netbox/en/stable/features/context-data/) +within NetBox. + +Through this method, it is possible to define the following types of usermacros: + +1. Text +2. Secret +3. Vault + +The default macro type is text if no `type` and `value` have been set. +It is also possible to create usermacros with +[context](https://www.zabbix.com/documentation/7.0/en/manual/config/macros/user_macros_context). 
+Examples: + +```json +{ + "zabbix": { + "usermacros": { + "{$USER_MACRO}": "test value", + "{$CONTEXT_MACRO:\"test\"}": "test value", + "{$CONTEXT_REGEX_MACRO:regex:\".*\"}": "test value", + "{$SECRET_MACRO}": { + "type": "secret", + "value": "PaSsPhRaSe" + }, + "{$VAULT_MACRO}": { + "type": "vault", + "value": "secret/vmware:password" + }, + "{$USER_MACRO2}": { + "type": "text", + "value": "another test value" + } + } + } +} + +``` + +Please be aware that secret usermacros are only synced _once_ by default. +This is the default behaviour because Zabbix API won't return the value of +secrets so the script cannot compare the values with the ones set in NetBox. + +If you update a secret usermacro value, just remove the value from the host +in Zabbix and the new value will be synced during the next run. + +Alternatively, you can set the following option in the config file: + +```python +usermacro_sync = "full" +``` + +This will force a full usermacro sync on every run on hosts that have secret usermacros set. +That way, you will know for sure the secret values are always up to date. + +Keep in mind that NetBox (and the log output of this script) will show your secrets +in plain text. If true secrecy is required, consider switching to +[vault](https://www.zabbix.com/documentation/current/en/manual/config/macros/secret_macros#vault-secret) +usermacros. + +#### Netbox Fields + +To use NetBox fields as a source for usermacros, you will need to set up usermacro maps +for devices and/or virtual machines in the configuration file. +This method only supports `text` type usermacros. + +For example: +```python +usermacro_sync = True +device_usermacro_map = {"serial": "{$HW_SERIAL}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}"} +vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}"} +``` + + + ## Permissions ### NetBox @@ -521,9 +625,13 @@ environment. For example, you could: } ``` -I would recommend using macros for sensitive data such as community strings +I would recommend using usermacros for sensitive data such as community strings since the data in NetBox is plain-text. > **_NOTE:_** Not all SNMP data is required for a working configuration. > [The following parameters are allowed](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed")but > are not all required, depending on your environment. + + + + diff --git a/config.py.example b/config.py.example index 0a653d6..68f6fea 100644 --- a/config.py.example +++ b/config.py.example @@ -101,3 +101,23 @@ device_inventory_map = { "asset_tag": "asset_tag", vm_inventory_map = { "status/label": "deployment_status", "comments": "notes", "name": "name" } + +# To allow syncing of usermacros from NetBox, set to True. +# this will enable both field mapping and config context usermacros. +# +# If set to "full", it will force the update of secret usermacros every run. +# Please see the README.md for more information. +usermacro_sync = False + +# device usermacro_map to map NetBox fields to usermacros. +device_usermacro_map = {"serial": "{$HW_SERIAL}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}"} + +# virtual machine usermacro_map to map NetBox fields to usermacros. 
+vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}"} + From fd70045c6d0d7b51383a6244748c97fcce0188b4 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Mon, 17 Feb 2025 12:57:57 +0100 Subject: [PATCH 17/93] Minor doc updates --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 612588f..eb0d0c3 100644 --- a/README.md +++ b/README.md @@ -358,7 +358,7 @@ templates_config_context_overrule = True You can choose to use NetBox as a source for Host usermacros by enabling the following option in the configuration file: -``` +```python usermacro_sync = True ``` @@ -388,6 +388,7 @@ Through this method, it is possible to define the following types of usermacros: The default macro type is text if no `type` and `value` have been set. It is also possible to create usermacros with [context](https://www.zabbix.com/documentation/7.0/en/manual/config/macros/user_macros_context). + Examples: ```json @@ -443,6 +444,7 @@ for devices and/or virtual machines in the configuration file. This method only supports `text` type usermacros. For example: + ```python usermacro_sync = True device_usermacro_map = {"serial": "{$HW_SERIAL}", From d65fa5b699385183731f43798a09f628a7de46c9 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 19 Feb 2025 15:56:01 +0100 Subject: [PATCH 18/93] Added tag support --- config.py.example | 30 ++++++++++ modules/device.py | 49 +++++++++++++--- modules/tags.py | 117 +++++++++++++++++++++++++++++++++++++ modules/tools.py | 11 ++++ modules/virtual_machine.py | 7 ++- netbox_zabbix_sync.py | 2 + 6 files changed, 208 insertions(+), 8 deletions(-) create mode 100644 modules/tags.py diff --git a/config.py.example b/config.py.example index 68f6fea..e4082e6 100644 --- a/config.py.example +++ b/config.py.example @@ -121,3 +121,33 @@ vm_usermacro_map = {"memory": "{$TOTAL_MEMORY}", "url": "{$NB_URL}", "id": "{$NB_ID}"} +# To sync host tags to Zabbix, set to True. +tag_sync = False + +# Setting tag_lower to True will lower capital letters ain tag names and values +# This is more inline with the Zabbix way of working with tags. +# +# You can however set this to False to ensure capital letters are synced to Zabbix tags. +tag_lower = True + +# We can sync NetBox device/VM tags to Zabbix, but as NetBox tags don't follow the key/value +# pattern, we need to specify a tag name to register the NetBox tags in Zabbix. +# +# +# +# If tag_name is set to False, we won't sync NetBox device/VM tags to Zabbix. +tag_name = 'NetBox' + +# We can choose to use 'name', 'slug' or 'display' NetBox tag properties as a value in Zabbix. +# 'name'is used by default. +tag_value = "name" + +# device tag_map to map NetBox fields to host tags. +device_tag_map = {"site/name": "site", + "rack/name": "rack", + "platform/name": "target"} + +# Virtual machine tag_map to map NetBox fields to host tags. 
+vm_tag_map = {"site/name": "site", + "cluster/name": "cluster", + "platform/name": "target"} diff --git a/modules/device.py b/modules/device.py index 76f07cf..4ec96b5 100644 --- a/modules/device.py +++ b/modules/device.py @@ -12,8 +12,9 @@ from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalE InterfaceConfigError, JournalError) from modules.interface import ZabbixInterface from modules.usermacros import ZabbixUsermacros +from modules.tags import ZabbixTags from modules.hostgroups import Hostgroup -from modules.tools import field_mapper +from modules.tools import field_mapper, remove_duplicates try: from config import ( @@ -24,7 +25,12 @@ try: inventory_mode, device_inventory_map, usermacro_sync, - device_usermacro_map + device_usermacro_map, + tag_sync, + tag_lower, + tag_name, + tag_value, + device_tag_map ) except ModuleNotFoundError: print("Configuration file config.py not found in main directory." @@ -60,6 +66,7 @@ class PhysicalDevice(): self.inventory_mode = -1 self.inventory = {} self.usermacros = {} + self.tags = {} self.logger = logger if logger else getLogger(__name__) self._setBasics() @@ -77,6 +84,10 @@ class PhysicalDevice(): """ Use device inventory maps """ return device_usermacro_map + def _tag_map(self): + """ Use device host tag maps """ + return device_tag_map + def _setBasics(self): """ Sets basic information like IP address. @@ -362,6 +373,21 @@ class PhysicalDevice(): self.usermacros = macros.generate() return True + + def set_tags(self): + """ + Generates Host Tags + """ + tags = ZabbixTags(self.nb, self._tag_map(), + tag_sync, tag_lower, tag_name=tag_name, + tag_value=tag_value, logger=self.logger, + host=self.name) + if tags.sync is False: + self.tags = [] + + self.tags = tags.generate() + return True + def setProxy(self, proxy_list): """ Sets proxy or proxy group if this @@ -432,7 +458,8 @@ class PhysicalDevice(): "description": description, "inventory_mode": self.inventory_mode, "inventory": self.inventory, - "macros": self.usermacros + "macros": self.usermacros, + "tags": self.tags } # If a Zabbix proxy or Zabbix Proxy group has been defined if self.zbxproxy: @@ -548,7 +575,8 @@ class PhysicalDevice(): selectHostGroups=["groupid"], selectParentTemplates=["templateid"], selectInventory=list(self._inventory_map().values()), - selectMacros=["macro","value","type","description"] + selectMacros=["macro","value","type","description"], + selectTags=["tag","value"] ) if len(host) > 1: e = (f"Got {len(host)} results for Zabbix hosts " @@ -598,9 +626,8 @@ class PhysicalDevice(): if group["groupid"] == self.group_id: self.logger.debug(f"Host {self.name}: hostgroup in-sync.") break - else: - self.logger.warning(f"Host {self.name}: hostgroup OUT of sync.") - self.updateZabbixHost(groups={'groupid': self.group_id}) + self.logger.warning(f"Host {self.name}: hostgroup OUT of sync.") + self.updateZabbixHost(groups={'groupid': self.group_id}) if int(host["status"]) == self.zabbix_state: self.logger.debug(f"Host {self.name}: status in-sync.") @@ -688,6 +715,14 @@ class PhysicalDevice(): self.logger.warning(f"Host {self.name}: usermacros OUT of sync.") self.updateZabbixHost(macros=self.usermacros) + # Check host usermacros + if tag_sync: + if remove_duplicates(host['tags'],sortkey='tag') == self.tags: + self.logger.debug(f"Host {self.name}: tags in-sync.") + else: + self.logger.warning(f"Host {self.name}: tags OUT of sync.") + self.updateZabbixHost(tags=self.tags) + # If only 1 interface has been found # pylint: disable=too-many-nested-blocks if 
len(host['interfaces']) == 1: diff --git a/modules/tags.py b/modules/tags.py new file mode 100644 index 0000000..4993cd3 --- /dev/null +++ b/modules/tags.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, logging-fstring-interpolation +""" +All of the Zabbix Usermacro related configuration +""" +from logging import getLogger +from modules.tools import field_mapper, remove_duplicates + +class ZabbixTags(): + """Class that represents a Zabbix interface.""" + + def __init__(self, nb, tag_map, tag_sync, tag_lower=True, + tag_name=None, tag_value=None, logger=None, host=None): + self.nb = nb + self.name = host if host else nb.name + self.tag_map = tag_map + self.logger = logger if logger else getLogger(__name__) + self.tags = {} + self.lower = tag_lower + self.tag_name = tag_name + self.tag_value = tag_value + self.tag_sync = tag_sync + self.sync = False + self._set_config() + + def __repr__(self): + return self.name + + def __str__(self): + return self.__repr__() + + def _set_config(self): + """ + Setup class + """ + if self.tag_sync: + self.sync = True + + return True + + def validate_tag(self, tag_name): + """ + Validates tag name + """ + if tag_name and isinstance(tag_name, str) and len(tag_name)<=256: + return True + return False + + def validate_value(self, tag_value): + """ + Validates tag value + """ + if tag_value and isinstance(tag_value, str) and len(tag_value)<=256: + return True + return False + + def render_tag(self, tag_name, tag_value): + """ + Renders a tag + """ + tag={} + if self.validate_tag(tag_name): + if self.lower: + tag['tag'] = tag_name.lower() + else: + tag['tag'] = tag_name + else: + self.logger.error(f'Tag {tag_name} is not a valid tag name, skipping.') + return False + + if self.validate_value(tag_value): + if self.lower: + tag['value'] = tag_value.lower() + else: + tag['value'] = tag_value + else: + self.logger.error(f'Tag {tag_name} has an invalid value: \'{tag_value}\', skipping.') + return False + return tag + + def generate(self): + """ + Generate full set of Usermacros + """ + # pylint: disable=too-many-branches + tags=[] + # Parse the field mapper for tags + if self.tag_map: + self.logger.debug(f"Host {self.nb.name}: Starting tag mapper") + field_tags = field_mapper(self.nb.name, self.tag_map, self.nb, self.logger) + for tag, value in field_tags.items(): + t = self.render_tag(tag, value) + if t: + tags.append(t) + + # Parse NetBox config context for tags + if ("zabbix" in self.nb.config_context and "tags" in self.nb.config_context['zabbix'] + and isinstance(self.nb.config_context['zabbix']['tags'], list)): + for tag in self.nb.config_context['zabbix']['tags']: + if isinstance(tag, dict): + for tagname, value in tag.items(): + t = self.render_tag(tagname, value) + if t: + tags.append(t) + + # Pull in NetBox device tags if tag_name is set + if self.tag_name and isinstance(self.tag_name, str): + for tag in self.nb.tags: + if self.tag_value.lower() in ['display', 'name', 'slug']: + value = tag[self.tag_value] + else: + value = tag['name'] + t = self.render_tag(self.tag_name, value) + if t: + tags.append(t) + + return remove_duplicates(tags, sortkey='tag') diff --git a/modules/tools.py b/modules/tools.py index f32e802..8d658a3 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -76,3 +76,14 @@ def field_mapper(host, mapper, nbdevice, logger): logger.debug(f"Host {host}: Field mapping complete. 
" f"Mapped {len(list(filter(None, data.values())))} field(s)") return data + +def remove_duplicates(input_list, sortkey=None): + """ + Removes duplicate entries from a list and sorts the list + """ + output_list = [] + if isinstance(input_list, list): + output_list = [dict(t) for t in {tuple(d.items()) for d in input_list}] + if sortkey and isinstance(sortkey, str): + output_list.sort(key=lambda x: x[sortkey]) + return output_list diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index 5afdb18..273f9e7 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -11,6 +11,7 @@ try: from config import ( vm_inventory_map, vm_usermacro_map, + vm_tag_map, traverse_site_groups, traverse_regions ) @@ -31,9 +32,13 @@ class VirtualMachine(PhysicalDevice): return vm_inventory_map def _usermacro_map(self): - """ use VM inventory maps """ + """ use VM usermacro maps """ return vm_usermacro_map + def _tag_map(self): + """ use VM tag maps """ + return vm_tag_map + def set_hostgroup(self, hg_format, nb_site_groups, nb_regions): """Set the hostgroup for this device""" # Create new Hostgroup instance diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 5498edc..04a4e07 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -173,6 +173,7 @@ def main(arguments): continue vm.set_inventory(nb_vm) vm.set_usermacros() + vm.set_tags() # Checks if device is in cleanup state if vm.status in zabbix_device_removal: if vm.zabbix_id: @@ -227,6 +228,7 @@ def main(arguments): continue device.set_inventory(nb_device) device.set_usermacros() + device.set_tags() # Checks if device is part of cluster. # Requires clustering variable if device.isCluster() and clustering: From 523393308d6f1e4dd87ea395d73e055d9a5ad055 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 19 Feb 2025 16:25:11 +0100 Subject: [PATCH 19/93] Updated docs --- README.md | 80 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/README.md b/README.md index eb0d0c3..d533ec6 100644 --- a/README.md +++ b/README.md @@ -353,6 +353,86 @@ template(s) will take priority over the device type custom field template. templates_config_context_overrule = True ``` +### Tags + +This script can sync host tags to your Zabbix hosts for use in filtering, +SLA calculations and event correlation. + +Tags can be synced from the following sources: + +1. NetBox device/vm tags +2. NetBox config ontext +3. NetBox fields + +Syncing tags will override any tags that were set manually on the host, +making NetBox the single source-of-truth for managing tags. + +To enable syncing, turn on tag_sync in the config file. +By default, this script will modify tag names and tag values to lowercase. +You can change this behaviour by setting tag_lower to False. + +```python +tag_sync = True +tag_lower = True +``` + +#### Device tags + +As NetBox doesn't follow the tag/value pattern for tags, we will need a tag +name set to register the netwbox tags. + +By default the tag name is "NetBox", but you can change this to whatever you want. +The value for the tag can be choosen from 'name', 'display' or 'slug'. + +```python +tag_name = 'NetBox' +tag_value = 'name' +``` + +#### Config context + +You can supply custom tags via config context by adding the following: + +```json +{ + "zabbix": { + "tags": [ + { + "MyTagName": "MyTagValue" + }, + { + "environment": "production" + } + ], + } +} +``` + +This will allow you to assign tags based on the config context rules. 
+ +#### NetBox Field + +NetBox field can also be used as input for tags, just like inventory and usermacros. +To enable syncing from fields, make sure to configure a `device_tag_map` and/or a `vm_tag_map`. + +```python +device_tag_map = {"site/name": "site", + "rack/name": "rack", + "platform/name": "target"} + +vm_tag_map = {"site/name": "site", + "cluster/name": "cluster", + "platform/name": "target"} +``` + +To turn off field syncing, set the maps to empty dictionaries: + +```python +device_tag_map = {} +vm_tag_map = {} +``` + + ### Usermacros You can choose to use NetBox as a source for Host usermacros by From 593c8707afeb65d85929271e81c54a24fb638f2d Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 20 Feb 2025 11:01:04 +0100 Subject: [PATCH 20/93] New publish-image workflow Should remove the dependency on PAT --- .github/workflows/publish-image.yml | 83 +++++++++++++++-------------- 1 file changed, 44 insertions(+), 39 deletions(-) diff --git a/.github/workflows/publish-image.yml b/.github/workflows/publish-image.yml index e9e6421..bf87bf4 100644 --- a/.github/workflows/publish-image.yml +++ b/.github/workflows/publish-image.yml @@ -1,46 +1,51 @@ -name: Publish Docker image to GHCR on a new version +name: Build and Push Docker Image + +permissions: + contents: read + packages: write on: - push: - branches: - - main - - dockertest -# tags: -# - [0-9]+.* - -env: - REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }} + release: + types: [published] + pull_request: + types: [opened, synchronize] jobs: - test_quality: - uses: ./.github/workflows/quality.yml - build_and_publish: + build: runs-on: ubuntu-latest steps: - - name: Checkout sources - uses: actions/checkout@v4 - - name: Log in to the container registry - uses: docker/login-action@v3 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GHCR_PAT }} - - name: Extract metadata (tags, labels) - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - tags: | - type=semver,pattern={{ version }} - type=ref,event=branch - type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }} - type=sha - - name: Build and push Docker image - uses: docker/build-push-action@v5 - with: - context: . - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 + + - name: Login to GitHub Container Registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96 + with: + images: ghcr.io/${{ github.repository }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Build and push Docker image + uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 + with: + context: . + file: ./Dockerfile + push: true + platforms: linux/amd64,linux/arm64 + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + annotations: | + index:org.opencontainers.image.description=Python script to synchronise NetBox devices to Zabbix. 
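The workflow above publishes a multi-platform (linux/amd64 and linux/arm64) image to GHCR. As a quick sanity check after a build, the published manifest can be inspected along these lines (the tag shown is only an example; the actual tags depend on the release or pull request that triggered the build):

```bash
docker buildx imagetools inspect ghcr.io/thenetworkguy/netbox-zabbix-sync:main
```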
From 733df33b7102012772e0c6b4fc30239653af6afa Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 20 Feb 2025 11:02:43 +0100 Subject: [PATCH 21/93] added step to run linting tests --- .github/workflows/publish-image.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/publish-image.yml b/.github/workflows/publish-image.yml index bf87bf4..615b784 100644 --- a/.github/workflows/publish-image.yml +++ b/.github/workflows/publish-image.yml @@ -11,6 +11,8 @@ on: types: [opened, synchronize] jobs: + test_quality: + uses: ./.github/workflows/quality.yml build: runs-on: ubuntu-latest steps: From 825d788cfe1c30a35f2c7201e95b0c6f26808490 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 20 Feb 2025 11:42:25 +0100 Subject: [PATCH 22/93] Update Dockerfile --- Dockerfile | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Dockerfile b/Dockerfile index fa8d9c4..0551bd1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,12 @@ # syntax=docker/dockerfile:1 FROM python:3.12-alpine +LABEL org.opencontainers.image.source=https://github.com/TheNetworkGuy/netbox-zabbix-sync +LABEL org.opencontainers.image.title="NetBox-Zabbix-Sync" +LABEL org.opencontainers.image.description="Python script to synchronise NetBox devices to Zabbix." +LABEL org.opencontainers.image.documentation=https://github.com/TheNetworkGuy/netbox-zabbix-sync/ +LABEL org.opencontainers.image.licenses=MIT +LABEL org.opencontainers.image.authors="Twan Kamans" + RUN mkdir -p /opt/netbox-zabbix COPY . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix From 525904cf43d7f77a7010c73001549e86f8d658db Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Wed, 26 Feb 2025 10:07:51 +0100 Subject: [PATCH 23/93] =?UTF-8?q?=F0=9F=9A=A8=20Linted=20and=20formatted?= =?UTF-8?q?=20file?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- netbox_zabbix_sync.py | 175 +++++++++++++++++++++++++----------------- 1 file changed, 104 insertions(+), 71 deletions(-) diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 04a4e07..b355657 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -2,46 +2,55 @@ # pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation """NetBox to Zabbix sync script.""" -import logging import argparse +import logging import ssl from os import environ, path, sys + from pynetbox import api from pynetbox.core.query import RequestError as NBRequestError from requests.exceptions import ConnectionError as RequestsConnectionError -from zabbix_utils import ZabbixAPI, APIRequestError, ProcessingError +from zabbix_utils import APIRequestError, ProcessingError, ZabbixAPI + from modules.device import PhysicalDevice -from modules.virtual_machine import VirtualMachine -from modules.tools import convert_recordset, proxy_prepper from modules.exceptions import EnvironmentVarError, HostgroupError, SyncError +from modules.tools import convert_recordset, proxy_prepper +from modules.virtual_machine import VirtualMachine + try: from config import ( + clustering, + create_hostgroups, + create_journal, + full_proxy_sync, + hostgroup_format, + nb_device_filter, + nb_vm_filter, + sync_vms, templates_config_context, templates_config_context_overrule, - clustering, create_hostgroups, - create_journal, full_proxy_sync, - zabbix_device_removal, - zabbix_device_disable, - hostgroup_format, vm_hostgroup_format, - nb_device_filter, - sync_vms, - nb_vm_filter + zabbix_device_disable, + zabbix_device_removal, ) except ModuleNotFoundError: - 
print("Configuration file config.py not found in main directory." - "Please create the file or rename the config.py.example file to config.py.") + print( + "Configuration file config.py not found in main directory." + "Please create the file or rename the config.py.example file to config.py." + ) sys.exit(1) # Set logging -log_format = logging.Formatter('%(asctime)s - %(name)s - ' - '%(levelname)s - %(message)s') +log_format = logging.Formatter( + "%(asctime)s - %(name)s - " "%(levelname)s - %(message)s" +) lgout = logging.StreamHandler() lgout.setFormatter(log_format) lgout.setLevel(logging.DEBUG) -lgfile = logging.FileHandler(path.join(path.dirname( - path.realpath(__file__)), "sync.log")) +lgfile = logging.FileHandler( + path.join(path.dirname(path.realpath(__file__)), "sync.log") +) lgfile.setFormatter(log_format) lgfile.setLevel(logging.DEBUG) @@ -84,15 +93,26 @@ def main(arguments): netbox = api(netbox_host, token=netbox_token, threading=True) # Check if the provided Hostgroup layout is valid hg_objects = hostgroup_format.split("/") - allowed_objects = ["location", "role", "manufacturer", "region", - "site", "site_group", "tenant", "tenant_group"] + allowed_objects = [ + "location", + "role", + "manufacturer", + "region", + "site", + "site_group", + "tenant", + "tenant_group", + ] # Create API call to get all custom fields which are on the device objects try: - device_cfs = list(netbox.extras.custom_fields.filter( - type="text", content_type_id=23)) + device_cfs = list( + netbox.extras.custom_fields.filter(type="text", content_type_id=23) + ) except RequestsConnectionError: - logger.error(f"Unable to connect to NetBox with URL {netbox_host}." - " Please check the URL and status of NetBox.") + logger.error( + f"Unable to connect to NetBox with URL {netbox_host}." + " Please check the URL and status of NetBox." + ) sys.exit(1) except NBRequestError as e: logger.error(f"NetBox error: {e}") @@ -101,8 +121,10 @@ def main(arguments): allowed_objects.append(cf.name) for hg_object in hg_objects: if hg_object not in allowed_objects: - e = (f"Hostgroup item {hg_object} is not valid. Make sure you" - " use valid items and seperate them with '/'.") + e = ( + f"Hostgroup item {hg_object} is not valid. Make sure you" + " use valid items and seperate them with '/'." 
+ ) logger.error(e) raise HostgroupError(e) # Set Zabbix API @@ -114,18 +136,18 @@ def main(arguments): ssl_ctx.load_verify_locations(environ["REQUESTS_CA_BUNDLE"]) if not zabbix_token: - zabbix = ZabbixAPI(zabbix_host, user=zabbix_user, - password=zabbix_pass, ssl_context=ssl_ctx) - else: zabbix = ZabbixAPI( - zabbix_host, token=zabbix_token, ssl_context=ssl_ctx) + zabbix_host, user=zabbix_user, password=zabbix_pass, ssl_context=ssl_ctx + ) + else: + zabbix = ZabbixAPI(zabbix_host, token=zabbix_token, ssl_context=ssl_ctx) zabbix.check_auth() except (APIRequestError, ProcessingError) as e: e = f"Zabbix returned the following error: {str(e)}" logger.error(e) sys.exit(1) # Set API parameter mapping based on API version - if not str(zabbix.version).startswith('7'): + if not str(zabbix.version).startswith("7"): proxy_name = "host" else: proxy_name = "name" @@ -133,23 +155,21 @@ def main(arguments): netbox_devices = list(netbox.dcim.devices.filter(**nb_device_filter)) netbox_vms = [] if sync_vms: - netbox_vms = list( - netbox.virtualization.virtual_machines.filter(**nb_vm_filter)) + netbox_vms = list(netbox.virtualization.virtual_machines.filter(**nb_vm_filter)) netbox_site_groups = convert_recordset((netbox.dcim.site_groups.all())) netbox_regions = convert_recordset(netbox.dcim.regions.all()) netbox_journals = netbox.extras.journal_entries - zabbix_groups = zabbix.hostgroup.get(output=['groupid', 'name']) - zabbix_templates = zabbix.template.get(output=['templateid', 'name']) - zabbix_proxies = zabbix.proxy.get(output=['proxyid', proxy_name]) + zabbix_groups = zabbix.hostgroup.get(output=["groupid", "name"]) + zabbix_templates = zabbix.template.get(output=["templateid", "name"]) + zabbix_proxies = zabbix.proxy.get(output=["proxyid", proxy_name]) # Set empty list for proxy processing Zabbix <= 6 zabbix_proxygroups = [] - if str(zabbix.version).startswith('7'): - zabbix_proxygroups = zabbix.proxygroup.get( - output=["proxy_groupid", "name"]) + if str(zabbix.version).startswith("7"): + zabbix_proxygroups = zabbix.proxygroup.get(output=["proxy_groupid", "name"]) # Sanitize proxy data if proxy_name == "host": for proxy in zabbix_proxies: - proxy['name'] = proxy.pop('host') + proxy["name"] = proxy.pop("host") # Prepare list of all proxy and proxy_groups zabbix_proxy_list = proxy_prepper(zabbix_proxies, zabbix_proxygroups) @@ -159,15 +179,15 @@ def main(arguments): # Go through all NetBox devices for nb_vm in netbox_vms: try: - vm = VirtualMachine(nb_vm, zabbix, netbox_journals, nb_version, - create_journal, logger) + vm = VirtualMachine( + nb_vm, zabbix, netbox_journals, nb_version, create_journal, logger + ) logger.debug(f"Host {vm.name}: Started operations on VM.") vm.set_vm_template() # Check if a valid template has been found for this VM. if not vm.zbx_template_names: continue - vm.set_hostgroup(vm_hostgroup_format, - netbox_site_groups, netbox_regions) + vm.set_hostgroup(vm_hostgroup_format, netbox_site_groups, netbox_regions) # Check if a valid hostgroup has been found for this VM. if not vm.hostgroup: continue @@ -184,17 +204,23 @@ def main(arguments): continue # Device has been added to NetBox # but is not in Activate state - logger.info(f"VM {vm.name}: skipping since this VM is " - f"not in the active state.") + logger.info( + f"VM {vm.name}: skipping since this VM is " + f"not in the active state." 
+ ) continue # Check if the VM is in the disabled state if vm.status in zabbix_device_disable: vm.zabbix_state = 1 # Check if VM is already in Zabbix if vm.zabbix_id: - vm.ConsistencyCheck(zabbix_groups, zabbix_templates, - zabbix_proxy_list, full_proxy_sync, - create_hostgroups) + vm.ConsistencyCheck( + zabbix_groups, + zabbix_templates, + zabbix_proxy_list, + full_proxy_sync, + create_hostgroups, + ) continue # Add hostgroup is config is set if create_hostgroups: @@ -205,24 +231,24 @@ def main(arguments): # Add new hostgroups to zabbix group list zabbix_groups.append(group) # Add VM to Zabbix - vm.createInZabbix(zabbix_groups, zabbix_templates, - zabbix_proxy_list) + vm.createInZabbix(zabbix_groups, zabbix_templates, zabbix_proxy_list) except SyncError: pass for nb_device in netbox_devices: try: # Set device instance set data such as hostgroup and template information. - device = PhysicalDevice(nb_device, zabbix, netbox_journals, nb_version, - create_journal, logger) + device = PhysicalDevice( + nb_device, zabbix, netbox_journals, nb_version, create_journal, logger + ) logger.debug(f"Host {device.name}: started operations on device.") - device.set_template(templates_config_context, - templates_config_context_overrule) + device.set_template( + templates_config_context, templates_config_context_overrule + ) # Check if a valid template has been found for this VM. if not device.zbx_template_names: continue - device.set_hostgroup( - hostgroup_format, netbox_site_groups, netbox_regions) + device.set_hostgroup(hostgroup_format, netbox_site_groups, netbox_regions) # Check if a valid hostgroup has been found for this VM. if not device.hostgroup: continue @@ -234,14 +260,15 @@ def main(arguments): if device.isCluster() and clustering: # Check if device is primary or secondary if device.promoteMasterDevice(): - e = (f"Device {device.name}: is " - f"part of cluster and primary.") + e = f"Device {device.name}: is " f"part of cluster and primary." logger.info(e) else: # Device is secondary in cluster. # Don't continue with this device. - e = (f"Device {device.name}: is part of cluster " - f"but not primary. Skipping this host...") + e = ( + f"Device {device.name}: is part of cluster " + f"but not primary. Skipping this host..." + ) logger.info(e) continue # Checks if device is in cleanup state @@ -254,17 +281,23 @@ def main(arguments): continue # Device has been added to NetBox # but is not in Activate state - logger.info(f"Device {device.name}: skipping since this device is " - f"not in the active state.") + logger.info( + f"Device {device.name}: skipping since this device is " + f"not in the active state." 
+ ) continue # Check if the device is in the disabled state if device.status in zabbix_device_disable: device.zabbix_state = 1 # Check if device is already in Zabbix if device.zabbix_id: - device.ConsistencyCheck(zabbix_groups, zabbix_templates, - zabbix_proxy_list, full_proxy_sync, - create_hostgroups) + device.ConsistencyCheck( + zabbix_groups, + zabbix_templates, + zabbix_proxy_list, + full_proxy_sync, + create_hostgroups, + ) continue # Add hostgroup is config is set if create_hostgroups: @@ -275,17 +308,17 @@ def main(arguments): # Add new hostgroups to zabbix group list zabbix_groups.append(group) # Add device to Zabbix - device.createInZabbix(zabbix_groups, zabbix_templates, - zabbix_proxy_list) + device.createInZabbix(zabbix_groups, zabbix_templates, zabbix_proxy_list) except SyncError: pass if __name__ == "__main__": parser = argparse.ArgumentParser( - description='A script to sync Zabbix with NetBox device data.' + description="A script to sync Zabbix with NetBox device data." + ) + parser.add_argument( + "-v", "--verbose", help="Turn on debugging.", action="store_true" ) - parser.add_argument("-v", "--verbose", help="Turn on debugging.", - action="store_true") args = parser.parse_args() main(args) From 53066d2d51dd177b2c9cc801d3d5dcfcacef6a88 Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Wed, 26 Feb 2025 10:09:35 +0100 Subject: [PATCH 24/93] =?UTF-8?q?=E2=9C=A8=20Added=20separate=20log=20leve?= =?UTF-8?q?ls?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- netbox_zabbix_sync.py | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index b355657..5d6606e 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -41,23 +41,18 @@ except ModuleNotFoundError: sys.exit(1) # Set logging -log_format = logging.Formatter( - "%(asctime)s - %(name)s - " "%(levelname)s - %(message)s" -) lgout = logging.StreamHandler() -lgout.setFormatter(log_format) -lgout.setLevel(logging.DEBUG) - lgfile = logging.FileHandler( path.join(path.dirname(path.realpath(__file__)), "sync.log") ) -lgfile.setFormatter(log_format) -lgfile.setLevel(logging.DEBUG) + +logging.basicConfig( + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + level=logging.WARNING, + handlers=[lgout, lgfile], +) logger = logging.getLogger("NetBox-Zabbix-sync") -logger.addHandler(lgout) -logger.addHandler(lgfile) -logger.setLevel(logging.WARNING) def main(arguments): @@ -65,7 +60,11 @@ def main(arguments): # pylint: disable=too-many-branches, too-many-statements # set environment variables if arguments.verbose: + logger.setLevel(logging.INFO) + if arguments.debug: logger.setLevel(logging.DEBUG) + if arguments.debug_all: + logging.getLogger().setLevel(logging.DEBUG) env_vars = ["ZABBIX_HOST", "NETBOX_HOST", "NETBOX_TOKEN"] if "ZABBIX_TOKEN" in environ: env_vars.append("ZABBIX_TOKEN") @@ -320,5 +319,14 @@ if __name__ == "__main__": parser.add_argument( "-v", "--verbose", help="Turn on debugging.", action="store_true" ) + parser.add_argument( + "-vv", "--debug", help="Turn on debugging.", action="store_true" + ) + parser.add_argument( + "-vvv", + "--debug-all", + help="Turn on debugging for all modules.", + action="store_true", + ) args = parser.parse_args() main(args) From a5312365f9ccc3ee5aaf2ddb6421b66cd18eb0f0 Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Wed, 26 Feb 2025 10:11:47 +0100 Subject: [PATCH 25/93] =?UTF-8?q?=F0=9F=93=84=20Added=20new=20cli=20params?= 
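(Editorial aside, not part of any patch: the log-level rework in the patch above splits verbosity between the script's own "NetBox-Zabbix-sync" logger and the root logger. Below is a minimal runnable sketch of that flag-to-level mapping, using only names that appear in the diff; the ["-vv"] invocation at the end is purely an illustrative value.)

import argparse
import logging

logger = logging.getLogger("NetBox-Zabbix-sync")

def apply_verbosity(args):
    # -v raises only the script's own logger to INFO.
    if args.verbose:
        logger.setLevel(logging.INFO)
    # -vv raises the script's own logger to DEBUG.
    if args.debug:
        logger.setLevel(logging.DEBUG)
    # -vvv raises the root logger as well, so imported modules also emit DEBUG.
    if args.debug_all:
        logging.getLogger().setLevel(logging.DEBUG)

parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-vv", "--debug", action="store_true")
parser.add_argument("-vvv", "--debug-all", action="store_true")
apply_verbosity(parser.parse_args(["-vv"]))
print(logging.getLevelName(logger.getEffectiveLevel()))  # DEBUG

(End of aside.)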
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index d533ec6..0bd6bfc 100644 --- a/README.md +++ b/README.md @@ -575,9 +575,11 @@ python3 netbox_zabbix_sync.py ### Flags -| Flag | Option | Description | -| ---- | ------- | ---------------------- | -| -v | verbose | Log with debugging on. | +| Flag | Option | Description | +| ---- | --------- | ------------------------------------- | +| -v | verbose | Log with info on. | +| -vv | debug | Log with debugging on. | +| -vvv | debug-all | Log with debugging on for all modules | ## Config context From 0c798ec96890da56a7d39d40e5314070217238e9 Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Wed, 26 Feb 2025 11:10:56 +0100 Subject: [PATCH 26/93] =?UTF-8?q?=E2=9C=A8=20Added=20quiet=20param?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- netbox_zabbix_sync.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 5d6606e..75ad65c 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -65,6 +65,9 @@ def main(arguments): logger.setLevel(logging.DEBUG) if arguments.debug_all: logging.getLogger().setLevel(logging.DEBUG) + if arguments.quiet: + logging.getLogger().setLevel(logging.ERROR) + env_vars = ["ZABBIX_HOST", "NETBOX_HOST", "NETBOX_TOKEN"] if "ZABBIX_TOKEN" in environ: env_vars.append("ZABBIX_TOKEN") @@ -328,5 +331,6 @@ if __name__ == "__main__": help="Turn on debugging for all modules.", action="store_true", ) + parser.add_argument("-q", "--quiet", help="Turn off warnings.", action="store_true") args = parser.parse_args() main(args) From b314b2c8836a4d0a978bf83407c5ce35ab83c97e Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Wed, 26 Feb 2025 14:00:18 +0100 Subject: [PATCH 27/93] =?UTF-8?q?=F0=9F=9A=A8=20Formatted=20and=20linted?= =?UTF-8?q?=20files?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- modules/device.py | 452 +++++++++++++++++++++++-------------- modules/exceptions.py | 34 ++- modules/hostgroups.py | 82 +++++-- modules/interface.py | 44 ++-- modules/tags.py | 54 +++-- modules/tools.py | 44 ++-- modules/usermacros.py | 63 ++++-- modules/virtual_machine.py | 45 ++-- 8 files changed, 514 insertions(+), 304 deletions(-) diff --git a/modules/device.py b/modules/device.py index 4ec96b5..83e6fdc 100644 --- a/modules/device.py +++ b/modules/device.py @@ -3,48 +3,61 @@ """ Device specific handeling for NetBox to Zabbix """ -from os import sys -from re import search from copy import deepcopy from logging import getLogger +from os import sys +from re import search + from zabbix_utils import APIRequestError -from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalError, - InterfaceConfigError, JournalError) -from modules.interface import ZabbixInterface -from modules.usermacros import ZabbixUsermacros -from modules.tags import ZabbixTags + +from modules.exceptions import ( + InterfaceConfigError, + JournalError, + SyncExternalError, + SyncInventoryError, + TemplateError, +) from modules.hostgroups import Hostgroup +from modules.interface import ZabbixInterface +from modules.tags import ZabbixTags from modules.tools import field_mapper, remove_duplicates +from modules.usermacros import ZabbixUsermacros try: from config import ( - template_cf, device_cf, - traverse_site_groups, - traverse_regions, - 
inventory_sync, - inventory_mode, + device_cf, device_inventory_map, - usermacro_sync, + device_tag_map, device_usermacro_map, - tag_sync, + inventory_mode, + inventory_sync, tag_lower, tag_name, + tag_sync, tag_value, - device_tag_map + template_cf, + traverse_regions, + traverse_site_groups, + usermacro_sync, ) except ModuleNotFoundError: - print("Configuration file config.py not found in main directory." - "Please create the file or rename the config.py.example file to config.py.") + print( + "Configuration file config.py not found in main directory." + "Please create the file or rename the config.py.example file to config.py." + ) sys.exit(0) -class PhysicalDevice(): + +class PhysicalDevice: # pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments """ Represents Network device. INPUT: (NetBox device class, ZabbixAPI class, journal flag, NB journal class) """ - def __init__(self, nb, zabbix, nb_journal_class, nb_version, journal=None, logger=None): + def __init__( + self, nb, zabbix, nb_journal_class, nb_version, journal=None, logger=None + ): self.nb = nb self.id = nb.id self.name = nb.name @@ -77,15 +90,15 @@ class PhysicalDevice(): return self.__repr__() def _inventory_map(self): - """ Use device inventory maps """ + """Use device inventory maps""" return device_inventory_map def _usermacro_map(self): - """ Use device inventory maps """ + """Use device inventory maps""" return device_usermacro_map def _tag_map(self): - """ Use device host tag maps """ + """Use device host tag maps""" return device_tag_map def _setBasics(self): @@ -112,30 +125,38 @@ class PhysicalDevice(): # Validate hostname format. odd_character_list = ["ä", "ö", "ü", "Ä", "Ö", "Ü", "ß"] self.use_visible_name = False - if (any(letter in self.name for letter in odd_character_list) or - bool(search('[\u0400-\u04FF]', self.name))): + if any(letter in self.name for letter in odd_character_list) or bool( + search("[\u0400-\u04ff]", self.name) + ): self.name = f"NETBOX_ID{self.id}" self.visible_name = self.nb.name self.use_visible_name = True - self.logger.info(f"Host {self.visible_name} contains special characters. " - f"Using {self.name} as name for the NetBox object " - f"and using {self.visible_name} as visible name in Zabbix.") + self.logger.info( + f"Host {self.visible_name} contains special characters. " + f"Using {self.name} as name for the NetBox object " + f"and using {self.visible_name} as visible name in Zabbix." 
+ ) else: pass def set_hostgroup(self, hg_format, nb_site_groups, nb_regions): """Set the hostgroup for this device""" # Create new Hostgroup instance - hg = Hostgroup("dev", self.nb, self.nb_api_version, logger=self.logger, - nested_sitegroup_flag=traverse_site_groups, - nested_region_flag=traverse_regions, - nb_groups=nb_site_groups, - nb_regions=nb_regions) + hg = Hostgroup( + "dev", + self.nb, + self.nb_api_version, + logger=self.logger, + nested_sitegroup_flag=traverse_site_groups, + nested_region_flag=traverse_regions, + nb_groups=nb_site_groups, + nb_regions=nb_regions, + ) # Generate hostgroup based on hostgroup format self.hostgroup = hg.generate(hg_format) def set_template(self, prefer_config_context, overrule_custom): - """ Set Template """ + """Set Template""" self.zbx_template_names = None # Gather templates ONLY from the device specific context if prefer_config_context: @@ -159,7 +180,7 @@ class PhysicalDevice(): return True def get_templates_cf(self): - """ Get template from custom field """ + """Get template from custom field""" # Get Zabbix templates from the device type device_type_cfs = self.nb.device_type.custom_fields # Check if the ZBX Template CF is present @@ -167,20 +188,26 @@ class PhysicalDevice(): # Set value to template return [device_type_cfs[template_cf]] # Custom field not found, return error - e = (f"Custom field {template_cf} not " + e = ( + f"Custom field {template_cf} not " f"found for {self.nb.device_type.manufacturer.name}" - f" - {self.nb.device_type.display}.") + f" - {self.nb.device_type.display}." + ) raise TemplateError(e) def get_templates_context(self): - """ Get Zabbix templates from the device context """ + """Get Zabbix templates from the device context""" if "zabbix" not in self.config_context: - e = (f"Host {self.name}: Key 'zabbix' not found in config " - "context for template lookup") + e = ( + f"Host {self.name}: Key 'zabbix' not found in config " + "context for template lookup" + ) raise TemplateError(e) if "templates" not in self.config_context["zabbix"]: - e = (f"Host {self.name}: Key 'templates' not found in config " - "context 'zabbix' for template lookup") + e = ( + f"Host {self.name}: Key 'templates' not found in config " + "context 'zabbix' for template lookup" + ) raise TemplateError(e) # Check if format is list or string. if isinstance(self.config_context["zabbix"]["templates"], str): @@ -188,25 +215,31 @@ class PhysicalDevice(): return self.config_context["zabbix"]["templates"] def set_inventory(self, nbdevice): - """ Set host inventory """ + """Set host inventory""" # Set inventory mode. Default is disabled (see class init function). if inventory_mode == "disabled": if inventory_sync: - self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. " - "Inventory sync is enabled in config but inventory mode is disabled.") + self.logger.error( + f"Host {self.name}: Unable to map NetBox inventory to Zabbix. " + "Inventory sync is enabled in config but inventory mode is disabled." + ) return True if inventory_mode == "manual": self.inventory_mode = 0 elif inventory_mode == "automatic": self.inventory_mode = 1 else: - self.logger.error(f"Host {self.name}: Specified value for inventory mode in" - f" config is not valid. Got value {inventory_mode}") + self.logger.error( + f"Host {self.name}: Specified value for inventory mode in" + f" config is not valid. 
Got value {inventory_mode}" + ) return False self.inventory = {} - if inventory_sync and self.inventory_mode in [0,1]: + if inventory_sync and self.inventory_mode in [0, 1]: self.logger.debug(f"Host {self.name}: Starting inventory mapper") - self.inventory = field_mapper(self.name, self._inventory_map(), nbdevice, self.logger) + self.inventory = field_mapper( + self.name, self._inventory_map(), nbdevice, self.logger + ) return True def isCluster(self): @@ -220,13 +253,17 @@ class PhysicalDevice(): Returns chassis master ID. """ if not self.isCluster(): - e = (f"Unable to proces {self.name} for cluster calculation: " - f"not part of a cluster.") + e = ( + f"Unable to proces {self.name} for cluster calculation: " + f"not part of a cluster." + ) self.logger.warning(e) raise SyncInventoryError(e) if not self.nb.virtual_chassis.master: - e = (f"{self.name} is part of a NetBox virtual chassis which does " - "not have a master configured. Skipping for this reason.") + e = ( + f"{self.name} is part of a NetBox virtual chassis which does " + "not have a master configured. Skipping for this reason." + ) self.logger.error(e) raise SyncInventoryError(e) return self.nb.virtual_chassis.master.id @@ -239,9 +276,11 @@ class PhysicalDevice(): """ masterid = self.getClusterMaster() if masterid == self.id: - self.logger.debug(f"Host {self.name} is primary cluster member. " - f"Modifying hostname from {self.name} to " + - f"{self.nb.virtual_chassis.name}.") + self.logger.debug( + f"Host {self.name} is primary cluster member. " + f"Modifying hostname from {self.name} to " + + f"{self.nb.virtual_chassis.name}." + ) self.name = self.nb.virtual_chassis.name return True self.logger.debug(f"Host {self.name} is non-primary cluster member.") @@ -266,18 +305,24 @@ class PhysicalDevice(): # Go through all templates found in Zabbix for zbx_template in templates: # If the template names match - if zbx_template['name'] == nb_template: + if zbx_template["name"] == nb_template: # Set match variable to true, add template details # to class variable and return debug log template_match = True - self.zbx_templates.append({"templateid": zbx_template['templateid'], - "name": zbx_template['name']}) + self.zbx_templates.append( + { + "templateid": zbx_template["templateid"], + "name": zbx_template["name"], + } + ) e = f"Host {self.name}: found template {zbx_template['name']}" self.logger.debug(e) # Return error should the template not be found in Zabbix if not template_match: - e = (f"Unable to find template {nb_template} " - f"for host {self.name} in Zabbix. Skipping host...") + e = ( + f"Unable to find template {nb_template} " + f"for host {self.name} in Zabbix. Skipping host..." + ) self.logger.warning(e) raise SyncInventoryError(e) @@ -289,8 +334,8 @@ class PhysicalDevice(): """ # Go through all groups for group in groups: - if group['name'] == self.hostgroup: - self.group_id = group['groupid'] + if group["name"] == self.hostgroup: + self.group_id = group["groupid"] e = f"Host {self.name}: matched group {group['name']}" self.logger.debug(e) return True @@ -304,10 +349,13 @@ class PhysicalDevice(): if self.zabbix_id: try: # Check if the Zabbix host exists in Zabbix - zbx_host = bool(self.zabbix.host.get(filter={'hostid': self.zabbix_id}, - output=[])) - e = (f"Host {self.name}: was already deleted from Zabbix." - " Removed link in NetBox.") + zbx_host = bool( + self.zabbix.host.get(filter={"hostid": self.zabbix_id}, output=[]) + ) + e = ( + f"Host {self.name}: was already deleted from Zabbix." + " Removed link in NetBox." 
+ ) if zbx_host: # Delete host should it exists self.zabbix.host.delete(self.zabbix_id) @@ -332,9 +380,9 @@ class PhysicalDevice(): """ # Validate the hostname or visible name field if not self.use_visible_name: - zbx_filter = {'host': self.name} + zbx_filter = {"host": self.name} else: - zbx_filter = {'name': self.visible_name} + zbx_filter = {"name": self.visible_name} host = self.zabbix.host.get(filter=zbx_filter, output=[]) return bool(host) @@ -364,24 +412,33 @@ class PhysicalDevice(): """ Generates Usermacros """ - macros = ZabbixUsermacros(self.nb, self._usermacro_map(), - usermacro_sync, logger=self.logger, - host=self.name) + macros = ZabbixUsermacros( + self.nb, + self._usermacro_map(), + usermacro_sync, + logger=self.logger, + host=self.name, + ) if macros.sync is False: self.usermacros = [] self.usermacros = macros.generate() return True - def set_tags(self): """ Generates Host Tags """ - tags = ZabbixTags(self.nb, self._tag_map(), - tag_sync, tag_lower, tag_name=tag_name, - tag_value=tag_value, logger=self.logger, - host=self.name) + tags = ZabbixTags( + self.nb, + self._tag_map(), + tag_sync, + tag_lower, + tag_name=tag_name, + tag_value=tag_value, + logger=self.logger, + host=self.name, + ) if tags.sync is False: self.tags = [] @@ -398,14 +455,16 @@ class PhysicalDevice(): # check if the key Zabbix is defined in the config context if not "zabbix" in self.nb.config_context: return False - if ("proxy" in self.nb.config_context["zabbix"] and - not self.nb.config_context["zabbix"]["proxy"]): + if ( + "proxy" in self.nb.config_context["zabbix"] + and not self.nb.config_context["zabbix"]["proxy"] + ): return False # Proxy group takes priority over a proxy due # to it being HA and therefore being more reliable # Includes proxy group fix since Zabbix <= 6 should ignore this proxy_types = ["proxy"] - if str(self.zabbix.version).startswith('7'): + if str(self.zabbix.version).startswith("7"): # Only insert groups in front of list for Zabbix7 proxy_types.insert(0, "proxy_group") for proxy_type in proxy_types: @@ -419,15 +478,23 @@ class PhysicalDevice(): continue # If the proxy name matches if proxy["name"] == proxy_name: - self.logger.debug(f"Host {self.name}: using {proxy['type']}" - f" {proxy_name}") + self.logger.debug( + f"Host {self.name}: using {proxy['type']}" f" {proxy_name}" + ) self.zbxproxy = proxy return True - self.logger.warning(f"Host {self.name}: unable to find proxy {proxy_name}") + self.logger.warning( + f"Host {self.name}: unable to find proxy {proxy_name}" + ) return False - def createInZabbix(self, groups, templates, proxies, - description="Host added by NetBox sync script."): + def createInZabbix( + self, + groups, + templates, + proxies, + description="Host added by NetBox sync script.", + ): """ Creates Zabbix host object with parameters from NetBox object. """ @@ -435,37 +502,40 @@ class PhysicalDevice(): if not self._zabbixHostnameExists(): # Set group and template ID's for host if not self.setZabbixGroupID(groups): - e = (f"Unable to find group '{self.hostgroup}' " - f"for host {self.name} in Zabbix.") + e = ( + f"Unable to find group '{self.hostgroup}' " + f"for host {self.name} in Zabbix." 
+ ) self.logger.warning(e) raise SyncInventoryError(e) self.zbxTemplatePrepper(templates) templateids = [] for template in self.zbx_templates: - templateids.append({'templateid': template['templateid']}) + templateids.append({"templateid": template["templateid"]}) # Set interface, group and template configuration interfaces = self.setInterfaceDetails() groups = [{"groupid": self.group_id}] # Set Zabbix proxy if defined self.setProxy(proxies) # Set basic data for host creation - create_data = {"host": self.name, - "name": self.visible_name, - "status": self.zabbix_state, - "interfaces": interfaces, - "groups": groups, - "templates": templateids, - "description": description, - "inventory_mode": self.inventory_mode, - "inventory": self.inventory, - "macros": self.usermacros, - "tags": self.tags - } + create_data = { + "host": self.name, + "name": self.visible_name, + "status": self.zabbix_state, + "interfaces": interfaces, + "groups": groups, + "templates": templateids, + "description": description, + "inventory_mode": self.inventory_mode, + "inventory": self.inventory, + "macros": self.usermacros, + "tags": self.tags, + } # If a Zabbix proxy or Zabbix Proxy group has been defined if self.zbxproxy: # If a lower version than 7 is used, we can assume that # the proxy is a normal proxy and not a proxy group - if not str(self.zabbix.version).startswith('7'): + if not str(self.zabbix.version).startswith("7"): create_data["proxy_hostid"] = self.zbxproxy["id"] else: # Configure either a proxy or proxy group @@ -496,8 +566,8 @@ class PhysicalDevice(): """ final_data = [] # Check if the hostgroup is in a nested format and check each parent - for pos in range(len(self.hostgroup.split('/'))): - zabbix_hg = self.hostgroup.rsplit('/', pos)[0] + for pos in range(len(self.hostgroup.split("/"))): + zabbix_hg = self.hostgroup.rsplit("/", pos)[0] if self.lookupZabbixHostgroup(hostgroups, zabbix_hg): # Hostgroup already exists continue @@ -508,7 +578,9 @@ class PhysicalDevice(): e = f"Hostgroup '{zabbix_hg}': created in Zabbix." self.logger.info(e) # Add group to final data - final_data.append({'groupid': groupid["groupids"][0], 'name': zabbix_hg}) + final_data.append( + {"groupid": groupid["groupids"][0], "name": zabbix_hg} + ) except APIRequestError as e: msg = f"Hostgroup '{zabbix_hg}': unable to create. Zabbix returned {str(e)}." self.logger.error(msg) @@ -535,20 +607,24 @@ class PhysicalDevice(): try: self.zabbix.host.update(hostid=self.zabbix_id, **kwargs) except APIRequestError as e: - e = (f"Host {self.name}: Unable to update. " - f"Zabbix returned the following error: {str(e)}.") + e = ( + f"Host {self.name}: Unable to update. " + f"Zabbix returned the following error: {str(e)}." + ) self.logger.error(e) raise SyncExternalError(e) from None self.logger.info(f"Updated host {self.name} with data {kwargs}.") self.create_journal_entry("info", "Updated host in Zabbix with latest NB data.") - def ConsistencyCheck(self, groups, templates, proxies, proxy_power, create_hostgroups): + def ConsistencyCheck( + self, groups, templates, proxies, proxy_power, create_hostgroups + ): # pylint: disable=too-many-branches, too-many-statements """ Checks if Zabbix object is still valid with NetBox parameters. 
""" # If group is found or if the hostgroup is nested - if not self.setZabbixGroupID(groups) or len(self.hostgroup.split('/')) > 1: + if not self.setZabbixGroupID(groups) or len(self.hostgroup.split("/")) > 1: if create_hostgroups: # Script is allowed to create a new hostgroup new_groups = self.createZabbixHostgroup(groups) @@ -559,50 +635,59 @@ class PhysicalDevice(): if not self.group_id: # Function returns true / false but also sets GroupID if not self.setZabbixGroupID(groups) and not create_hostgroups: - e = (f"Host {self.name}: different hostgroup is required but " - "unable to create hostgroup without generation permission.") + e = ( + f"Host {self.name}: different hostgroup is required but " + "unable to create hostgroup without generation permission." + ) self.logger.warning(e) raise SyncInventoryError(e) # Prepare templates and proxy config self.zbxTemplatePrepper(templates) self.setProxy(proxies) # Get host object from Zabbix - host = self.zabbix.host.get(filter={'hostid': self.zabbix_id}, - selectInterfaces=['type', 'ip', - 'port', 'details', - 'interfaceid'], - selectGroups=["groupid"], - selectHostGroups=["groupid"], - selectParentTemplates=["templateid"], - selectInventory=list(self._inventory_map().values()), - selectMacros=["macro","value","type","description"], - selectTags=["tag","value"] - ) + host = self.zabbix.host.get( + filter={"hostid": self.zabbix_id}, + selectInterfaces=["type", "ip", "port", "details", "interfaceid"], + selectGroups=["groupid"], + selectHostGroups=["groupid"], + selectParentTemplates=["templateid"], + selectInventory=list(self._inventory_map().values()), + selectMacros=["macro", "value", "type", "description"], + selectTags=["tag", "value"], + ) if len(host) > 1: - e = (f"Got {len(host)} results for Zabbix hosts " - f"with ID {self.zabbix_id} - hostname {self.name}.") + e = ( + f"Got {len(host)} results for Zabbix hosts " + f"with ID {self.zabbix_id} - hostname {self.name}." + ) self.logger.error(e) raise SyncInventoryError(e) if len(host) == 0: - e = (f"Host {self.name}: No Zabbix host found. " - f"This is likely the result of a deleted Zabbix host " - f"without zeroing the ID field in NetBox.") + e = ( + f"Host {self.name}: No Zabbix host found. " + f"This is likely the result of a deleted Zabbix host " + f"without zeroing the ID field in NetBox." + ) self.logger.error(e) raise SyncInventoryError(e) host = host[0] if host["host"] == self.name: self.logger.debug(f"Host {self.name}: hostname in-sync.") else: - self.logger.warning(f"Host {self.name}: hostname OUT of sync. " - f"Received value: {host['host']}") + self.logger.warning( + f"Host {self.name}: hostname OUT of sync. " + f"Received value: {host['host']}" + ) self.updateZabbixHost(host=self.name) # Execute check depending on wether the name is special or not if self.use_visible_name: if host["name"] == self.visible_name: self.logger.debug(f"Host {self.name}: visible name in-sync.") else: - self.logger.warning(f"Host {self.name}: visible name OUT of sync." - f" Received value: {host['name']}") + self.logger.warning( + f"Host {self.name}: visible name OUT of sync." 
+ f" Received value: {host['name']}" + ) self.updateZabbixHost(name=self.visible_name) # Check if the templates are in-sync @@ -611,23 +696,24 @@ class PhysicalDevice(): # Prepare Templates for API parsing templateids = [] for template in self.zbx_templates: - templateids.append({'templateid': template['templateid']}) + templateids.append({"templateid": template["templateid"]}) # Update Zabbix with NB templates and clear any old / lost templates - self.updateZabbixHost(templates_clear=host["parentTemplates"], - templates=templateids) + self.updateZabbixHost( + templates_clear=host["parentTemplates"], templates=templateids + ) else: self.logger.debug(f"Host {self.name}: template(s) in-sync.") # Check if Zabbix version is 6 or higher. Issue #93 group_dictname = "hostgroups" - if str(self.zabbix.version).startswith(('6', '5')): + if str(self.zabbix.version).startswith(("6", "5")): group_dictname = "groups" for group in host[group_dictname]: if group["groupid"] == self.group_id: self.logger.debug(f"Host {self.name}: hostgroup in-sync.") break self.logger.warning(f"Host {self.name}: hostgroup OUT of sync.") - self.updateZabbixHost(groups={'groupid': self.group_id}) + self.updateZabbixHost(groups={"groupid": self.group_id}) if int(host["status"]) == self.zabbix_state: self.logger.debug(f"Host {self.name}: status in-sync.") @@ -637,8 +723,10 @@ class PhysicalDevice(): # Check if a proxy has been defined if self.zbxproxy: # Check if proxy or proxy group is defined - if (self.zbxproxy["idtype"] in host and - host[self.zbxproxy["idtype"]] == self.zbxproxy["id"]): + if ( + self.zbxproxy["idtype"] in host + and host[self.zbxproxy["idtype"]] == self.zbxproxy["id"] + ): self.logger.debug(f"Host {self.name}: proxy in-sync.") # Backwards compatibility for Zabbix <= 6 elif "proxy_hostid" in host and host["proxy_hostid"] == self.zbxproxy["id"]: @@ -647,13 +735,15 @@ class PhysicalDevice(): else: self.logger.warning(f"Host {self.name}: proxy OUT of sync.") # Zabbix <= 6 patch - if not str(self.zabbix.version).startswith('7'): - self.updateZabbixHost(proxy_hostid=self.zbxproxy['id']) + if not str(self.zabbix.version).startswith("7"): + self.updateZabbixHost(proxy_hostid=self.zbxproxy["id"]) # Zabbix 7+ else: # Prepare data structure for updating either proxy or group - update_data = {self.zbxproxy["idtype"]: self.zbxproxy["id"], - "monitored_by": self.zbxproxy['monitored_by']} + update_data = { + self.zbxproxy["idtype"]: self.zbxproxy["id"], + "monitored_by": self.zbxproxy["monitored_by"], + } self.updateZabbixHost(**update_data) else: # No proxy is defined in NetBox @@ -665,8 +755,10 @@ class PhysicalDevice(): proxy_set = True if proxy_power and proxy_set: # Zabbix <= 6 fix - self.logger.warning(f"Host {self.name}: no proxy is configured in NetBox " - "but is configured in Zabbix. Removing proxy config in Zabbix") + self.logger.warning( + f"Host {self.name}: no proxy is configured in NetBox " + "but is configured in Zabbix. Removing proxy config in Zabbix" + ) if "proxy_hostid" in host and bool(host["proxy_hostid"]): self.updateZabbixHost(proxy_hostid=0) # Zabbix 7 proxy @@ -678,21 +770,23 @@ class PhysicalDevice(): # Checks if a proxy has been defined in Zabbix and if proxy_power config has been set if proxy_set and not proxy_power: # Display error message - self.logger.error(f"Host {self.name} is configured " - f"with proxy in Zabbix but not in NetBox. 
The" - " -p flag was ommited: no " - "changes have been made.") + self.logger.error( + f"Host {self.name} is configured " + f"with proxy in Zabbix but not in NetBox. The" + " -p flag was ommited: no " + "changes have been made." + ) if not proxy_set: self.logger.debug(f"Host {self.name}: proxy in-sync.") # Check host inventory mode - if str(host['inventory_mode']) == str(self.inventory_mode): + if str(host["inventory_mode"]) == str(self.inventory_mode): self.logger.debug(f"Host {self.name}: inventory_mode in-sync.") else: self.logger.warning(f"Host {self.name}: inventory_mode OUT of sync.") self.updateZabbixHost(inventory_mode=str(self.inventory_mode)) - if inventory_sync and self.inventory_mode in [0,1]: + if inventory_sync and self.inventory_mode in [0, 1]: # Check host inventory mapping - if host['inventory'] == self.inventory: + if host["inventory"] == self.inventory: self.logger.debug(f"Host {self.name}: inventory in-sync.") else: self.logger.warning(f"Host {self.name}: inventory OUT of sync.") @@ -704,12 +798,12 @@ class PhysicalDevice(): # Do not re-sync secret usermacros unless sync is set to 'full' if str(usermacro_sync).lower() != "full": for m in deepcopy(self.usermacros): - if m['type'] == str(1): + if m["type"] == str(1): # Remove the value as the api doesn't return it # this will allow us to only update usermacros that don't exist - m.pop('value') + m.pop("value") macros_filtered.append(m) - if host['macros'] == self.usermacros or host['macros'] == macros_filtered: + if host["macros"] == self.usermacros or host["macros"] == macros_filtered: self.logger.debug(f"Host {self.name}: usermacros in-sync.") else: self.logger.warning(f"Host {self.name}: usermacros OUT of sync.") @@ -717,7 +811,7 @@ class PhysicalDevice(): # Check host usermacros if tag_sync: - if remove_duplicates(host['tags'],sortkey='tag') == self.tags: + if remove_duplicates(host["tags"], sortkey="tag") == self.tags: self.logger.debug(f"Host {self.name}: tags in-sync.") else: self.logger.warning(f"Host {self.name}: tags OUT of sync.") @@ -725,7 +819,7 @@ class PhysicalDevice(): # If only 1 interface has been found # pylint: disable=too-many-nested-blocks - if len(host['interfaces']) == 1: + if len(host["interfaces"]) == 1: updates = {} # Go through each key / item and check if it matches Zabbix for key, item in self.setInterfaceDetails()[0].items(): @@ -733,7 +827,7 @@ class PhysicalDevice(): if key in host["interfaces"][0]: # If SNMP is used, go through nested dict # to compare SNMP parameters - if isinstance(item,dict) and key == "details": + if isinstance(item, dict) and key == "details": for k, i in item.items(): if k in host["interfaces"][0][key]: # Set update if values don't match @@ -761,12 +855,14 @@ class PhysicalDevice(): self.logger.warning(f"Host {self.name}: Interface OUT of sync.") if "type" in updates: # Changing interface type not supported. Raise exception. - e = (f"Host {self.name}: changing interface type to " - f"{str(updates['type'])} is not supported.") + e = ( + f"Host {self.name}: changing interface type to " + f"{str(updates['type'])} is not supported." + ) self.logger.error(e) raise InterfaceConfigError(e) # Set interfaceID for Zabbix config - updates["interfaceid"] = host["interfaces"][0]['interfaceid'] + updates["interfaceid"] = host["interfaces"][0]["interfaceid"] try: # API call to Zabbix self.zabbix.hostinterface.update(updates) @@ -782,9 +878,11 @@ class PhysicalDevice(): e = f"Host {self.name}: interface in-sync." 
self.logger.debug(e) else: - e = (f"Host {self.name} has unsupported interface configuration." - f" Host has total of {len(host['interfaces'])} interfaces. " - "Manual interfention required.") + e = ( + f"Host {self.name} has unsupported interface configuration." + f" Host has total of {len(host['interfaces'])} interfaces. " + "Manual interfention required." + ) self.logger.error(e) raise SyncInventoryError(e) @@ -796,20 +894,25 @@ class PhysicalDevice(): if self.journal: # Check if the severity is valid if severity not in ["info", "success", "warning", "danger"]: - self.logger.warning(f"Value {severity} not valid for NB journal entries.") + self.logger.warning( + f"Value {severity} not valid for NB journal entries." + ) return False - journal = {"assigned_object_type": "dcim.device", - "assigned_object_id": self.id, - "kind": severity, - "comments": message - } + journal = { + "assigned_object_type": "dcim.device", + "assigned_object_id": self.id, + "kind": severity, + "comments": message, + } try: self.nb_journals.create(journal) self.logger.debug(f"Host {self.name}: Created journal entry in NetBox") return True except JournalError(e) as e: - self.logger.warning("Unable to create journal entry for " - f"{self.name}: NB returned {e}") + self.logger.warning( + "Unable to create journal entry for " + f"{self.name}: NB returned {e}" + ) return False return False @@ -832,10 +935,15 @@ class PhysicalDevice(): # and add this NB template to the list of successfull templates tmpls_from_zabbix.pop(pos) succesfull_templates.append(nb_tmpl) - self.logger.debug(f"Host {self.name}: template " - f"{nb_tmpl['name']} is present in Zabbix.") + self.logger.debug( + f"Host {self.name}: template " + f"{nb_tmpl['name']} is present in Zabbix." + ) break - if len(succesfull_templates) == len(self.zbx_templates) and len(tmpls_from_zabbix) == 0: + if ( + len(succesfull_templates) == len(self.zbx_templates) + and len(tmpls_from_zabbix) == 0 + ): # All of the NetBox templates have been confirmed as successfull # and the ZBX template list is empty. This means that # all of the templates match. 
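(Editorial aside, not part of the patch: the reformatted consistency check above compares the templates derived from NetBox -- self.zbx_templates, a list of {"templateid", "name"} dicts -- against the parentTemplates returned by host.get, and only issues updateZabbixHost(templates_clear=..., templates=...) when they differ. The sketch below reduces that comparison to a set equality on template IDs; it is not the script's actual implementation, the data is hypothetical, and only the dict shapes are taken from the diff.)

def templates_in_sync(zbx_templates, parent_templates):
    """True when both sides contain exactly the same Zabbix template IDs."""
    wanted = {t["templateid"] for t in zbx_templates}
    attached = {t["templateid"] for t in parent_templates}
    return wanted == attached

# Hypothetical example: Zabbix still has a template attached that NetBox no longer assigns.
nb_side = [{"templateid": "10101", "name": "Template A"}]
zbx_side = [{"templateid": "10101"}, {"templateid": "10202"}]
print(templates_in_sync(nb_side, zbx_side))  # False -> a template update would be triggered

(End of aside.)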
diff --git a/modules/exceptions.py b/modules/exceptions.py index 27a141c..ddac2b0 100644 --- a/modules/exceptions.py +++ b/modules/exceptions.py @@ -2,35 +2,47 @@ """ All custom exceptions used for Exception generation """ + + class SyncError(Exception): - """ Class SyncError """ + """Class SyncError""" + class JournalError(Exception): - """ Class SyncError """ + """Class SyncError""" + class SyncExternalError(SyncError): - """ Class SyncExternalError """ + """Class SyncExternalError""" + class SyncInventoryError(SyncError): - """ Class SyncInventoryError """ + """Class SyncInventoryError""" + class SyncDuplicateError(SyncError): - """ Class SyncDuplicateError """ + """Class SyncDuplicateError""" + class EnvironmentVarError(SyncError): - """ Class EnvironmentVarError """ + """Class EnvironmentVarError""" + class InterfaceConfigError(SyncError): - """ Class InterfaceConfigError """ + """Class InterfaceConfigError""" + class ProxyConfigError(SyncError): - """ Class ProxyConfigError """ + """Class ProxyConfigError""" + class HostgroupError(SyncError): - """ Class HostgroupError """ + """Class HostgroupError""" + class TemplateError(SyncError): - """ Class TemplateError """ + """Class TemplateError""" + class UsermacroError(SyncError): - """ Class UsermacroError """ + """Class UsermacroError""" diff --git a/modules/hostgroups.py b/modules/hostgroups.py index 6e2db75..c67f5e6 100644 --- a/modules/hostgroups.py +++ b/modules/hostgroups.py @@ -1,14 +1,26 @@ """Module for all hostgroup related code""" + from logging import getLogger + from modules.exceptions import HostgroupError from modules.tools import build_path -class Hostgroup(): + +class Hostgroup: """Hostgroup class for devices and VM's Takes type (vm or dev) and NB object""" - def __init__(self, obj_type, nb_obj, version, logger=None, #pylint: disable=too-many-arguments, too-many-positional-arguments - nested_sitegroup_flag=False, nested_region_flag=False, - nb_regions=None, nb_groups=None): + + def __init__( + self, + obj_type, + nb_obj, + version, + logger=None, # pylint: disable=too-many-arguments, too-many-positional-arguments + nested_sitegroup_flag=False, + nested_region_flag=False, + nb_regions=None, + nb_groups=None, + ): self.logger = logger if logger else getLogger(__name__) if obj_type not in ("vm", "dev"): msg = f"Unable to create hostgroup with type {type}" @@ -19,8 +31,9 @@ class Hostgroup(): self.name = self.nb.name self.nb_version = version # Used for nested data objects - self.set_nesting(nested_sitegroup_flag, nested_region_flag, - nb_groups, nb_regions) + self.set_nesting( + nested_sitegroup_flag, nested_region_flag, nb_groups, nb_regions + ) self._set_format_options() def __str__(self): @@ -49,20 +62,28 @@ class Hostgroup(): format_options["site_group"] = None if self.nb.site: if self.nb.site.region: - format_options["region"] = self.generate_parents("region", - str(self.nb.site.region)) + format_options["region"] = self.generate_parents( + "region", str(self.nb.site.region) + ) if self.nb.site.group: - format_options["site_group"] = self.generate_parents("site_group", - str(self.nb.site.group)) + format_options["site_group"] = self.generate_parents( + "site_group", str(self.nb.site.group) + ) format_options["role"] = role format_options["site"] = self.nb.site.name if self.nb.site else None format_options["tenant"] = str(self.nb.tenant) if self.nb.tenant else None - format_options["tenant_group"] = str(self.nb.tenant.group) if self.nb.tenant else None - format_options["platform"] = self.nb.platform.name if 
self.nb.platform else None + format_options["tenant_group"] = ( + str(self.nb.tenant.group) if self.nb.tenant else None + ) + format_options["platform"] = ( + self.nb.platform.name if self.nb.platform else None + ) # Variables only applicable for devices if self.type == "dev": format_options["manufacturer"] = self.nb.device_type.manufacturer.name - format_options["location"] = str(self.nb.location) if self.nb.location else None + format_options["location"] = ( + str(self.nb.location) if self.nb.location else None + ) # Variables only applicable for VM's if self.type == "vm": # Check if a cluster is configured. Could also be configured in a site. @@ -72,17 +93,22 @@ class Hostgroup(): self.format_options = format_options - def set_nesting(self, nested_sitegroup_flag, nested_region_flag, - nb_groups, nb_regions): + def set_nesting( + self, nested_sitegroup_flag, nested_region_flag, nb_groups, nb_regions + ): """Set nesting options for this Hostgroup""" - self.nested_objects = {"site_group": {"flag": nested_sitegroup_flag, "data": nb_groups}, - "region": {"flag": nested_region_flag, "data": nb_regions}} + self.nested_objects = { + "site_group": {"flag": nested_sitegroup_flag, "data": nb_groups}, + "region": {"flag": nested_region_flag, "data": nb_regions}, + } def generate(self, hg_format=None): """Generate hostgroup based on a provided format""" # Set format to default in case its not specified if not hg_format: - hg_format = "site/manufacturer/role" if self.type == "dev" else "cluster/role" + hg_format = ( + "site/manufacturer/role" if self.type == "dev" else "cluster/role" + ) # Split all given names hg_output = [] hg_items = hg_format.split("/") @@ -93,8 +119,10 @@ class Hostgroup(): cf_data = self.custom_field_lookup(hg_item) # CF does not exist if not cf_data["result"]: - msg = (f"Unable to generate hostgroup for host {self.name}. " - f"Item type {hg_item} not supported.") + msg = ( + f"Unable to generate hostgroup for host {self.name}. " + f"Item type {hg_item} not supported." + ) self.logger.error(msg) raise HostgroupError(msg) # CF data is populated @@ -109,10 +137,12 @@ class Hostgroup(): # Check if the hostgroup is populated with at least one item. if bool(hg_output): return "/".join(hg_output) - msg = (f"Unable to generate hostgroup for host {self.name}." - " Not enough valid items. This is most likely" - " due to the use of custom fields that are empty" - " or an invalid hostgroup format.") + msg = ( + f"Unable to generate hostgroup for host {self.name}." + " Not enough valid items. This is most likely" + " due to the use of custom fields that are empty" + " or an invalid hostgroup format." + ) self.logger.error(msg) raise HostgroupError(msg) @@ -157,7 +187,9 @@ class Hostgroup(): return child_object # If the nested flag is True, perform parent calculation if self.nested_objects[nest_type]["flag"]: - final_nested_object = build_path(child_object, self.nested_objects[nest_type]["data"]) + final_nested_object = build_path( + child_object, self.nested_objects[nest_type]["data"] + ) return "/".join(final_nested_object) # Nesting is not allowed for this object. 
Return child_object return child_object diff --git a/modules/interface.py b/modules/interface.py index e4413c6..1bd1e37 100644 --- a/modules/interface.py +++ b/modules/interface.py @@ -4,7 +4,8 @@ All of the Zabbix interface related configuration """ from modules.exceptions import InterfaceConfigError -class ZabbixInterface(): + +class ZabbixInterface: """Class that represents a Zabbix interface.""" def __init__(self, context, ip): @@ -15,21 +16,16 @@ class ZabbixInterface(): def _set_default_port(self): """Sets default TCP / UDP port for different interface types""" - interface_mapping = { - 1: 10050, - 2: 161, - 3: 623, - 4: 12345 - } + interface_mapping = {1: 10050, 2: 161, 3: 623, 4: 12345} # Check if interface type is listed in mapper. - if self.interface['type'] not in interface_mapping: + if self.interface["type"] not in interface_mapping: return False # Set default port to interface - self.interface["port"] = str(interface_mapping[self.interface['type']]) + self.interface["port"] = str(interface_mapping[self.interface["type"]]) return True def get_context(self): - """ check if NetBox custom context has been defined. """ + """check if NetBox custom context has been defined.""" if "zabbix" in self.context: zabbix = self.context["zabbix"] if "interface_type" in zabbix: @@ -43,7 +39,7 @@ class ZabbixInterface(): return False def set_snmp(self): - """ Check if interface is type SNMP """ + """Check if interface is type SNMP""" # pylint: disable=too-many-branches if self.interface["type"] == 2: # Checks if SNMP settings are defined in NetBox @@ -63,7 +59,7 @@ class ZabbixInterface(): e = "SNMP version option is not defined." raise InterfaceConfigError(e) # If version 1 or 2 is used, get community string - if self.interface["details"]["version"] in ['1','2']: + if self.interface["details"]["version"] in ["1", "2"]: if "community" in snmp: # Set SNMP community to confix context value community = snmp["community"] @@ -73,10 +69,16 @@ class ZabbixInterface(): self.interface["details"]["community"] = str(community) # If version 3 has been used, get all # SNMPv3 NetBox related configs - elif self.interface["details"]["version"] == '3': - items = ["securityname", "securitylevel", "authpassphrase", - "privpassphrase", "authprotocol", "privprotocol", - "contextname"] + elif self.interface["details"]["version"] == "3": + items = [ + "securityname", + "securitylevel", + "authpassphrase", + "privpassphrase", + "authprotocol", + "privprotocol", + "contextname", + ] for key, item in snmp.items(): if key in items: self.interface["details"][key] = str(item) @@ -91,13 +93,15 @@ class ZabbixInterface(): raise InterfaceConfigError(e) def set_default_snmp(self): - """ Set default config to SNMPv2, port 161 and community macro. 
""" + """Set default config to SNMPv2, port 161 and community macro.""" self.interface = self.skelet self.interface["type"] = "2" self.interface["port"] = "161" - self.interface["details"] = {"version": "2", - "community": "{$SNMP_COMMUNITY}", - "bulk": "1"} + self.interface["details"] = { + "version": "2", + "community": "{$SNMP_COMMUNITY}", + "bulk": "1", + } def set_default_agent(self): """Sets interface to Zabbix agent defaults""" diff --git a/modules/tags.py b/modules/tags.py index 4993cd3..9dda995 100644 --- a/modules/tags.py +++ b/modules/tags.py @@ -4,13 +4,24 @@ All of the Zabbix Usermacro related configuration """ from logging import getLogger + from modules.tools import field_mapper, remove_duplicates -class ZabbixTags(): + +class ZabbixTags: """Class that represents a Zabbix interface.""" - def __init__(self, nb, tag_map, tag_sync, tag_lower=True, - tag_name=None, tag_value=None, logger=None, host=None): + def __init__( + self, + nb, + tag_map, + tag_sync, + tag_lower=True, + tag_name=None, + tag_value=None, + logger=None, + host=None, + ): self.nb = nb self.name = host if host else nb.name self.tag_map = tag_map @@ -42,7 +53,7 @@ class ZabbixTags(): """ Validates tag name """ - if tag_name and isinstance(tag_name, str) and len(tag_name)<=256: + if tag_name and isinstance(tag_name, str) and len(tag_name) <= 256: return True return False @@ -50,7 +61,7 @@ class ZabbixTags(): """ Validates tag value """ - if tag_value and isinstance(tag_value, str) and len(tag_value)<=256: + if tag_value and isinstance(tag_value, str) and len(tag_value) <= 256: return True return False @@ -58,23 +69,25 @@ class ZabbixTags(): """ Renders a tag """ - tag={} + tag = {} if self.validate_tag(tag_name): if self.lower: - tag['tag'] = tag_name.lower() + tag["tag"] = tag_name.lower() else: - tag['tag'] = tag_name + tag["tag"] = tag_name else: - self.logger.error(f'Tag {tag_name} is not a valid tag name, skipping.') + self.logger.error(f"Tag {tag_name} is not a valid tag name, skipping.") return False if self.validate_value(tag_value): if self.lower: - tag['value'] = tag_value.lower() + tag["value"] = tag_value.lower() else: - tag['value'] = tag_value + tag["value"] = tag_value else: - self.logger.error(f'Tag {tag_name} has an invalid value: \'{tag_value}\', skipping.') + self.logger.error( + f"Tag {tag_name} has an invalid value: '{tag_value}', skipping." 
+ ) return False return tag @@ -83,7 +96,7 @@ class ZabbixTags(): Generate full set of Usermacros """ # pylint: disable=too-many-branches - tags=[] + tags = [] # Parse the field mapper for tags if self.tag_map: self.logger.debug(f"Host {self.nb.name}: Starting tag mapper") @@ -94,9 +107,12 @@ class ZabbixTags(): tags.append(t) # Parse NetBox config context for tags - if ("zabbix" in self.nb.config_context and "tags" in self.nb.config_context['zabbix'] - and isinstance(self.nb.config_context['zabbix']['tags'], list)): - for tag in self.nb.config_context['zabbix']['tags']: + if ( + "zabbix" in self.nb.config_context + and "tags" in self.nb.config_context["zabbix"] + and isinstance(self.nb.config_context["zabbix"]["tags"], list) + ): + for tag in self.nb.config_context["zabbix"]["tags"]: if isinstance(tag, dict): for tagname, value in tag.items(): t = self.render_tag(tagname, value) @@ -106,12 +122,12 @@ class ZabbixTags(): # Pull in NetBox device tags if tag_name is set if self.tag_name and isinstance(self.tag_name, str): for tag in self.nb.tags: - if self.tag_value.lower() in ['display', 'name', 'slug']: + if self.tag_value.lower() in ["display", "name", "slug"]: value = tag[self.tag_value] else: - value = tag['name'] + value = tag["name"] t = self.render_tag(self.tag_name, value) if t: tags.append(t) - return remove_duplicates(tags, sortkey='tag') + return remove_duplicates(tags, sortkey="tag") diff --git a/modules/tools.py b/modules/tools.py index 8d658a3..791025d 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -1,12 +1,14 @@ """A collection of tools used by several classes""" + def convert_recordset(recordset): - """ Converts netbox RedcordSet to list of dicts. """ + """Converts netbox RedcordSet to list of dicts.""" recordlist = [] for record in recordset: recordlist.append(record.__dict__) return recordlist + def build_path(endpoint, list_of_dicts): """ Builds a path list of related parent/child items. @@ -14,16 +16,17 @@ def build_path(endpoint, list_of_dicts): be used in hostgroups. """ item_path = [] - itemlist = [i for i in list_of_dicts if i['name'] == endpoint] + itemlist = [i for i in list_of_dicts if i["name"] == endpoint] item = itemlist[0] if len(itemlist) == 1 else None - item_path.append(item['name']) - while item['_depth'] > 0: - itemlist = [i for i in list_of_dicts if i['name'] == str(item['parent'])] + item_path.append(item["name"]) + while item["_depth"] > 0: + itemlist = [i for i in list_of_dicts if i["name"] == str(item["parent"])] item = itemlist[0] if len(itemlist) == 1 else None - item_path.append(item['name']) + item_path.append(item["name"]) item_path.reverse() return item_path + def proxy_prepper(proxy_list, proxy_group_list): """ Function that takes 2 lists and converts them using a @@ -44,15 +47,16 @@ def proxy_prepper(proxy_list, proxy_group_list): output.append(group) return output + def field_mapper(host, mapper, nbdevice, logger): """ Maps NetBox field data to Zabbix properties. Used for Inventory, Usermacros and Tag mappings. """ - data={} + data = {} # Let's build an dict for each property in the map for nb_field, zbx_field in mapper.items(): - field_list = nb_field.split("/") # convert str to list based on delimiter + field_list = nb_field.split("/") # convert str to list based on delimiter # start at the base of the dict... value = nbdevice # ... 
and step through the dict till we find the needed value @@ -61,22 +65,30 @@ def field_mapper(host, mapper, nbdevice, logger): # Check if the result is usable and expected # We want to apply any int or float 0 values, # even if python thinks those are empty. - if ((value and isinstance(value, int | float | str )) or - (isinstance(value, int | float) and int(value) ==0)): + if (value and isinstance(value, int | float | str)) or ( + isinstance(value, int | float) and int(value) == 0 + ): data[zbx_field] = str(value) elif not value: # empty value should just be an empty string for API compatibility - logger.debug(f"Host {host}: NetBox lookup for " - f"'{nb_field}' returned an empty value") + logger.debug( + f"Host {host}: NetBox lookup for " + f"'{nb_field}' returned an empty value" + ) data[zbx_field] = "" else: # Value is not a string or numeral, probably not what the user expected. - logger.error(f"Host {host}: Lookup for '{nb_field}'" - " returned an unexpected type: it will be skipped.") - logger.debug(f"Host {host}: Field mapping complete. " - f"Mapped {len(list(filter(None, data.values())))} field(s)") + logger.error( + f"Host {host}: Lookup for '{nb_field}'" + " returned an unexpected type: it will be skipped." + ) + logger.debug( + f"Host {host}: Field mapping complete. " + f"Mapped {len(list(filter(None, data.values())))} field(s)" + ) return data + def remove_duplicates(input_list, sortkey=None): """ Removes duplicate entries from a list and sorts the list diff --git a/modules/usermacros.py b/modules/usermacros.py index 71efbde..29580d1 100644 --- a/modules/usermacros.py +++ b/modules/usermacros.py @@ -3,11 +3,13 @@ """ All of the Zabbix Usermacro related configuration """ -from re import match from logging import getLogger +from re import match + from modules.tools import field_mapper -class ZabbixUsermacros(): + +class ZabbixUsermacros: """Class that represents a Zabbix interface.""" def __init__(self, nb, usermacro_map, usermacro_sync, logger=None, host=None): @@ -42,40 +44,46 @@ class ZabbixUsermacros(): """ Validates usermacro name """ - pattern = r'\{\$[A-Z0-9\._]*(\:.*)?\}' + pattern = r"\{\$[A-Z0-9\._]*(\:.*)?\}" return match(pattern, macro_name) def render_macro(self, macro_name, macro_properties): """ Renders a full usermacro from partial input """ - macro={} - macrotypes={'text': 0, 'secret': 1, 'vault': 2} + macro = {} + macrotypes = {"text": 0, "secret": 1, "vault": 2} if self.validate_macro(macro_name): - macro['macro'] = str(macro_name) + macro["macro"] = str(macro_name) if isinstance(macro_properties, dict): - if not 'value' in macro_properties: - self.logger.error(f'Usermacro {macro_name} has no value, skipping.') + if not "value" in macro_properties: + self.logger.error(f"Usermacro {macro_name} has no value, skipping.") return False - macro['value'] = macro_properties['value'] + macro["value"] = macro_properties["value"] - if 'type' in macro_properties and macro_properties['type'].lower() in macrotypes: - macro['type'] = str(macrotypes[macro_properties['type']]) + if ( + "type" in macro_properties + and macro_properties["type"].lower() in macrotypes + ): + macro["type"] = str(macrotypes[macro_properties["type"]]) else: - macro['type'] = str(0) + macro["type"] = str(0) - if ('description' in macro_properties and - isinstance(macro_properties['description'], str)): - macro['description'] = macro_properties['description'] + if "description" in macro_properties and isinstance( + macro_properties["description"], str + ): + macro["description"] = 
macro_properties["description"] else: - macro['description'] = "" + macro["description"] = "" elif isinstance(macro_properties, str): - macro['value'] = macro_properties - macro['type'] = str(0) - macro['description'] = "" + macro["value"] = macro_properties + macro["type"] = str(0) + macro["description"] = "" else: - self.logger.error(f'Usermacro {macro_name} is not a valid usermacro name, skipping.') + self.logger.error( + f"Usermacro {macro_name} is not a valid usermacro name, skipping." + ) return False return macro @@ -83,18 +91,25 @@ class ZabbixUsermacros(): """ Generate full set of Usermacros """ - macros=[] + macros = [] # Parse the field mapper for usermacros if self.usermacro_map: self.logger.debug(f"Host {self.nb.name}: Starting usermacro mapper") - field_macros = field_mapper(self.nb.name, self.usermacro_map, self.nb, self.logger) + field_macros = field_mapper( + self.nb.name, self.usermacro_map, self.nb, self.logger + ) for macro, value in field_macros.items(): m = self.render_macro(macro, value) if m: macros.append(m) # Parse NetBox config context for usermacros - if "zabbix" in self.nb.config_context and "usermacros" in self.nb.config_context['zabbix']: - for macro, properties in self.nb.config_context['zabbix']['usermacros'].items(): + if ( + "zabbix" in self.nb.config_context + and "usermacros" in self.nb.config_context["zabbix"] + ): + for macro, properties in self.nb.config_context["zabbix"][ + "usermacros" + ].items(): m = self.render_macro(macro, properties) if m: macros.append(m) diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index 273f9e7..6038811 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -3,55 +3,66 @@ """Module that hosts all functions for virtual machine processing""" from os import sys + from modules.device import PhysicalDevice +from modules.exceptions import InterfaceConfigError, SyncInventoryError, TemplateError from modules.hostgroups import Hostgroup from modules.interface import ZabbixInterface -from modules.exceptions import TemplateError, InterfaceConfigError, SyncInventoryError + try: from config import ( - vm_inventory_map, - vm_usermacro_map, - vm_tag_map, + traverse_regions, traverse_site_groups, - traverse_regions + vm_inventory_map, + vm_tag_map, + vm_usermacro_map, ) except ModuleNotFoundError: - print("Configuration file config.py not found in main directory." - "Please create the file or rename the config.py.example file to config.py.") + print( + "Configuration file config.py not found in main directory." + "Please create the file or rename the config.py.example file to config.py." 
+ ) sys.exit(0) + class VirtualMachine(PhysicalDevice): """Model for virtual machines""" + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.hostgroup = None self.zbx_template_names = None def _inventory_map(self): - """ use VM inventory maps """ + """use VM inventory maps""" return vm_inventory_map def _usermacro_map(self): - """ use VM usermacro maps """ + """use VM usermacro maps""" return vm_usermacro_map def _tag_map(self): - """ use VM tag maps """ + """use VM tag maps""" return vm_tag_map def set_hostgroup(self, hg_format, nb_site_groups, nb_regions): """Set the hostgroup for this device""" # Create new Hostgroup instance - hg = Hostgroup("vm", self.nb, self.nb_api_version, logger=self.logger, - nested_sitegroup_flag=traverse_site_groups, - nested_region_flag=traverse_regions, - nb_groups=nb_site_groups, - nb_regions=nb_regions) + hg = Hostgroup( + "vm", + self.nb, + self.nb_api_version, + logger=self.logger, + nested_sitegroup_flag=traverse_site_groups, + nested_region_flag=traverse_regions, + nb_groups=nb_site_groups, + nb_regions=nb_regions, + ) # Generate hostgroup based on hostgroup format self.hostgroup = hg.generate(hg_format) def set_vm_template(self): - """ Set Template for VMs. Overwrites default class + """Set Template for VMs. Overwrites default class to skip a lookup of custom fields.""" # Gather templates ONLY from the device specific context try: @@ -60,7 +71,7 @@ class VirtualMachine(PhysicalDevice): self.logger.warning(e) return True - def setInterfaceDetails(self): # pylint: disable=invalid-name + def setInterfaceDetails(self): # pylint: disable=invalid-name """ Overwrites device function to select an agent interface type by default Agent type interfaces are more likely to be used with VMs then SNMP From 886c5b24b999310e127dce80e963b3baccad47af Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Wed, 26 Feb 2025 14:45:20 +0100 Subject: [PATCH 28/93] =?UTF-8?q?=F0=9F=94=8A=20Improved=20log=20levels?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- modules/device.py | 12 ++++++++---- modules/logging.py | 36 ++++++++++++++++++++++++++++++++++++ modules/tags.py | 4 ++-- netbox_zabbix_sync.py | 25 ++++++++----------------- 4 files changed, 54 insertions(+), 23 deletions(-) create mode 100644 modules/logging.py diff --git a/modules/device.py b/modules/device.py index 83e6fdc..d90e505 100644 --- a/modules/device.py +++ b/modules/device.py @@ -7,6 +7,7 @@ from copy import deepcopy from logging import getLogger from os import sys from re import search +from venv import logger from zabbix_utils import APIRequestError @@ -19,6 +20,7 @@ from modules.exceptions import ( ) from modules.hostgroups import Hostgroup from modules.interface import ZabbixInterface +from modules.logging import get_logger from modules.tags import ZabbixTags from modules.tools import field_mapper, remove_duplicates from modules.usermacros import ZabbixUsermacros @@ -111,7 +113,7 @@ class PhysicalDevice: self.ip = self.cidr.split("/")[0] else: e = f"Host {self.name}: no primary IP." - self.logger.info(e) + self.logger.warning(e) raise SyncInventoryError(e) # Check if device has custom field for ZBX ID @@ -193,6 +195,7 @@ class PhysicalDevice: f"found for {self.nb.device_type.manufacturer.name}" f" - {self.nb.device_type.display}." ) + self.logger.warning(e) raise TemplateError(e) def get_templates_context(self): @@ -548,7 +551,7 @@ class PhysicalDevice: except APIRequestError as e: e = f"Host {self.name}: Couldn't create. 
Zabbix returned {str(e)}." self.logger.error(e) - raise SyncExternalError(e) from None + raise SyncExternalError(e) from e # Set NetBox custom field to hostID value. self.nb.custom_fields[device_cf] = int(self.zabbix_id) self.nb.save() @@ -556,8 +559,9 @@ class PhysicalDevice: self.logger.info(msg) self.create_journal_entry("success", msg) else: - e = f"Host {self.name}: Unable to add to Zabbix. Host already present." - self.logger.warning(e) + self.logger.error( + f"Host {self.name}: Unable to add to Zabbix. Host already present." + ) def createZabbixHostgroup(self, hostgroups): """ diff --git a/modules/logging.py b/modules/logging.py new file mode 100644 index 0000000..851ea4c --- /dev/null +++ b/modules/logging.py @@ -0,0 +1,36 @@ +import logging +from os import path + +logger = logging.getLogger("NetBox-Zabbix-sync") + + +def get_logger(): + """ + Return the logger for Netbox Zabbix Sync + """ + return logger + + +def setup_logger(): + """ + Prepare a logger with stream and file handlers + """ + # Set logging + lgout = logging.StreamHandler() + lgfile = logging.FileHandler( + path.join(path.dirname(path.realpath(__file__)), "sync.log") + ) + + logging.basicConfig( + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + level=logging.WARNING, + handlers=[lgout, lgfile], + ) + + +def set_log_levels(root_level, own_level): + """ + Configure log levels for root and Netbox-Zabbix-sync logger + """ + logging.getLogger().setLevel(root_level) + logger.setLevel(own_level) diff --git a/modules/tags.py b/modules/tags.py index 9dda995..441ebe2 100644 --- a/modules/tags.py +++ b/modules/tags.py @@ -76,7 +76,7 @@ class ZabbixTags: else: tag["tag"] = tag_name else: - self.logger.error(f"Tag {tag_name} is not a valid tag name, skipping.") + self.logger.warning(f"Tag {tag_name} is not a valid tag name, skipping.") return False if self.validate_value(tag_value): @@ -85,7 +85,7 @@ class ZabbixTags: else: tag["value"] = tag_value else: - self.logger.error( + self.logger.warning( f"Tag {tag_name} has an invalid value: '{tag_value}', skipping." 
) return False diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 75ad65c..2f30ccd 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -14,6 +14,7 @@ from zabbix_utils import APIRequestError, ProcessingError, ZabbixAPI from modules.device import PhysicalDevice from modules.exceptions import EnvironmentVarError, HostgroupError, SyncError +from modules.logging import get_logger, set_log_levels, setup_logger from modules.tools import convert_recordset, proxy_prepper from modules.virtual_machine import VirtualMachine @@ -40,19 +41,9 @@ except ModuleNotFoundError: ) sys.exit(1) -# Set logging -lgout = logging.StreamHandler() -lgfile = logging.FileHandler( - path.join(path.dirname(path.realpath(__file__)), "sync.log") -) -logging.basicConfig( - format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", - level=logging.WARNING, - handlers=[lgout, lgfile], -) - -logger = logging.getLogger("NetBox-Zabbix-sync") +setup_logger() +logger = get_logger() def main(arguments): @@ -60,13 +51,13 @@ def main(arguments): # pylint: disable=too-many-branches, too-many-statements # set environment variables if arguments.verbose: - logger.setLevel(logging.INFO) + set_log_levels(logging.WARNING, logging.INFO) if arguments.debug: - logger.setLevel(logging.DEBUG) + set_log_levels(logging.WARNING, logging.DEBUG) if arguments.debug_all: - logging.getLogger().setLevel(logging.DEBUG) + set_log_levels(logging.DEBUG, logging.DEBUG) if arguments.quiet: - logging.getLogger().setLevel(logging.ERROR) + set_log_levels(logging.ERROR, logging.ERROR) env_vars = ["ZABBIX_HOST", "NETBOX_HOST", "NETBOX_TOKEN"] if "ZABBIX_TOKEN" in environ: @@ -202,7 +193,7 @@ def main(arguments): # Delete device from Zabbix # and remove hostID from NetBox. vm.cleanup() - logger.info(f"VM {vm.name}: cleanup complete") + logger.debug(f"VM {vm.name}: cleanup complete") continue # Device has been added to NetBox # but is not in Activate state From 9ab5e09dd51a146edbeea2f4bf4f707e941fbaa2 Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Wed, 26 Feb 2025 14:54:08 +0100 Subject: [PATCH 29/93] =?UTF-8?q?=F0=9F=92=A1=20Added=20docstring=20for=20?= =?UTF-8?q?module?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- modules/logging.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/logging.py b/modules/logging.py index 851ea4c..c36c2c1 100644 --- a/modules/logging.py +++ b/modules/logging.py @@ -1,3 +1,7 @@ +""" +Logging module for Netbox-Zabbix-sync +""" + import logging from os import path From 7781bc673206439317e60fa517478645261d5f58 Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Wed, 26 Feb 2025 14:54:20 +0100 Subject: [PATCH 30/93] =?UTF-8?q?=F0=9F=9A=A8=20"Fixed"=20linter=20warning?= =?UTF-8?q?s?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- modules/device.py | 11 ++++------- modules/hostgroups.py | 3 ++- modules/virtual_machine.py | 1 - netbox_zabbix_sync.py | 2 +- 4 files changed, 7 insertions(+), 10 deletions(-) diff --git a/modules/device.py b/modules/device.py index d90e505..b8d038d 100644 --- a/modules/device.py +++ b/modules/device.py @@ -1,5 +1,4 @@ -#!/usr/bin/env python3 -# pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation, too-many-lines, too-many-public-methods +# pylint: disable=invalid-name, logging-not-lazy, too-many-locals, logging-fstring-interpolation, too-many-lines, too-many-public-methods, duplicate-code """ Device specific handeling for 
NetBox to Zabbix """ @@ -7,7 +6,6 @@ from copy import deepcopy from logging import getLogger from os import sys from re import search -from venv import logger from zabbix_utils import APIRequestError @@ -20,7 +18,6 @@ from modules.exceptions import ( ) from modules.hostgroups import Hostgroup from modules.interface import ZabbixInterface -from modules.logging import get_logger from modules.tags import ZabbixTags from modules.tools import field_mapper, remove_duplicates from modules.usermacros import ZabbixUsermacros @@ -549,9 +546,9 @@ class PhysicalDevice: host = self.zabbix.host.create(**create_data) self.zabbix_id = host["hostids"][0] except APIRequestError as e: - e = f"Host {self.name}: Couldn't create. Zabbix returned {str(e)}." - self.logger.error(e) - raise SyncExternalError(e) from e + msg = f"Host {self.name}: Couldn't create. Zabbix returned {str(e)}." + self.logger.error(msg) + raise SyncExternalError(msg) from e # Set NetBox custom field to hostID value. self.nb.custom_fields[device_cf] = int(self.zabbix_id) self.nb.save() diff --git a/modules/hostgroups.py b/modules/hostgroups.py index c67f5e6..d1350bd 100644 --- a/modules/hostgroups.py +++ b/modules/hostgroups.py @@ -10,12 +10,13 @@ class Hostgroup: """Hostgroup class for devices and VM's Takes type (vm or dev) and NB object""" + # pylint: disable=too-many-arguments, disable=too-many-positional-arguments def __init__( self, obj_type, nb_obj, version, - logger=None, # pylint: disable=too-many-arguments, too-many-positional-arguments + logger=None, nested_sitegroup_flag=False, nested_region_flag=False, nb_regions=None, diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index 6038811..34e3394 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 # pylint: disable=duplicate-code """Module that hosts all functions for virtual machine processing""" diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 2f30ccd..79d8a20 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -5,7 +5,7 @@ import argparse import logging import ssl -from os import environ, path, sys +from os import environ, sys from pynetbox import api from pynetbox.core.query import RequestError as NBRequestError From 50918e43fa20217cdde4a17e3bc058cee8700376 Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Fri, 28 Feb 2025 15:25:18 +0100 Subject: [PATCH 31/93] =?UTF-8?q?=F0=9F=94=A7=20Changed=20user=20for=20doc?= =?UTF-8?q?ker=20container?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Dockerfile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index fa8d9c4..70ec03d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,10 @@ # syntax=docker/dockerfile:1 FROM python:3.12-alpine + +USER 1000:1000 + RUN mkdir -p /opt/netbox-zabbix -COPY . /opt/netbox-zabbix +COPY --chown=1000:1000 . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix RUN if ! 
[ -f ./config.py ]; then cp ./config.py.example ./config.py; fi RUN pip install -r ./requirements.txt From 5a3467538e70921f7b4eadcf4c17e71993acedf1 Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Fri, 28 Feb 2025 15:25:18 +0100 Subject: [PATCH 32/93] =?UTF-8?q?=F0=9F=94=A7=20Changed=20user=20for=20doc?= =?UTF-8?q?ker=20container?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 0551bd1..de18f3f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,9 +6,11 @@ LABEL org.opencontainers.image.description="Python script to synchronise NetBox LABEL org.opencontainers.image.documentation=https://github.com/TheNetworkGuy/netbox-zabbix-sync/ LABEL org.opencontainers.image.licenses=MIT LABEL org.opencontainers.image.authors="Twan Kamans" + +USER 1000:1000 RUN mkdir -p /opt/netbox-zabbix -COPY . /opt/netbox-zabbix +COPY --chown=1000:1000 . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi RUN pip install -r ./requirements.txt From 6bdaf4e5b7068e5faebf6eb7a2eb662fd963a534 Mon Sep 17 00:00:00 2001 From: Wouter de Bruijn Date: Fri, 28 Feb 2025 15:30:06 +0100 Subject: [PATCH 33/93] =?UTF-8?q?=F0=9F=90=9B=20Permission=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Dockerfile | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index de18f3f..c3bb81e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,12 +6,17 @@ LABEL org.opencontainers.image.description="Python script to synchronise NetBox LABEL org.opencontainers.image.documentation=https://github.com/TheNetworkGuy/netbox-zabbix-sync/ LABEL org.opencontainers.image.licenses=MIT LABEL org.opencontainers.image.authors="Twan Kamans" - -USER 1000:1000 RUN mkdir -p /opt/netbox-zabbix -COPY --chown=1000:1000 . /opt/netbox-zabbix +RUN addgroup -g 1000 -S netbox-zabbix && adduser -u 1000 -S netbox-zabbix -G netbox-zabbix +RUN chown -R 1000:1000 /opt/netbox-zabbix + WORKDIR /opt/netbox-zabbix + +COPY --chown=1000:1000 . /opt/netbox-zabbix + +USER 1000:1000 + RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi RUN pip install -r ./requirements.txt ENTRYPOINT ["python"] From 4449e040cebab080c29cea3193d18e104108176a Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 9 Apr 2025 15:49:38 +0200 Subject: [PATCH 34/93] :bug: added check for empty usermacro value. 
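Illustrative sketch (not part of this patch): the new check skips usermacros whose config-context value is an empty string instead of pushing an empty macro to Zabbix. A minimal standalone mirror of the patched string-handling branch, assuming the macro name has already passed the existing name validation in ZabbixUsermacros.render_macro:

    def render_value(macro_name, macro_properties):
        # Mirrors only the patched str branch, not the full render_macro logic
        macro = {"macro": str(macro_name)}
        if isinstance(macro_properties, str) and macro_properties:
            macro["value"] = macro_properties
            macro["type"] = str(0)
            macro["description"] = ""
            return macro
        # Empty (or otherwise unusable) value: warn and skip instead of syncing
        print(f"Usermacro {macro_name} has no value, skipping.")
        return False

    render_value("{$SNMP_COMMUNITY}", "public")  # -> usermacro dict
    render_value("{$SNMP_COMMUNITY}", "")        # -> False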
--- modules/usermacros.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/usermacros.py b/modules/usermacros.py index 29580d1..c1d783b 100644 --- a/modules/usermacros.py +++ b/modules/usermacros.py @@ -57,7 +57,7 @@ class ZabbixUsermacros: macro["macro"] = str(macro_name) if isinstance(macro_properties, dict): if not "value" in macro_properties: - self.logger.error(f"Usermacro {macro_name} has no value, skipping.") + self.logger.warning(f"Usermacro {macro_name} has no value, skipping.") return False macro["value"] = macro_properties["value"] @@ -76,10 +76,14 @@ class ZabbixUsermacros: else: macro["description"] = "" - elif isinstance(macro_properties, str): + elif isinstance(macro_properties, str) and macro_properties: macro["value"] = macro_properties macro["type"] = str(0) macro["description"] = "" + + else: + self.logger.warning(f"Usermacro {macro_name} has no value, skipping.") + return False else: self.logger.error( f"Usermacro {macro_name} is not a valid usermacro name, skipping." From 50b7ede81be9cafa479bc4426e0e9af188bbc855 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 9 Apr 2025 16:03:45 +0200 Subject: [PATCH 35/93] :wrench: quick dockerfile fix --- Dockerfile | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 70ec03d..3188195 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,14 @@ # syntax=docker/dockerfile:1 FROM python:3.12-alpine - -USER 1000:1000 +LABEL org.opencontainers.image.source=https://github.com/TheNetworkGuy/netbox-zabbix-sync +LABEL org.opencontainers.image.title="NetBox-Zabbix-Sync" +LABEL org.opencontainers.image.description="Python script to synchronise NetBox devices to Zabbix." +LABEL org.opencontainers.image.documentation=https://github.com/TheNetworkGuy/netbox-zabbix-sync/ +LABEL org.opencontainers.image.licenses=MIT +LABEL org.opencontainers.image.authors="Twan Kamans" RUN mkdir -p /opt/netbox-zabbix +USER 1000:1000 COPY --chown=1000:1000 . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi From 73d34851fbc8ea2c1926ab3f4722138a9c17f120 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 10 Apr 2025 15:34:50 +0200 Subject: [PATCH 36/93] Update Dockerfile --- Dockerfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 598d219..3217397 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,7 @@ # syntax=docker/dockerfile:1 FROM python:3.12-alpine - -RUN mkdir -p /opt/netbox-zabbix USER 1000:1000 +RUN mkdir -p /opt/netbox-zabbix COPY --chown=1000:1000 . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi From b56a4332b9148cce969c90af8520d9539aa280b2 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 10 Apr 2025 15:35:44 +0200 Subject: [PATCH 37/93] Update Dockerfile --- Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 3217397..c4693c1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,5 @@ # syntax=docker/dockerfile:1 FROM python:3.12-alpine -USER 1000:1000 RUN mkdir -p /opt/netbox-zabbix COPY --chown=1000:1000 . 
/opt/netbox-zabbix WORKDIR /opt/netbox-zabbix From 20a3c67fd4f1fa24774a5c8dbb0b493844380916 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 10 Apr 2025 15:37:57 +0200 Subject: [PATCH 38/93] Update Dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index c4693c1..fa8d9c4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # syntax=docker/dockerfile:1 FROM python:3.12-alpine RUN mkdir -p /opt/netbox-zabbix -COPY --chown=1000:1000 . /opt/netbox-zabbix +COPY . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi RUN pip install -r ./requirements.txt From 13fe406b635de3ba290d3ffdc1c960bf4652750a Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 10 Apr 2025 16:00:56 +0200 Subject: [PATCH 39/93] Update Dockerfile --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index fa8d9c4..77628da 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,6 +4,7 @@ RUN mkdir -p /opt/netbox-zabbix COPY . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi +USER 1000:1000 RUN pip install -r ./requirements.txt ENTRYPOINT ["python"] CMD ["/opt/netbox-zabbix/netbox_zabbix_sync.py", "-v"] From 6abdac2eb44af4c5f284dcd31a1ef7308594bcb5 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 10 Apr 2025 16:01:53 +0200 Subject: [PATCH 40/93] Update Dockerfile --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index 77628da..ffd868f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,6 +4,7 @@ RUN mkdir -p /opt/netbox-zabbix COPY . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi +RUN chown -R 1000:1000 /opt/netbox-zabbix USER 1000:1000 RUN pip install -r ./requirements.txt ENTRYPOINT ["python"] From 37b3bfc7fb12970f38a806985326dd56e7979664 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 10 Apr 2025 16:05:34 +0200 Subject: [PATCH 41/93] Update Dockerfile --- Dockerfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index ffd868f..d4f7eee 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,11 @@ # syntax=docker/dockerfile:1 FROM python:3.12-alpine -RUN mkdir -p /opt/netbox-zabbix -COPY . /opt/netbox-zabbix +RUN mkdir -p /opt/netbox-zabbix && chown -R 1000:1000 /opt/netbox-zabbix + +USER 1000:1000 +COPY --chown=1000:1000 . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix RUN if ! [ -f ./config.py ]; then cp ./config.py.example ./config.py; fi -RUN chown -R 1000:1000 /opt/netbox-zabbix -USER 1000:1000 RUN pip install -r ./requirements.txt ENTRYPOINT ["python"] CMD ["/opt/netbox-zabbix/netbox_zabbix_sync.py", "-v"] From 2ea2edb6a6e0ee002a2d3e07ed474098c832e921 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 10 Apr 2025 16:13:37 +0200 Subject: [PATCH 42/93] Update Dockerfile --- Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Dockerfile b/Dockerfile index d4f7eee..198dbe5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,6 +6,8 @@ USER 1000:1000 COPY --chown=1000:1000 . /opt/netbox-zabbix WORKDIR /opt/netbox-zabbix RUN if ! 
[ -f ./config.py ]; then cp ./config.py.example ./config.py; fi +USER root RUN pip install -r ./requirements.txt +USER 1000:1000 ENTRYPOINT ["python"] CMD ["/opt/netbox-zabbix/netbox_zabbix_sync.py", "-v"] From ea5b7d31967afdd841615cafd0696672dfdd21e2 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 14 Apr 2025 20:13:15 +0200 Subject: [PATCH 43/93] Added initial unittesting PoC to see if Docker and Python are working correctly --- .github/workflows/unittesting.yml | 37 +++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 .github/workflows/unittesting.yml diff --git a/.github/workflows/unittesting.yml b/.github/workflows/unittesting.yml new file mode 100644 index 0000000..d4a2ad1 --- /dev/null +++ b/.github/workflows/unittesting.yml @@ -0,0 +1,37 @@ +--- +name: Unit testing and functional code control +on: + push: + branches: + - 'main' + - 'develop' + - 'unittesting' + +jobs: + integration: + runs-on: ubuntu-latest + services: + docker: + image: docker:dind + options: --privileged --shm-size=2g + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + container: + image: ubuntu:latest + steps: + - uses: actions/checkout@v4 + + - name: Install Docker + run: | + apt-get update + apt-get install -y docker.io + + - name: Test Docker + run: | + docker version + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + - name: Show Python version + run: python --version \ No newline at end of file From feb719542d8ca6cfe45f35f4c07739acef14f721 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 14 Apr 2025 20:22:43 +0200 Subject: [PATCH 44/93] Added Netbox deployment config --- .github/workflows/unittesting.yml | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/.github/workflows/unittesting.yml b/.github/workflows/unittesting.yml index d4a2ad1..2283710 100644 --- a/.github/workflows/unittesting.yml +++ b/.github/workflows/unittesting.yml @@ -21,17 +21,26 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Install Docker + - name: Install Docker and Docker-compose run: | apt-get update apt-get install -y docker.io + sudo apt-get install docker-compose -y - - name: Test Docker - run: | - docker version - name: Setup Python uses: actions/setup-python@v5 with: python-version: 3.12 - name: Show Python version - run: python --version \ No newline at end of file + run: python --version + - name: Test Docker + run: | + docker version + - name: configure and start Netbox + run: | + git clone -b release https://github.com/netbox-community/netbox-docker.git + mv netbox-docker/docker-compose.override.yml.example netbox-docker/docker-compose.override.yml + docker-compose -f netbox-docker/docker-compose.yml pull + docker-compose -f netbox-docker/docker-compose.yml up -d + - name: Wait 2 minutes for compose stack to build + run: sleep 120s \ No newline at end of file From 38d61dcde74fbf69dbbc787f2383ad6bcf5fb8ec Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 14 Apr 2025 20:25:02 +0200 Subject: [PATCH 45/93] Removed sudo statement --- .github/workflows/unittesting.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/unittesting.yml b/.github/workflows/unittesting.yml index 2283710..ef15ca3 100644 --- a/.github/workflows/unittesting.yml +++ b/.github/workflows/unittesting.yml @@ -25,8 +25,7 @@ jobs: run: | apt-get update apt-get install -y docker.io - sudo apt-get install docker-compose -y - + apt-get install docker-compose -y - name: Setup Python uses: 
actions/setup-python@v5 with: From f303e7e01d7e7cf76f437a80654888c9a304545c Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 14 Apr 2025 20:27:44 +0200 Subject: [PATCH 46/93] Moved to compose v2 --- .github/workflows/unittesting.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/unittesting.yml b/.github/workflows/unittesting.yml index ef15ca3..b970c5c 100644 --- a/.github/workflows/unittesting.yml +++ b/.github/workflows/unittesting.yml @@ -25,7 +25,7 @@ jobs: run: | apt-get update apt-get install -y docker.io - apt-get install docker-compose -y + apt install docker-compose-v2 -y - name: Setup Python uses: actions/setup-python@v5 with: @@ -39,7 +39,7 @@ jobs: run: | git clone -b release https://github.com/netbox-community/netbox-docker.git mv netbox-docker/docker-compose.override.yml.example netbox-docker/docker-compose.override.yml - docker-compose -f netbox-docker/docker-compose.yml pull - docker-compose -f netbox-docker/docker-compose.yml up -d + docker compose -f netbox-docker/docker-compose.yml pull + docker compose -f netbox-docker/docker-compose.yml up -d - name: Wait 2 minutes for compose stack to build run: sleep 120s \ No newline at end of file From 989f6fa96e2f7a03e18b243da956b73580ff9940 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 14 Apr 2025 20:36:52 +0200 Subject: [PATCH 47/93] Moved compose override logic to infra folder --- .github/workflows/unittesting.yml | 2 +- infra/netbox-compose.yml | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 infra/netbox-compose.yml diff --git a/.github/workflows/unittesting.yml b/.github/workflows/unittesting.yml index b970c5c..8b0e033 100644 --- a/.github/workflows/unittesting.yml +++ b/.github/workflows/unittesting.yml @@ -38,7 +38,7 @@ jobs: - name: configure and start Netbox run: | git clone -b release https://github.com/netbox-community/netbox-docker.git - mv netbox-docker/docker-compose.override.yml.example netbox-docker/docker-compose.override.yml + mv infra/netbox-compose.yml netbox-docker/docker-compose.override.yml docker compose -f netbox-docker/docker-compose.yml pull docker compose -f netbox-docker/docker-compose.yml up -d - name: Wait 2 minutes for compose stack to build diff --git a/infra/netbox-compose.yml b/infra/netbox-compose.yml new file mode 100644 index 0000000..89e04cf --- /dev/null +++ b/infra/netbox-compose.yml @@ -0,0 +1,22 @@ +services: + netbox: + ports: + - "8000:8080" + # If you want the Nginx unit status page visible from the + # outside of the container add the following port mapping: + # - "8001:8081" + healthcheck: + # Time for which the health check can fail after the container is started. + # This depends mostly on the performance of your database. On the first start, + # when all tables need to be created the start_period should be higher than on + # subsequent starts. For the first start after major version upgrades of NetBox + # the start_period might also need to be set higher. 
+ # Default value in our docker-compose.yml is 60s + start_period: 90s + # environment: + # SKIP_SUPERUSER: "false" + # SUPERUSER_API_TOKEN: "" + # SUPERUSER_EMAIL: "" + # SUPERUSER_NAME: "" + # SUPERUSER_PASSWORD: "" + From ad2ace942a713427a385203a5d76bffe9d5b87ec Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 14 Apr 2025 20:37:17 +0200 Subject: [PATCH 48/93] Increased start_period time of Netbox --- infra/netbox-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infra/netbox-compose.yml b/infra/netbox-compose.yml index 89e04cf..c1e8c96 100644 --- a/infra/netbox-compose.yml +++ b/infra/netbox-compose.yml @@ -12,7 +12,7 @@ services: # subsequent starts. For the first start after major version upgrades of NetBox # the start_period might also need to be set higher. # Default value in our docker-compose.yml is 60s - start_period: 90s + start_period: 150s # environment: # SKIP_SUPERUSER: "false" # SUPERUSER_API_TOKEN: "" From 4fd582970db3051d5781269f37e67dd0a2e5898c Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 14 Apr 2025 20:43:32 +0200 Subject: [PATCH 49/93] Container statement removed, added logs output --- .github/workflows/unittesting.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/unittesting.yml b/.github/workflows/unittesting.yml index 8b0e033..52db4bf 100644 --- a/.github/workflows/unittesting.yml +++ b/.github/workflows/unittesting.yml @@ -16,11 +16,8 @@ jobs: options: --privileged --shm-size=2g volumes: - /var/run/docker.sock:/var/run/docker.sock:ro - container: - image: ubuntu:latest steps: - uses: actions/checkout@v4 - - name: Install Docker and Docker-compose run: | apt-get update @@ -42,4 +39,6 @@ jobs: docker compose -f netbox-docker/docker-compose.yml pull docker compose -f netbox-docker/docker-compose.yml up -d - name: Wait 2 minutes for compose stack to build - run: sleep 120s \ No newline at end of file + run: | + sleep 120s + docker compose -f netbox-docker/docker-compose.yml logs netbox \ No newline at end of file From dad7d2911f5846ab52de4e7f91a1a459fcfaf9fc Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Wed, 23 Apr 2025 11:11:05 +0200 Subject: [PATCH 50/93] Reverted previous work --- .github/workflows/unittesting.yml | 44 ------------------------------- infra/netbox-compose.yml | 22 ---------------- 2 files changed, 66 deletions(-) delete mode 100644 .github/workflows/unittesting.yml delete mode 100644 infra/netbox-compose.yml diff --git a/.github/workflows/unittesting.yml b/.github/workflows/unittesting.yml deleted file mode 100644 index 52db4bf..0000000 --- a/.github/workflows/unittesting.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -name: Unit testing and functional code control -on: - push: - branches: - - 'main' - - 'develop' - - 'unittesting' - -jobs: - integration: - runs-on: ubuntu-latest - services: - docker: - image: docker:dind - options: --privileged --shm-size=2g - volumes: - - /var/run/docker.sock:/var/run/docker.sock:ro - steps: - - uses: actions/checkout@v4 - - name: Install Docker and Docker-compose - run: | - apt-get update - apt-get install -y docker.io - apt install docker-compose-v2 -y - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: 3.12 - - name: Show Python version - run: python --version - - name: Test Docker - run: | - docker version - - name: configure and start Netbox - run: | - git clone -b release https://github.com/netbox-community/netbox-docker.git - mv infra/netbox-compose.yml 
netbox-docker/docker-compose.override.yml - docker compose -f netbox-docker/docker-compose.yml pull - docker compose -f netbox-docker/docker-compose.yml up -d - - name: Wait 2 minutes for compose stack to build - run: | - sleep 120s - docker compose -f netbox-docker/docker-compose.yml logs netbox \ No newline at end of file diff --git a/infra/netbox-compose.yml b/infra/netbox-compose.yml deleted file mode 100644 index c1e8c96..0000000 --- a/infra/netbox-compose.yml +++ /dev/null @@ -1,22 +0,0 @@ -services: - netbox: - ports: - - "8000:8080" - # If you want the Nginx unit status page visible from the - # outside of the container add the following port mapping: - # - "8001:8081" - healthcheck: - # Time for which the health check can fail after the container is started. - # This depends mostly on the performance of your database. On the first start, - # when all tables need to be created the start_period should be higher than on - # subsequent starts. For the first start after major version upgrades of NetBox - # the start_period might also need to be set higher. - # Default value in our docker-compose.yml is 60s - start_period: 150s - # environment: - # SKIP_SUPERUSER: "false" - # SUPERUSER_API_TOKEN: "" - # SUPERUSER_EMAIL: "" - # SUPERUSER_NAME: "" - # SUPERUSER_PASSWORD: "" - From 7383583c43343b8d1434f396e5197780a21657b6 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Fri, 25 Apr 2025 14:43:35 +0200 Subject: [PATCH 51/93] Adjusted Gitignore, added config module, adjusted requirements for YAML support, added first unittests --- .gitignore | 2 +- config.yaml | 27 +++++++ modules/config.py | 37 +++++++++ requirements.txt | 1 + tests/__init__.py | 0 tests/test_device_deletion.py | 144 ++++++++++++++++++++++++++++++++++ 6 files changed, 210 insertions(+), 1 deletion(-) create mode 100644 config.yaml create mode 100644 modules/config.py create mode 100644 tests/__init__.py create mode 100644 tests/test_device_deletion.py diff --git a/.gitignore b/.gitignore index c3069c9..bc472c2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ *.log .venv -config.py +/config.py # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000..db2f422 --- /dev/null +++ b/config.yaml @@ -0,0 +1,27 @@ +# Required: Custom Field name for Zabbix templates +template_cf: "zabbix_templates" + +# Required: Custom Field name for Zabbix device +device_cf: "zabbix_hostid" + +# Optional: Traverse site groups and assign Zabbix hostgroups based on site groups +traverse_site_groups: false + +# Optional: Traverse regions and assign Zabbix hostgroups based on region hierarchy +traverse_regions: false + +# Optional: Enable inventory syncing for host metadata +inventory_sync: true + +# Optional: Choose which inventory fields to sync ("enabled", "manual", "disabled") +inventory_mode: "manual" + +# Optional: Mapping of NetBox device fields to Zabbix inventory fields +# See: https://www.zabbix.com/documentation/current/en/manual/api/reference/host/object#host_inventory +inventory_map: + serial: "serial" + asset_tag: "asset_tag" + description: "comment" + location: "location" + contact: "contact" + site: "site" \ No newline at end of file diff --git a/modules/config.py b/modules/config.py new file mode 100644 index 0000000..5ee6b5d --- /dev/null +++ b/modules/config.py @@ -0,0 +1,37 @@ +""" +Module for parsing configuration from the top level config.yaml file +""" +from pathlib import Path +import yaml + +DEFAULT_CONFIG = { + "templates_config_context": 
False, + "templates_config_context_overrule": False, + "template_cf": "zabbix_template", + "device_cf": "zabbix_hostid", + "clustering": False, + "create_hostgroups": True, + "create_journal": False, + "sync_vms": False, + "zabbix_device_removal": ["Decommissioning", "Inventory"], + "zabbix_device_disable": ["Offline", "Planned", "Staged", "Failed"] +} + + +def load_config(config_path="config.yaml"): + """Loads config from YAML file and combines it with default config""" + # Get data from default config. + config = DEFAULT_CONFIG.copy() + # Set config path + config_file = Path(config_path) + # Check if file exists + if config_file.exists(): + try: + with open(config_file, "r", encoding="utf-8") as f: + user_config = yaml.safe_load(f) or {} + config.update(user_config) + except OSError: + # Probably some I/O error with user permissions etc. + # Ignore for now and return default config + pass + return config diff --git a/requirements.txt b/requirements.txt index 33f4b90..832b4b1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ pynetbox zabbix-utils==2.0.1 +pyyaml \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_device_deletion.py b/tests/test_device_deletion.py new file mode 100644 index 0000000..f2c9438 --- /dev/null +++ b/tests/test_device_deletion.py @@ -0,0 +1,144 @@ +"""Testing device creation""" +from unittest.mock import MagicMock, patch, call +from modules.device import PhysicalDevice +from modules.config import load_config + +config = load_config() + + +def mock_nb_device(): + mock = MagicMock() + mock.id = 1 + mock.url = "http://netbox:8000/api/dcim/devices/1/" + mock.display_url = "http://netbox:8000/dcim/devices/1/" + mock.display = "SW01" + mock.name = "SW01" + + mock.device_type = MagicMock() + mock.device_type.id = 1 + mock.device_type.url = "http://netbox:8000/api/dcim/device-types/1/" + mock.device_type.display = "Catalyst 3750G-48TS-S" + mock.device_type.manufacturer = MagicMock() + mock.device_type.manufacturer.id = 1 + mock.device_type.manufacturer.url = "http://netbox:8000/api/dcim/manufacturers/1/" + mock.device_type.manufacturer.display = "Cisco" + mock.device_type.manufacturer.name = "Cisco" + mock.device_type.manufacturer.slug = "cisco" + mock.device_type.manufacturer.description = "" + mock.device_type.model = "Catalyst 3750G-48TS-S" + mock.device_type.slug = "cisco-ws-c3750g-48ts-s" + mock.device_type.description = "" + + mock.role = MagicMock() + mock.role.id = 1 + mock.role.url = "http://netbox:8000/api/dcim/device-roles/1/" + mock.role.display = "Switch" + mock.role.name = "Switch" + mock.role.slug = "switch" + mock.role.description = "" + + mock.tenant = None + mock.platform = None + mock.serial = "0031876" + mock.asset_tag = None + + mock.site = MagicMock() + mock.site.id = 2 + mock.site.url = "http://netbox:8000/api/dcim/sites/2/" + mock.site.display = "AMS01" + mock.site.name = "AMS01" + mock.site.slug = "ams01" + mock.site.description = "" + + mock.location = None + mock.rack = None + mock.position = None + mock.face = None + mock.latitude = None + mock.longitude = None + mock.parent_device = None + + mock.status = MagicMock() + mock.status.value = "decommissioning" + mock.status.label = "Decommissioning" + + mock.cluster = None + mock.virtual_chassis = None + mock.vc_position = None + mock.vc_priority = None + mock.description = "" + mock.comments = "" + mock.config_template = None + mock.config_context = {} + mock.local_context_data = None 
+ mock.tags = [] + + mock.custom_fields = {"zabbix_hostid": 1956} + + def save(self): + pass + + return mock + +def mock_zabbix(): + mock = MagicMock() + mock.host.get.return_value = [{}] + mock.host.delete.return_value = True + + return mock + +netbox_journals = MagicMock() +nb_version = '4.2' +create_journal = MagicMock() +logger = MagicMock() + +def test_check_cluster_status(): + """Checks if the isCluster function is functioning properly""" + nb_device = mock_nb_device() + zabbix = mock_zabbix() + device = PhysicalDevice(nb_device, zabbix, None, None, + None, logger) + assert device.isCluster() == False + + +def test_device_deletion_host_exists(): + """Checks device deletion process""" + nb_device = mock_nb_device() + zabbix = mock_zabbix() + with patch.object(PhysicalDevice, 'create_journal_entry') as mock_journal: + # Create device + device = PhysicalDevice(nb_device, zabbix, netbox_journals, nb_version, + create_journal, logger) + device.cleanup() + # Check if Zabbix HostID is empty + assert device.nb.custom_fields[config["device_cf"]] is None + # Check if API calls are executed + device.zabbix.host.get.assert_called_once_with(filter={'hostid': 1956}, output=[]) + device.zabbix.host.delete.assert_called_once_with(1956) + # check logger + mock_journal.assert_called_once_with("warning", "Deleted host from Zabbix") + device.logger.info.assert_called_once_with("Host SW01: Deleted host from Zabbix.") + + +def test_device_deletion_host_notExists(): + nb_device = mock_nb_device() + zabbix = mock_zabbix() + zabbix.host.get.return_value = None + + with patch.object(PhysicalDevice, 'create_journal_entry') as mock_journal: + # Create new device + device = PhysicalDevice(nb_device, zabbix, netbox_journals, nb_version, + create_journal, logger) + # Try to clean the device up in Zabbix + device.cleanup() + # Confirm that a call was issued to Zabbix to check if the host exists + device.zabbix.host.get.assert_called_once_with(filter={'hostid': 1956}, output=[]) + # Confirm that no device was deleted in Zabbix + device.zabbix.host.delete.assert_not_called() + # Test logging + log_calls = [ + call('Host SW01: Deleted host from Zabbix.'), + call('Host SW01: was already deleted from Zabbix. 
Removed link in NetBox.') + ] + logger.info.assert_has_calls(log_calls) + assert logger.info.call_count == 2 From cb0500d0c0e398ee2ce8c969dbba8fb29479ffa2 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 10:47:52 +0200 Subject: [PATCH 52/93] Fixed test layout and added pipeline step to actually run tests --- .github/workflows/run_tests.yml | 22 ++++++++++++++ tests/test_device_deletion.py | 54 ++++++++++++++++----------------- 2 files changed, 48 insertions(+), 28 deletions(-) create mode 100644 .github/workflows/run_tests.yml diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml new file mode 100644 index 0000000..2a35e78 --- /dev/null +++ b/.github/workflows/run_tests.yml @@ -0,0 +1,22 @@ +--- +name: Pytest code testing + +on: + workflow_call + +jobs: + test_code: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest pytest-mock + pip install -r requirements.txt + - name: Analysing the code with pylint + run: | + cp config.py.example config.py + pytest tests diff --git a/tests/test_device_deletion.py b/tests/test_device_deletion.py index f2c9438..41c4420 100644 --- a/tests/test_device_deletion.py +++ b/tests/test_device_deletion.py @@ -7,6 +7,7 @@ config = load_config() def mock_nb_device(): + """Function to mock Netbox device""" mock = MagicMock() mock.id = 1 mock.url = "http://netbox:8000/api/dcim/devices/1/" @@ -15,27 +16,20 @@ def mock_nb_device(): mock.name = "SW01" mock.device_type = MagicMock() - mock.device_type.id = 1 - mock.device_type.url = "http://netbox:8000/api/dcim/device-types/1/" mock.device_type.display = "Catalyst 3750G-48TS-S" mock.device_type.manufacturer = MagicMock() - mock.device_type.manufacturer.id = 1 - mock.device_type.manufacturer.url = "http://netbox:8000/api/dcim/manufacturers/1/" mock.device_type.manufacturer.display = "Cisco" mock.device_type.manufacturer.name = "Cisco" mock.device_type.manufacturer.slug = "cisco" mock.device_type.manufacturer.description = "" mock.device_type.model = "Catalyst 3750G-48TS-S" mock.device_type.slug = "cisco-ws-c3750g-48ts-s" - mock.device_type.description = "" mock.role = MagicMock() mock.role.id = 1 - mock.role.url = "http://netbox:8000/api/dcim/device-roles/1/" mock.role.display = "Switch" mock.role.name = "Switch" mock.role.slug = "switch" - mock.role.description = "" mock.tenant = None mock.platform = None @@ -43,19 +37,14 @@ def mock_nb_device(): mock.asset_tag = None mock.site = MagicMock() - mock.site.id = 2 - mock.site.url = "http://netbox:8000/api/dcim/sites/2/" mock.site.display = "AMS01" mock.site.name = "AMS01" mock.site.slug = "ams01" - mock.site.description = "" mock.location = None mock.rack = None mock.position = None mock.face = None - mock.latitude = None - mock.longitude = None mock.parent_device = None mock.status = MagicMock() @@ -71,34 +60,32 @@ def mock_nb_device(): mock.config_template = None mock.config_context = {} mock.local_context_data = None - mock.tags = [] mock.custom_fields = {"zabbix_hostid": 1956} - - def save(self): - pass - return mock + def mock_zabbix(): + """Function to mock Zabbix""" mock = MagicMock() mock.host.get.return_value = [{}] mock.host.delete.return_value = True - return mock + netbox_journals = MagicMock() -nb_version = '4.2' +NB_VERSION = '4.2' create_journal = MagicMock() logger = MagicMock() + def test_check_cluster_status(): """Checks if the isCluster function is 
functioning properly""" nb_device = mock_nb_device() zabbix = mock_zabbix() device = PhysicalDevice(nb_device, zabbix, None, None, None, logger) - assert device.isCluster() == False + assert device.isCluster() is False def test_device_deletion_host_exists(): @@ -107,38 +94,49 @@ def test_device_deletion_host_exists(): zabbix = mock_zabbix() with patch.object(PhysicalDevice, 'create_journal_entry') as mock_journal: # Create device - device = PhysicalDevice(nb_device, zabbix, netbox_journals, nb_version, + device = PhysicalDevice(nb_device, zabbix, netbox_journals, NB_VERSION, create_journal, logger) device.cleanup() # Check if Zabbix HostID is empty assert device.nb.custom_fields[config["device_cf"]] is None # Check if API calls are executed - device.zabbix.host.get.assert_called_once_with(filter={'hostid': 1956}, output=[]) + device.zabbix.host.get.assert_called_once_with(filter={'hostid': 1956}, + output=[]) device.zabbix.host.delete.assert_called_once_with(1956) # check logger - mock_journal.assert_called_once_with("warning", "Deleted host from Zabbix") - device.logger.info.assert_called_once_with("Host SW01: Deleted host from Zabbix.") + mock_journal.assert_called_once_with("warning", + "Deleted host from Zabbix") + device.logger.info.assert_called_once_with("Host SW01: Deleted " + "host from Zabbix.") -def test_device_deletion_host_notExists(): +def test_device_deletion_host_not_exists(): + """ + Test if device in Netbox gets unlinked + when host is not present in Zabbix + """ nb_device = mock_nb_device() zabbix = mock_zabbix() zabbix.host.get.return_value = None with patch.object(PhysicalDevice, 'create_journal_entry') as mock_journal: # Create new device - device = PhysicalDevice(nb_device, zabbix, netbox_journals, nb_version, + device = PhysicalDevice(nb_device, zabbix, netbox_journals, NB_VERSION, create_journal, logger) # Try to clean the device up in Zabbix device.cleanup() # Confirm that a call was issued to Zabbix to check if the host exists - device.zabbix.host.get.assert_called_once_with(filter={'hostid': 1956}, output=[]) + device.zabbix.host.get.assert_called_once_with(filter={'hostid': 1956}, + output=[]) # Confirm that no device was deleted in Zabbix device.zabbix.host.delete.assert_not_called() # Test logging log_calls = [ call('Host SW01: Deleted host from Zabbix.'), - call('Host SW01: was already deleted from Zabbix. Removed link in NetBox.') + call('Host SW01: was already deleted from Zabbix. ' + 'Removed link in NetBox.') ] logger.info.assert_has_calls(log_calls) assert logger.info.call_count == 2 + mock_journal.assert_called_once_with("warning", + "Deleted host from Zabbix") From 5fd89a1f8a3e75edb68938b8e5519db3731c17af Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 13:32:28 +0200 Subject: [PATCH 53/93] Added .vscode as exception to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index bc472c2..5fdbd95 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] +.vscode \ No newline at end of file From eb307337f68150a155cc52a94216aa15a637aee9 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 14:50:52 +0200 Subject: [PATCH 54/93] Removed YAML config logic, added python config logic with default fallback. Added ENV variable support for config parameters. 
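Illustrative usage sketch (not part of this patch): the new loader starts from DEFAULT_CONFIG, overlays matching variables from config.py when that file exists, and finally overlays environment variables whose names match the config keys verbatim. Values taken from the environment arrive as strings:

    # Hypothetical usage, assuming the modules/config.py added in this patch is importable
    # and a config.py is present (load_config_file returns None when the file is missing).
    from os import environ
    from modules.config import load_config

    environ["sync_vms"] = "true"   # env var name matches the config key exactly
    config = load_config()
    print(config["sync_vms"])      # -> "true" (a string, not a boolean)
    print(config["device_cf"])     # -> "zabbix_hostid" unless overridden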
--- config.yaml | 27 ------------ modules/config.py | 61 +++++++++++++++++++-------- modules/device.py | 84 +++++++++++++++++--------------------- modules/virtual_machine.py | 24 ++++------- netbox_zabbix_sync.py | 63 +++++++++++----------------- requirements.txt | 3 +- 6 files changed, 114 insertions(+), 148 deletions(-) delete mode 100644 config.yaml diff --git a/config.yaml b/config.yaml deleted file mode 100644 index db2f422..0000000 --- a/config.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Required: Custom Field name for Zabbix templates -template_cf: "zabbix_templates" - -# Required: Custom Field name for Zabbix device -device_cf: "zabbix_hostid" - -# Optional: Traverse site groups and assign Zabbix hostgroups based on site groups -traverse_site_groups: false - -# Optional: Traverse regions and assign Zabbix hostgroups based on region hierarchy -traverse_regions: false - -# Optional: Enable inventory syncing for host metadata -inventory_sync: true - -# Optional: Choose which inventory fields to sync ("enabled", "manual", "disabled") -inventory_mode: "manual" - -# Optional: Mapping of NetBox device fields to Zabbix inventory fields -# See: https://www.zabbix.com/documentation/current/en/manual/api/reference/host/object#host_inventory -inventory_map: - serial: "serial" - asset_tag: "asset_tag" - description: "comment" - location: "location" - contact: "contact" - site: "site" \ No newline at end of file diff --git a/modules/config.py b/modules/config.py index 5ee6b5d..3adda8b 100644 --- a/modules/config.py +++ b/modules/config.py @@ -2,7 +2,9 @@ Module for parsing configuration from the top level config.yaml file """ from pathlib import Path -import yaml +from importlib import util +from os import environ +from logging import getLogger DEFAULT_CONFIG = { "templates_config_context": False, @@ -18,20 +20,43 @@ DEFAULT_CONFIG = { } -def load_config(config_path="config.yaml"): - """Loads config from YAML file and combines it with default config""" - # Get data from default config. - config = DEFAULT_CONFIG.copy() - # Set config path - config_file = Path(config_path) - # Check if file exists - if config_file.exists(): - try: - with open(config_file, "r", encoding="utf-8") as f: - user_config = yaml.safe_load(f) or {} - config.update(user_config) - except OSError: - # Probably some I/O error with user permissions etc. 
- # Ignore for now and return default config - pass - return config +def load_config(): + """Returns combined config from all sources""" + # Overwrite default config with config.py + conf = load_config_file(config_default=DEFAULT_CONFIG) + # Overwrite default config and config.py with environment variables + for key in conf: + value_setting = load_env_variable(key) + if value_setting is not None: + conf[key] = value_setting + return conf + + +def load_env_variable(config_environvar): + """Returns config from environment variable""" + if config_environvar in environ: + return environ[config_environvar] + return None + + +def load_config_file(config_default, config_file="config.py"): + """Returns config from config.py file""" + # Check if config.py exists and load it + # If it does not exist, return the default config + config_path = Path(config_file) + if config_path.exists(): + dconf = config_default.copy() + # Dynamically import the config module + spec = util.spec_from_file_location("config", config_path) + config_module = util.module_from_spec(spec) + spec.loader.exec_module(config_module) + # Update DEFAULT_CONFIG with variables from the config module + for key in dconf: + if hasattr(config_module, key): + dconf[key] = getattr(config_module, key) + return dconf + else: + getLogger(__name__).warning( + "Config file %s not found. Using default config " + "and environment variables.", config_file) + return None diff --git a/modules/device.py b/modules/device.py index 2ed37e8..5d11c82 100644 --- a/modules/device.py +++ b/modules/device.py @@ -3,7 +3,6 @@ """ Device specific handeling for NetBox to Zabbix """ -from os import sys from re import search from logging import getLogger from zabbix_utils import APIRequestError @@ -11,19 +10,10 @@ from modules.exceptions import (SyncInventoryError, TemplateError, SyncExternalE InterfaceConfigError, JournalError) from modules.interface import ZabbixInterface from modules.hostgroups import Hostgroup -try: - from config import ( - template_cf, device_cf, - traverse_site_groups, - traverse_regions, - inventory_sync, - inventory_mode, - inventory_map - ) -except ModuleNotFoundError: - print("Configuration file config.py not found in main directory." 
- "Please create the file or rename the config.py.example file to config.py.") - sys.exit(0) +from modules.config import load_config + +config = load_config() + class PhysicalDevice(): # pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-positional-arguments @@ -76,10 +66,10 @@ class PhysicalDevice(): raise SyncInventoryError(e) # Check if device has custom field for ZBX ID - if device_cf in self.nb.custom_fields: - self.zabbix_id = self.nb.custom_fields[device_cf] + if config["device_cf"] in self.nb.custom_fields: + self.zabbix_id = self.nb.custom_fields[config["device_cf"]] else: - e = f"Host {self.name}: Custom field {device_cf} not present" + e = f"Host {self.name}: Custom field {config["device_cf"]} not present" self.logger.warning(e) raise SyncInventoryError(e) @@ -87,7 +77,7 @@ class PhysicalDevice(): odd_character_list = ["ä", "ö", "ü", "Ä", "Ö", "Ü", "ß"] self.use_visible_name = False if (any(letter in self.name for letter in odd_character_list) or - bool(search('[\u0400-\u04FF]', self.name))): + bool(search('[\u0400-\u04FF]', self.name))): self.name = f"NETBOX_ID{self.id}" self.visible_name = self.nb.name self.use_visible_name = True @@ -101,8 +91,8 @@ class PhysicalDevice(): """Set the hostgroup for this device""" # Create new Hostgroup instance hg = Hostgroup("dev", self.nb, self.nb_api_version, logger=self.logger, - nested_sitegroup_flag=traverse_site_groups, - nested_region_flag=traverse_regions, + nested_sitegroup_flag=config["traverse_site_groups"], + nested_region_flag=config["traverse_regions"], nb_groups=nb_site_groups, nb_regions=nb_regions) # Generate hostgroup based on hostgroup format @@ -137,13 +127,13 @@ class PhysicalDevice(): # Get Zabbix templates from the device type device_type_cfs = self.nb.device_type.custom_fields # Check if the ZBX Template CF is present - if template_cf in device_type_cfs: + if config["template_cf"] in device_type_cfs: # Set value to template - return [device_type_cfs[template_cf]] + return [device_type_cfs[config["template_cf"]]] # Custom field not found, return error - e = (f"Custom field {template_cf} not " - f"found for {self.nb.device_type.manufacturer.name}" - f" - {self.nb.device_type.display}.") + e = (f"Custom field {config["template_cf"]} not " + f"found for {self.nb.device_type.manufacturer.name}" + f" - {self.nb.device_type.display}.") raise TemplateError(e) def get_templates_context(self): @@ -164,25 +154,25 @@ class PhysicalDevice(): def set_inventory(self, nbdevice): """ Set host inventory """ # Set inventory mode. Default is disabled (see class init function). - if inventory_mode == "disabled": - if inventory_sync: + if config["inventory_mode"] == "disabled": + if config["inventory_sync"]: self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. " - "Inventory sync is enabled in config but inventory mode is disabled.") + "Inventory sync is enabled in config but inventory mode is disabled.") return True - if inventory_mode == "manual": + if config["inventory_mode"] == "manual": self.inventory_mode = 0 - elif inventory_mode == "automatic": + elif config["inventory_mode"] == "automatic": self.inventory_mode = 1 else: self.logger.error(f"Host {self.name}: Specified value for inventory mode in" - f" config is not valid. Got value {inventory_mode}") + f" config is not valid. 
Got value {config["inventory_mode"]}") return False self.inventory = {} - if inventory_sync and self.inventory_mode in [0,1]: + if config["inventory_sync"] and self.inventory_mode in [0, 1]: self.logger.debug(f"Host {self.name}: Starting inventory mapper") # Let's build an inventory dict for each property in the inventory_map - for nb_inv_field, zbx_inv_field in inventory_map.items(): - field_list = nb_inv_field.split("/") # convert str to list based on delimiter + for nb_inv_field, zbx_inv_field in config["inventory_map"].items(): + field_list = nb_inv_field.split("/") # convert str to list based on delimiter # start at the base of the dict... value = nbdevice # ... and step through the dict till we find the needed value @@ -191,8 +181,8 @@ class PhysicalDevice(): # Check if the result is usable and expected # We want to apply any int or float 0 values, # even if python thinks those are empty. - if ((value and isinstance(value, int | float | str )) or - (isinstance(value, int | float) and int(value) ==0)): + if ((value and isinstance(value, int | float | str)) or + (isinstance(value, int | float) and int(value) == 0)): self.inventory[zbx_inv_field] = str(value) elif not value: # empty value should just be an empty string for API compatibility @@ -204,7 +194,7 @@ class PhysicalDevice(): self.logger.error(f"Host {self.name}: Inventory lookup for '{nb_inv_field}'" " returned an unexpected type: it will be skipped.") self.logger.debug(f"Host {self.name}: Inventory mapping complete. " - f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") + f"Mapped {len(list(filter(None, self.inventory.values())))} field(s)") return True def isCluster(self): @@ -275,7 +265,7 @@ class PhysicalDevice(): # Return error should the template not be found in Zabbix if not template_match: e = (f"Unable to find template {nb_template} " - f"for host {self.name} in Zabbix. Skipping host...") + f"for host {self.name} in Zabbix. Skipping host...") self.logger.warning(e) raise SyncInventoryError(e) @@ -305,7 +295,7 @@ class PhysicalDevice(): zbx_host = bool(self.zabbix.host.get(filter={'hostid': self.zabbix_id}, output=[])) e = (f"Host {self.name}: was already deleted from Zabbix." - " Removed link in NetBox.") + " Removed link in NetBox.") if zbx_host: # Delete host should it exists self.zabbix.host.delete(self.zabbix_id) @@ -321,7 +311,7 @@ class PhysicalDevice(): def _zeroize_cf(self): """Sets the hostID custom field in NetBox to zero, effectively destroying the link""" - self.nb.custom_fields[device_cf] = None + self.nb.custom_fields[config["device_cf"]] = None self.nb.save() def _zabbixHostnameExists(self): @@ -366,7 +356,7 @@ class PhysicalDevice(): input: List of all proxies and proxy groups in standardized format """ # check if the key Zabbix is defined in the config context - if not "zabbix" in self.nb.config_context: + if "zabbix" not in self.nb.config_context: return False if ("proxy" in self.nb.config_context["zabbix"] and not self.nb.config_context["zabbix"]["proxy"]): @@ -448,7 +438,7 @@ class PhysicalDevice(): self.logger.error(e) raise SyncExternalError(e) from None # Set NetBox custom field to hostID value. - self.nb.custom_fields[device_cf] = int(self.zabbix_id) + self.nb.custom_fields[config["device_cf"]] = int(self.zabbix_id) self.nb.save() msg = f"Host {self.name}: Created host in Zabbix." 
self.logger.info(msg) @@ -542,7 +532,7 @@ class PhysicalDevice(): selectGroups=["groupid"], selectHostGroups=["groupid"], selectParentTemplates=["templateid"], - selectInventory=list(inventory_map.values())) + selectInventory=list(config["inventory_map"].values())) if len(host) > 1: e = (f"Got {len(host)} results for Zabbix hosts " f"with ID {self.zabbix_id} - hostname {self.name}.") @@ -645,9 +635,9 @@ class PhysicalDevice(): if proxy_set and not proxy_power: # Display error message self.logger.error(f"Host {self.name} is configured " - f"with proxy in Zabbix but not in NetBox. The" - " -p flag was ommited: no " - "changes have been made.") + f"with proxy in Zabbix but not in NetBox. The" + " -p flag was ommited: no " + "changes have been made.") if not proxy_set: self.logger.debug(f"Host {self.name}: proxy in-sync.") # Check host inventory mode @@ -656,7 +646,7 @@ class PhysicalDevice(): else: self.logger.warning(f"Host {self.name}: inventory_mode OUT of sync.") self.updateZabbixHost(inventory_mode=str(self.inventory_mode)) - if inventory_sync and self.inventory_mode in [0,1]: + if config["inventory_sync"] and self.inventory_mode in [0,1]: # Check host inventory mapping if host['inventory'] == self.inventory: self.logger.debug(f"Host {self.name}: inventory in-sync.") diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index 331a463..80dadc0 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -1,21 +1,15 @@ #!/usr/bin/env python3 # pylint: disable=duplicate-code """Module that hosts all functions for virtual machine processing""" - -from os import sys from modules.device import PhysicalDevice from modules.hostgroups import Hostgroup from modules.interface import ZabbixInterface -from modules.exceptions import TemplateError, InterfaceConfigError, SyncInventoryError -try: - from config import ( - traverse_site_groups, - traverse_regions - ) -except ModuleNotFoundError: - print("Configuration file config.py not found in main directory." 
- "Please create the file or rename the config.py.example file to config.py.") - sys.exit(0) +from modules.exceptions import (TemplateError, InterfaceConfigError, + SyncInventoryError) +from modules.config import load_config +# Load config +config = load_config() + class VirtualMachine(PhysicalDevice): """Model for virtual machines""" @@ -28,8 +22,8 @@ class VirtualMachine(PhysicalDevice): """Set the hostgroup for this device""" # Create new Hostgroup instance hg = Hostgroup("vm", self.nb, self.nb_api_version, logger=self.logger, - nested_sitegroup_flag=traverse_site_groups, - nested_region_flag=traverse_regions, + nested_sitegroup_flag=config["traverse_site_groups"], + nested_region_flag=config["traverse_regions"], nb_groups=nb_site_groups, nb_regions=nb_regions) # Generate hostgroup based on hostgroup format @@ -45,7 +39,7 @@ class VirtualMachine(PhysicalDevice): self.logger.warning(e) return True - def setInterfaceDetails(self): # pylint: disable=invalid-name + def setInterfaceDetails(self): # pylint: disable=invalid-name """ Overwrites device function to select an agent interface type by default Agent type interfaces are more likely to be used with VMs then SNMP diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 935b55e..6129f92 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -10,28 +10,13 @@ from pynetbox import api from pynetbox.core.query import RequestError as NBRequestError from requests.exceptions import ConnectionError as RequestsConnectionError from zabbix_utils import ZabbixAPI, APIRequestError, ProcessingError +from modules.config import load_config from modules.device import PhysicalDevice from modules.virtual_machine import VirtualMachine from modules.tools import convert_recordset, proxy_prepper from modules.exceptions import EnvironmentVarError, HostgroupError, SyncError -try: - from config import ( - templates_config_context, - templates_config_context_overrule, - clustering, create_hostgroups, - create_journal, full_proxy_sync, - zabbix_device_removal, - zabbix_device_disable, - hostgroup_format, - vm_hostgroup_format, - nb_device_filter, - sync_vms, - nb_vm_filter - ) -except ModuleNotFoundError: - print("Configuration file config.py not found in main directory." 
- "Please create the file or rename the config.py.example file to config.py.") - sys.exit(1) + +config = load_config() # Set logging log_format = logging.Formatter('%(asctime)s - %(name)s - ' @@ -83,7 +68,7 @@ def main(arguments): # Set NetBox API netbox = api(netbox_host, token=netbox_token, threading=True) # Check if the provided Hostgroup layout is valid - hg_objects = hostgroup_format.split("/") + hg_objects = config["hostgroup_format"].split("/") allowed_objects = ["location", "role", "manufacturer", "region", "site", "site_group", "tenant", "tenant_group"] # Create API call to get all custom fields which are on the device objects @@ -130,11 +115,11 @@ def main(arguments): else: proxy_name = "name" # Get all Zabbix and NetBox data - netbox_devices = list(netbox.dcim.devices.filter(**nb_device_filter)) + netbox_devices = list(netbox.dcim.devices.filter(**config["nb_device_filter"])) netbox_vms = [] - if sync_vms: + if config["sync_vms"]: netbox_vms = list( - netbox.virtualization.virtual_machines.filter(**nb_vm_filter)) + netbox.virtualization.virtual_machines.filter(**config["nb_vm_filter"])) netbox_site_groups = convert_recordset((netbox.dcim.site_groups.all())) netbox_regions = convert_recordset(netbox.dcim.regions.all()) netbox_journals = netbox.extras.journal_entries @@ -160,19 +145,19 @@ def main(arguments): for nb_vm in netbox_vms: try: vm = VirtualMachine(nb_vm, zabbix, netbox_journals, nb_version, - create_journal, logger) + config["create_journal"], logger) logger.debug(f"Host {vm.name}: started operations on VM.") vm.set_vm_template() # Check if a valid template has been found for this VM. if not vm.zbx_template_names: continue - vm.set_hostgroup(vm_hostgroup_format, + vm.set_hostgroup(config["vm_hostgroup_format"], netbox_site_groups, netbox_regions) # Check if a valid hostgroup has been found for this VM. if not vm.hostgroup: continue # Checks if device is in cleanup state - if vm.status in zabbix_device_removal: + if vm.status in config["zabbix_device_removal"]: if vm.zabbix_id: # Delete device from Zabbix # and remove hostID from NetBox. @@ -185,16 +170,16 @@ def main(arguments): f"not in the active state.") continue # Check if the VM is in the disabled state - if vm.status in zabbix_device_disable: + if vm.status in config["zabbix_device_disable"]: vm.zabbix_state = 1 # Check if VM is already in Zabbix if vm.zabbix_id: vm.ConsistencyCheck(zabbix_groups, zabbix_templates, - zabbix_proxy_list, full_proxy_sync, - create_hostgroups) + zabbix_proxy_list, config["full_proxy_sync"], + config["create_hostgroups"]) continue # Add hostgroup is config is set - if create_hostgroups: + if config["create_hostgroups"]: # Create new hostgroup. Potentially multiple groups if nested hostgroups = vm.createZabbixHostgroup(zabbix_groups) # go through all newly created hostgroups @@ -211,22 +196,22 @@ def main(arguments): try: # Set device instance set data such as hostgroup and template information. device = PhysicalDevice(nb_device, zabbix, netbox_journals, nb_version, - create_journal, logger) + config["create_journal"], logger) logger.debug(f"Host {device.name}: started operations on device.") - device.set_template(templates_config_context, - templates_config_context_overrule) + device.set_template(config["templates_config_context"], + config["templates_config_context_overrule"]) # Check if a valid template has been found for this VM. 
if not device.zbx_template_names: continue device.set_hostgroup( - hostgroup_format, netbox_site_groups, netbox_regions) + config["hostgroup_format"], netbox_site_groups, netbox_regions) # Check if a valid hostgroup has been found for this VM. if not device.hostgroup: continue device.set_inventory(nb_device) # Checks if device is part of cluster. # Requires clustering variable - if device.isCluster() and clustering: + if device.isCluster() and config["clustering"]: # Check if device is primary or secondary if device.promoteMasterDevice(): e = (f"Device {device.name}: is " @@ -240,7 +225,7 @@ def main(arguments): logger.info(e) continue # Checks if device is in cleanup state - if device.status in zabbix_device_removal: + if device.status in config["zabbix_device_removal"]: if device.zabbix_id: # Delete device from Zabbix # and remove hostID from NetBox. @@ -253,16 +238,16 @@ def main(arguments): f"not in the active state.") continue # Check if the device is in the disabled state - if device.status in zabbix_device_disable: + if device.status in config["zabbix_device_disable"]: device.zabbix_state = 1 # Check if device is already in Zabbix if device.zabbix_id: device.ConsistencyCheck(zabbix_groups, zabbix_templates, - zabbix_proxy_list, full_proxy_sync, - create_hostgroups) + zabbix_proxy_list, config["full_proxy_sync"], + config["create_hostgroups"]) continue # Add hostgroup is config is set - if create_hostgroups: + if config["create_hostgroups"]: # Create new hostgroup. Potentially multiple groups if nested hostgroups = device.createZabbixHostgroup(zabbix_groups) # go through all newly created hostgroups diff --git a/requirements.txt b/requirements.txt index 832b4b1..8da5ce5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,2 @@ pynetbox -zabbix-utils==2.0.1 -pyyaml \ No newline at end of file +zabbix-utils==2.0.1 \ No newline at end of file From e91eecffaabf970234fad3b471f7efa66232e7f6 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 14:58:38 +0200 Subject: [PATCH 55/93] Fixed on statement on new testcode. --- .github/workflows/run_tests.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 2a35e78..cae8b02 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -1,8 +1,9 @@ --- name: Pytest code testing -on: - workflow_call +on: + push: + pull_request: jobs: test_code: From 04a610cf8411aafc477e55043a7aae39c05a9204 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 15:10:48 +0200 Subject: [PATCH 56/93] Fixed some minor Flake8 errors --- modules/device.py | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/modules/device.py b/modules/device.py index 5d11c82..d410103 100644 --- a/modules/device.py +++ b/modules/device.py @@ -157,7 +157,8 @@ class PhysicalDevice(): if config["inventory_mode"] == "disabled": if config["inventory_sync"]: self.logger.error(f"Host {self.name}: Unable to map NetBox inventory to Zabbix. 
" - "Inventory sync is enabled in config but inventory mode is disabled.") + "Inventory sync is enabled in " + "config but inventory mode is disabled.") return True if config["inventory_mode"] == "manual": self.inventory_mode = 0 @@ -359,7 +360,7 @@ class PhysicalDevice(): if "zabbix" not in self.nb.config_context: return False if ("proxy" in self.nb.config_context["zabbix"] and - not self.nb.config_context["zabbix"]["proxy"]): + not self.nb.config_context["zabbix"]["proxy"]): return False # Proxy group takes priority over a proxy due # to it being HA and therefore being more reliable @@ -409,16 +410,17 @@ class PhysicalDevice(): # Set Zabbix proxy if defined self.setProxy(proxies) # Set basic data for host creation - create_data = {"host": self.name, - "name": self.visible_name, - "status": self.zabbix_state, - "interfaces": interfaces, - "groups": groups, - "templates": templateids, - "description": description, - "inventory_mode": self.inventory_mode, - "inventory": self.inventory - } + create_data = { + "host": self.name, + "name": self.visible_name, + "status": self.zabbix_state, + "interfaces": interfaces, + "groups": groups, + "templates": templateids, + "description": description, + "inventory_mode": self.inventory_mode, + "inventory": self.inventory + } # If a Zabbix proxy or Zabbix Proxy group has been defined if self.zbxproxy: # If a lower version than 7 is used, we can assume that @@ -518,7 +520,7 @@ class PhysicalDevice(): # Function returns true / false but also sets GroupID if not self.setZabbixGroupID(groups) and not create_hostgroups: e = (f"Host {self.name}: different hostgroup is required but " - "unable to create hostgroup without generation permission.") + "unable to create hostgroup without generation permission.") self.logger.warning(e) raise SyncInventoryError(e) # Prepare templates and proxy config @@ -569,7 +571,7 @@ class PhysicalDevice(): templateids.append({'templateid': template['templateid']}) # Update Zabbix with NB templates and clear any old / lost templates self.updateZabbixHost(templates_clear=host["parentTemplates"], - templates=templateids) + templates=templateids) else: self.logger.debug(f"Host {self.name}: template(s) in-sync.") @@ -594,7 +596,7 @@ class PhysicalDevice(): if self.zbxproxy: # Check if proxy or proxy group is defined if (self.zbxproxy["idtype"] in host and - host[self.zbxproxy["idtype"]] == self.zbxproxy["id"]): + host[self.zbxproxy["idtype"]] == self.zbxproxy["id"]): self.logger.debug(f"Host {self.name}: proxy in-sync.") # Backwards compatibility for Zabbix <= 6 elif "proxy_hostid" in host and host["proxy_hostid"] == self.zbxproxy["id"]: @@ -646,7 +648,7 @@ class PhysicalDevice(): else: self.logger.warning(f"Host {self.name}: inventory_mode OUT of sync.") self.updateZabbixHost(inventory_mode=str(self.inventory_mode)) - if config["inventory_sync"] and self.inventory_mode in [0,1]: + if config["inventory_sync"] and self.inventory_mode in [0, 1]: # Check host inventory mapping if host['inventory'] == self.inventory: self.logger.debug(f"Host {self.name}: inventory in-sync.") @@ -664,7 +666,7 @@ class PhysicalDevice(): if key in host["interfaces"][0]: # If SNMP is used, go through nested dict # to compare SNMP parameters - if isinstance(item,dict) and key == "details": + if isinstance(item, dict) and key == "details": for k, i in item.items(): if k in host["interfaces"][0][key]: # Set update if values don't match From 819126ce36b99fd119677bfe1e8c3c22942bfe30 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 15:35:51 +0200 
Subject: [PATCH 57/93] Added tests for config file, added logger for config file --- modules/config.py | 4 +- tests/test_configuration_parsing.py | 130 ++++++++++++++++++++++++++++ 2 files changed, 133 insertions(+), 1 deletion(-) create mode 100644 tests/test_configuration_parsing.py diff --git a/modules/config.py b/modules/config.py index 3adda8b..0919b01 100644 --- a/modules/config.py +++ b/modules/config.py @@ -6,6 +6,8 @@ from importlib import util from os import environ from logging import getLogger +logger = getLogger(__name__) + DEFAULT_CONFIG = { "templates_config_context": False, "templates_config_context_overrule": False, @@ -56,7 +58,7 @@ def load_config_file(config_default, config_file="config.py"): dconf[key] = getattr(config_module, key) return dconf else: - getLogger(__name__).warning( + logger.warning( "Config file %s not found. Using default config " "and environment variables.", config_file) return None diff --git a/tests/test_configuration_parsing.py b/tests/test_configuration_parsing.py new file mode 100644 index 0000000..070d3dd --- /dev/null +++ b/tests/test_configuration_parsing.py @@ -0,0 +1,130 @@ +"""Tests for configuration parsing in the modules.config module.""" +from unittest.mock import patch, MagicMock +from pathlib import Path +import os +from modules.config import load_config, DEFAULT_CONFIG, load_config_file, load_env_variable + + +def test_load_config_defaults(): + """Test that load_config returns default values when no config file or env vars are present""" + with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \ + patch('modules.config.load_env_variable', return_value=None): + config = load_config() + assert config == DEFAULT_CONFIG + assert config["templates_config_context"] is False + assert config["create_hostgroups"] is True + + +def test_load_config_file(): + """Test that load_config properly loads values from config file""" + mock_config = DEFAULT_CONFIG.copy() + mock_config["templates_config_context"] = True + mock_config["sync_vms"] = True + + with patch('modules.config.load_config_file', return_value=mock_config), \ + patch('modules.config.load_env_variable', return_value=None): + config = load_config() + assert config["templates_config_context"] is True + assert config["sync_vms"] is True + # Unchanged values should remain as defaults + assert config["create_journal"] is False + + +def test_load_env_variables(): + """Test that load_config properly loads values from environment variables""" + # Mock env variable loading to return values for specific keys + def mock_load_env(key): + if key == "sync_vms": + return True + if key == "create_journal": + return True + return None + + with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \ + patch('modules.config.load_env_variable', side_effect=mock_load_env): + config = load_config() + assert config["sync_vms"] is True + assert config["create_journal"] is True + # Unchanged values should remain as defaults + assert config["templates_config_context"] is False + + +def test_env_vars_override_config_file(): + """Test that environment variables override values from config file""" + mock_config = DEFAULT_CONFIG.copy() + mock_config["templates_config_context"] = True + mock_config["sync_vms"] = False + + # Mock env variable that will override the config file value + def mock_load_env(key): + if key == "sync_vms": + return True + return None + + with patch('modules.config.load_config_file', return_value=mock_config), \ + 
patch('modules.config.load_env_variable', side_effect=mock_load_env): + config = load_config() + # This should be overridden by the env var + assert config["sync_vms"] is True + # This should remain from the config file + assert config["templates_config_context"] is True + + +def test_load_config_file_function(): + """Test the load_config_file function directly""" + # Test when the file exists + with patch('pathlib.Path.exists', return_value=True), \ + patch('importlib.util.spec_from_file_location') as mock_spec: + # Setup the mock module with attributes + mock_module = MagicMock() + mock_module.templates_config_context = True + mock_module.sync_vms = True + + # Setup the mock spec + mock_spec_instance = MagicMock() + mock_spec.return_value = mock_spec_instance + mock_spec_instance.loader.exec_module = lambda x: None + + # Patch module_from_spec to return our mock module + with patch('importlib.util.module_from_spec', return_value=mock_module): + config = load_config_file(DEFAULT_CONFIG.copy()) + assert config["templates_config_context"] is True + assert config["sync_vms"] is True + + +def test_load_config_file_not_found(): + """Test load_config_file when the config file doesn't exist""" + # Instead of trying to assert on the logger call, we'll just check the return value + # and verify the function works as expected in this case + with patch('pathlib.Path.exists', return_value=False): + result = load_config_file(DEFAULT_CONFIG.copy()) + assert result is None + + +def test_load_env_variable_function(): + """Test the load_env_variable function directly""" + # Test when the environment variable exists + with patch.dict(os.environ, {"templates_config_context": "True"}): + value = load_env_variable("templates_config_context") + assert value == "True" + + # Test when the environment variable doesn't exist + with patch.dict(os.environ, {}, clear=True): + value = load_env_variable("nonexistent_variable") + assert value is None + + +def test_load_config_file_exception_handling(): + """Test that load_config_file handles exceptions gracefully""" + # This test requires modifying the load_config_file function to handle exceptions + # For now, we're just checking that an exception is raised + with patch('pathlib.Path.exists', return_value=True), \ + patch('importlib.util.spec_from_file_location', side_effect=Exception("Import error")): + # Since the current implementation doesn't handle exceptions, we should + # expect an exception to be raised + try: + result = load_config_file(DEFAULT_CONFIG.copy()) + assert False, "An exception should have been raised" + except Exception: + # This is expected + pass From 0c715d4f9647292fa257959a1af64113210204e5 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 15:44:45 +0200 Subject: [PATCH 58/93] Fixed some basic Flake8 errors, added Pylinter exception, Fixed some minor logging bugs. --- modules/config.py | 7 +++---- modules/device.py | 10 ++++++---- tests/test_configuration_parsing.py | 21 ++++++++++----------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/modules/config.py b/modules/config.py index 0919b01..660bfec 100644 --- a/modules/config.py +++ b/modules/config.py @@ -57,8 +57,7 @@ def load_config_file(config_default, config_file="config.py"): if hasattr(config_module, key): dconf[key] = getattr(config_module, key) return dconf - else: - logger.warning( - "Config file %s not found. Using default config " - "and environment variables.", config_file) + logger.warning( + "Config file %s not found. 
Using default config " + "and environment variables.", config_file) return None diff --git a/modules/device.py b/modules/device.py index d410103..aa15a06 100644 --- a/modules/device.py +++ b/modules/device.py @@ -69,7 +69,7 @@ class PhysicalDevice(): if config["device_cf"] in self.nb.custom_fields: self.zabbix_id = self.nb.custom_fields[config["device_cf"]] else: - e = f"Host {self.name}: Custom field {config["device_cf"]} not present" + e = f'Host {self.name}: Custom field {config["device_cf"]} not present' self.logger.warning(e) raise SyncInventoryError(e) @@ -131,11 +131,13 @@ class PhysicalDevice(): # Set value to template return [device_type_cfs[config["template_cf"]]] # Custom field not found, return error - e = (f"Custom field {config["template_cf"]} not " + e = (f'Custom field {config["template_cf"]} not ' f"found for {self.nb.device_type.manufacturer.name}" f" - {self.nb.device_type.display}.") raise TemplateError(e) + + def get_templates_context(self): """ Get Zabbix templates from the device context """ if "zabbix" not in self.config_context: @@ -165,8 +167,8 @@ class PhysicalDevice(): elif config["inventory_mode"] == "automatic": self.inventory_mode = 1 else: - self.logger.error(f"Host {self.name}: Specified value for inventory mode in" - f" config is not valid. Got value {config["inventory_mode"]}") + self.logger.error(f"Host {self.name}: Specified value for inventory mode in " + f'config is not valid. Got value {config["inventory_mode"]}') return False self.inventory = {} if config["inventory_sync"] and self.inventory_mode in [0, 1]: diff --git a/tests/test_configuration_parsing.py b/tests/test_configuration_parsing.py index 070d3dd..4f97abf 100644 --- a/tests/test_configuration_parsing.py +++ b/tests/test_configuration_parsing.py @@ -1,6 +1,5 @@ """Tests for configuration parsing in the modules.config module.""" from unittest.mock import patch, MagicMock -from pathlib import Path import os from modules.config import load_config, DEFAULT_CONFIG, load_config_file, load_env_variable @@ -20,7 +19,7 @@ def test_load_config_file(): mock_config = DEFAULT_CONFIG.copy() mock_config["templates_config_context"] = True mock_config["sync_vms"] = True - + with patch('modules.config.load_config_file', return_value=mock_config), \ patch('modules.config.load_env_variable', return_value=None): config = load_config() @@ -39,7 +38,7 @@ def test_load_env_variables(): if key == "create_journal": return True return None - + with patch('modules.config.load_config_file', return_value=DEFAULT_CONFIG.copy()), \ patch('modules.config.load_env_variable', side_effect=mock_load_env): config = load_config() @@ -54,13 +53,13 @@ def test_env_vars_override_config_file(): mock_config = DEFAULT_CONFIG.copy() mock_config["templates_config_context"] = True mock_config["sync_vms"] = False - + # Mock env variable that will override the config file value def mock_load_env(key): if key == "sync_vms": return True return None - + with patch('modules.config.load_config_file', return_value=mock_config), \ patch('modules.config.load_env_variable', side_effect=mock_load_env): config = load_config() @@ -79,12 +78,12 @@ def test_load_config_file_function(): mock_module = MagicMock() mock_module.templates_config_context = True mock_module.sync_vms = True - + # Setup the mock spec mock_spec_instance = MagicMock() mock_spec.return_value = mock_spec_instance mock_spec_instance.loader.exec_module = lambda x: None - + # Patch module_from_spec to return our mock module with patch('importlib.util.module_from_spec', 
return_value=mock_module): config = load_config_file(DEFAULT_CONFIG.copy()) @@ -94,7 +93,7 @@ def test_load_config_file_function(): def test_load_config_file_not_found(): """Test load_config_file when the config file doesn't exist""" - # Instead of trying to assert on the logger call, we'll just check the return value + # Instead of trying to assert on the logger call, we'll just check the return value # and verify the function works as expected in this case with patch('pathlib.Path.exists', return_value=False): result = load_config_file(DEFAULT_CONFIG.copy()) @@ -107,7 +106,7 @@ def test_load_env_variable_function(): with patch.dict(os.environ, {"templates_config_context": "True"}): value = load_env_variable("templates_config_context") assert value == "True" - + # Test when the environment variable doesn't exist with patch.dict(os.environ, {}, clear=True): value = load_env_variable("nonexistent_variable") @@ -123,8 +122,8 @@ def test_load_config_file_exception_handling(): # Since the current implementation doesn't handle exceptions, we should # expect an exception to be raised try: - result = load_config_file(DEFAULT_CONFIG.copy()) + load_config_file(DEFAULT_CONFIG.copy()) assert False, "An exception should have been raised" - except Exception: + except Exception: # pylint: disable=broad-except # This is expected pass From 68cf28565d4c56f684f44421f8c78cc57b57e6bc Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 15:47:37 +0200 Subject: [PATCH 59/93] Fixed some pipeline stuff --- .github/workflows/publish-image.yml | 3 --- .github/workflows/quality.yml | 7 ++++--- .github/workflows/run_tests.yml | 2 +- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/publish-image.yml b/.github/workflows/publish-image.yml index e9e6421..e6d07fe 100644 --- a/.github/workflows/publish-image.yml +++ b/.github/workflows/publish-image.yml @@ -4,9 +4,6 @@ on: push: branches: - main - - dockertest -# tags: -# - [0-9]+.* env: REGISTRY: ghcr.io diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml index 7b01f6f..745f948 100644 --- a/.github/workflows/quality.yml +++ b/.github/workflows/quality.yml @@ -1,11 +1,12 @@ --- name: Pylint Quality control -on: - workflow_call +on: + push: + pull_request: jobs: - build: + python_quality_testing: runs-on: ubuntu-latest strategy: matrix: diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index cae8b02..6413d9c 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -17,7 +17,7 @@ jobs: python -m pip install --upgrade pip pip install pytest pytest-mock pip install -r requirements.txt - - name: Analysing the code with pylint + - name: Testing the code with PyTest run: | cp config.py.example config.py pytest tests From 772fef093089322e748472ac80d2aca37657123d Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 15:57:11 +0200 Subject: [PATCH 60/93] Added prefix for environment variables --- modules/config.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/config.py b/modules/config.py index 660bfec..3c74216 100644 --- a/modules/config.py +++ b/modules/config.py @@ -36,6 +36,8 @@ def load_config(): def load_env_variable(config_environvar): """Returns config from environment variable""" + prefix = "NZS_" + config_environvar = prefix + config_environvar.upper() if config_environvar in environ: return environ[config_environvar] return None From 98edf0ad9987b9ad8da76d2382431bbac8f80d81 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy 
Date: Mon, 28 Apr 2025 17:23:51 +0200 Subject: [PATCH 61/93] Adjusted ENV prefix, fixed several linter errors in new tests --- .gitignore | 3 +- modules/config.py | 5 +- tests/test_configuration_parsing.py | 19 +- tests/test_device_deletion.py | 264 +++++++++++--------- tests/test_interface.py | 247 ++++++++++++++++++ tests/test_physical_device.py | 373 ++++++++++++++++++++++++++++ 6 files changed, 784 insertions(+), 127 deletions(-) create mode 100644 tests/test_interface.py create mode 100644 tests/test_physical_device.py diff --git a/.gitignore b/.gitignore index 5fdbd95..27761cd 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,5 @@ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] -.vscode \ No newline at end of file +.vscode +.flake \ No newline at end of file diff --git a/modules/config.py b/modules/config.py index 3c74216..3eca1d8 100644 --- a/modules/config.py +++ b/modules/config.py @@ -18,7 +18,8 @@ DEFAULT_CONFIG = { "create_journal": False, "sync_vms": False, "zabbix_device_removal": ["Decommissioning", "Inventory"], - "zabbix_device_disable": ["Offline", "Planned", "Staged", "Failed"] + "zabbix_device_disable": ["Offline", "Planned", "Staged", "Failed"], + "inventory_mode": "disabled" } @@ -36,7 +37,7 @@ def load_config(): def load_env_variable(config_environvar): """Returns config from environment variable""" - prefix = "NZS_" + prefix = "NBZX_" config_environvar = prefix + config_environvar.upper() if config_environvar in environ: return environ[config_environvar] diff --git a/tests/test_configuration_parsing.py b/tests/test_configuration_parsing.py index 4f97abf..23438b4 100644 --- a/tests/test_configuration_parsing.py +++ b/tests/test_configuration_parsing.py @@ -102,15 +102,26 @@ def test_load_config_file_not_found(): def test_load_env_variable_function(): """Test the load_env_variable function directly""" - # Test when the environment variable exists - with patch.dict(os.environ, {"templates_config_context": "True"}): + # Create a real environment variable for testing with correct prefix and uppercase + test_var = "NBZX_TEMPLATES_CONFIG_CONTEXT" + original_env = os.environ.get(test_var, None) + try: + # Set the environment variable with the proper prefix and case + os.environ[test_var] = "True" + + # Test that it's properly read (using lowercase in the function call) value = load_env_variable("templates_config_context") assert value == "True" - # Test when the environment variable doesn't exist - with patch.dict(os.environ, {}, clear=True): + # Test when the environment variable doesn't exist value = load_env_variable("nonexistent_variable") assert value is None + finally: + # Clean up - restore original environment + if original_env is not None: + os.environ[test_var] = original_env + else: + os.environ.pop(test_var, None) def test_load_config_file_exception_handling(): diff --git a/tests/test_device_deletion.py b/tests/test_device_deletion.py index 41c4420..392ba1a 100644 --- a/tests/test_device_deletion.py +++ b/tests/test_device_deletion.py @@ -1,142 +1,166 @@ -"""Testing device creation""" -from unittest.mock import MagicMock, patch, call +"""Tests for device deletion functionality in the PhysicalDevice class.""" +import unittest +from unittest.mock import MagicMock, patch +from zabbix_utils import APIRequestError from modules.device import PhysicalDevice -from modules.config import load_config - -config = load_config() +from modules.exceptions import SyncExternalError -def mock_nb_device(): - """Function to mock Netbox device""" - mock = MagicMock() 
- mock.id = 1 - mock.url = "http://netbox:8000/api/dcim/devices/1/" - mock.display_url = "http://netbox:8000/dcim/devices/1/" - mock.display = "SW01" - mock.name = "SW01" +class TestDeviceDeletion(unittest.TestCase): + """Test class for device deletion functionality.""" - mock.device_type = MagicMock() - mock.device_type.display = "Catalyst 3750G-48TS-S" - mock.device_type.manufacturer = MagicMock() - mock.device_type.manufacturer.display = "Cisco" - mock.device_type.manufacturer.name = "Cisco" - mock.device_type.manufacturer.slug = "cisco" - mock.device_type.manufacturer.description = "" - mock.device_type.model = "Catalyst 3750G-48TS-S" - mock.device_type.slug = "cisco-ws-c3750g-48ts-s" + def setUp(self): + """Set up test fixtures.""" + # Create mock NetBox device + self.mock_nb_device = MagicMock() + self.mock_nb_device.id = 123 + self.mock_nb_device.name = "test-device" + self.mock_nb_device.status.label = "Decommissioning" + self.mock_nb_device.custom_fields = {"zabbix_hostid": "456"} + self.mock_nb_device.config_context = {} - mock.role = MagicMock() - mock.role.id = 1 - mock.role.display = "Switch" - mock.role.name = "Switch" - mock.role.slug = "switch" + # Set up a primary IP + primary_ip = MagicMock() + primary_ip.address = "192.168.1.1/24" + self.mock_nb_device.primary_ip = primary_ip - mock.tenant = None - mock.platform = None - mock.serial = "0031876" - mock.asset_tag = None + # Create mock Zabbix API + self.mock_zabbix = MagicMock() + self.mock_zabbix.version = "6.0" - mock.site = MagicMock() - mock.site.display = "AMS01" - mock.site.name = "AMS01" - mock.site.slug = "ams01" + # Set up mock host.get response + self.mock_zabbix.host.get.return_value = [{"hostid": "456"}] - mock.location = None - mock.rack = None - mock.position = None - mock.face = None - mock.parent_device = None + # Mock NetBox journal class + self.mock_nb_journal = MagicMock() - mock.status = MagicMock() - mock.status.value = "decommissioning" - mock.status.label = "Decommissioning" + # Create logger mock + self.mock_logger = MagicMock() - mock.cluster = None - mock.virtual_chassis = None - mock.vc_position = None - mock.vc_priority = None - mock.description = "" - mock.comments = "" - mock.config_template = None - mock.config_context = {} - mock.local_context_data = None + # Create PhysicalDevice instance with mocks + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + self.device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + journal=True, + logger=self.mock_logger + ) - mock.custom_fields = {"zabbix_hostid": 1956} - return mock + def test_cleanup_successful_deletion(self): + """Test successful device deletion from Zabbix.""" + # Setup + self.mock_zabbix.host.get.return_value = [{"hostid": "456"}] + self.mock_zabbix.host.delete.return_value = {"hostids": ["456"]} + # Execute + self.device.cleanup() -def mock_zabbix(): - """Function to mock Zabbix""" - mock = MagicMock() - mock.host.get.return_value = [{}] - mock.host.delete.return_value = True - return mock + # Verify + self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[]) + self.mock_zabbix.host.delete.assert_called_once_with('456') + self.mock_nb_device.save.assert_called_once() + self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"]) + self.mock_logger.info.assert_called_with(f"Host {self.device.name}: " + "Deleted host from Zabbix.") + def test_cleanup_device_already_deleted(self): + """Test cleanup when device is already deleted from 
Zabbix.""" + # Setup + self.mock_zabbix.host.get.return_value = [] # Empty list means host not found -netbox_journals = MagicMock() -NB_VERSION = '4.2' -create_journal = MagicMock() -logger = MagicMock() + # Execute + self.device.cleanup() + # Verify + self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[]) + self.mock_zabbix.host.delete.assert_not_called() + self.mock_nb_device.save.assert_called_once() + self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"]) + self.mock_logger.info.assert_called_with( + f"Host {self.device.name}: was already deleted from Zabbix. Removed link in NetBox.") -def test_check_cluster_status(): - """Checks if the isCluster function is functioning properly""" - nb_device = mock_nb_device() - zabbix = mock_zabbix() - device = PhysicalDevice(nb_device, zabbix, None, None, - None, logger) - assert device.isCluster() is False + def test_cleanup_api_error(self): + """Test cleanup when Zabbix API returns an error.""" + # Setup + self.mock_zabbix.host.get.return_value = [{"hostid": "456"}] + self.mock_zabbix.host.delete.side_effect = APIRequestError("API Error") + # Execute and verify + with self.assertRaises(SyncExternalError): + self.device.cleanup() -def test_device_deletion_host_exists(): - """Checks device deletion process""" - nb_device = mock_nb_device() - zabbix = mock_zabbix() - with patch.object(PhysicalDevice, 'create_journal_entry') as mock_journal: - # Create device - device = PhysicalDevice(nb_device, zabbix, netbox_journals, NB_VERSION, - create_journal, logger) - device.cleanup() - # Check if Zabbix HostID is empty - assert device.nb.custom_fields[config["device_cf"]] is None - # Check if API calls are executed - device.zabbix.host.get.assert_called_once_with(filter={'hostid': 1956}, - output=[]) - device.zabbix.host.delete.assert_called_once_with(1956) - # check logger - mock_journal.assert_called_once_with("warning", - "Deleted host from Zabbix") - device.logger.info.assert_called_once_with("Host SW01: Deleted " - "host from Zabbix.") + # Verify correct calls were made + self.mock_zabbix.host.get.assert_called_once_with(filter={'hostid': '456'}, output=[]) + self.mock_zabbix.host.delete.assert_called_once_with('456') + self.mock_nb_device.save.assert_not_called() + self.mock_logger.error.assert_called() + def test_zeroize_cf(self): + """Test _zeroize_cf method that clears the custom field.""" + # Execute + self.device._zeroize_cf() # pylint: disable=protected-access -def test_device_deletion_host_not_exists(): - """ - Test if device in Netbox gets unlinked - when host is not present in Zabbix - """ - nb_device = mock_nb_device() - zabbix = mock_zabbix() - zabbix.host.get.return_value = None + # Verify + self.assertIsNone(self.mock_nb_device.custom_fields["zabbix_hostid"]) + self.mock_nb_device.save.assert_called_once() - with patch.object(PhysicalDevice, 'create_journal_entry') as mock_journal: - # Create new device - device = PhysicalDevice(nb_device, zabbix, netbox_journals, NB_VERSION, - create_journal, logger) - # Try to clean the device up in Zabbix - device.cleanup() - # Confirm that a call was issued to Zabbix to check if the host exists - device.zabbix.host.get.assert_called_once_with(filter={'hostid': 1956}, - output=[]) - # Confirm that no device was deleted in Zabbix - device.zabbix.host.delete.assert_not_called() - # Test logging - log_calls = [ - call('Host SW01: Deleted host from Zabbix.'), - call('Host SW01: was already deleted from Zabbix. 
' - 'Removed link in NetBox.') - ] - logger.info.assert_has_calls(log_calls) - assert logger.info.call_count == 2 - mock_journal.assert_called_once_with("warning", - "Deleted host from Zabbix") + def test_create_journal_entry(self): + """Test create_journal_entry method.""" + # Setup + test_message = "Test journal entry" + + # Execute + result = self.device.create_journal_entry("info", test_message) + + # Verify + self.assertTrue(result) + self.mock_nb_journal.create.assert_called_once() + journal_entry = self.mock_nb_journal.create.call_args[0][0] + self.assertEqual(journal_entry["assigned_object_type"], "dcim.device") + self.assertEqual(journal_entry["assigned_object_id"], 123) + self.assertEqual(journal_entry["kind"], "info") + self.assertEqual(journal_entry["comments"], test_message) + + def test_create_journal_entry_invalid_severity(self): + """Test create_journal_entry with invalid severity.""" + # Execute + result = self.device.create_journal_entry("invalid", "Test message") + + # Verify + self.assertFalse(result) + self.mock_nb_journal.create.assert_not_called() + self.mock_logger.warning.assert_called() + + def test_create_journal_entry_when_disabled(self): + """Test create_journal_entry when journaling is disabled.""" + # Setup - create device with journal=False + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + journal=False, # Disable journaling + logger=self.mock_logger + ) + + # Execute + result = device.create_journal_entry("info", "Test message") + + # Verify + self.assertFalse(result) + self.mock_nb_journal.create.assert_not_called() + + def test_cleanup_updates_journal(self): + """Test that cleanup method creates a journal entry.""" + # Setup + self.mock_zabbix.host.get.return_value = [{"hostid": "456"}] + + # Execute + with patch.object(self.device, 'create_journal_entry') as mock_journal_entry: + self.device.cleanup() + + # Verify + mock_journal_entry.assert_called_once_with("warning", "Deleted host from Zabbix") diff --git a/tests/test_interface.py b/tests/test_interface.py new file mode 100644 index 0000000..ff55218 --- /dev/null +++ b/tests/test_interface.py @@ -0,0 +1,247 @@ +"""Tests for the ZabbixInterface class in the interface module.""" +import unittest +from modules.interface import ZabbixInterface +from modules.exceptions import InterfaceConfigError + + +class TestZabbixInterface(unittest.TestCase): + """Test class for ZabbixInterface functionality.""" + + def setUp(self): + """Set up test fixtures.""" + self.test_ip = "192.168.1.1" + self.empty_context = {} + self.default_interface = ZabbixInterface(self.empty_context, self.test_ip) + + # Create some test contexts for different scenarios + self.snmpv2_context = { + "zabbix": { + "interface_type": 2, + "interface_port": "161", + "snmp": { + "version": 2, + "community": "public", + "bulk": 1 + } + } + } + + self.snmpv3_context = { + "zabbix": { + "interface_type": 2, + "snmp": { + "version": 3, + "securityname": "snmpuser", + "securitylevel": "authPriv", + "authprotocol": "SHA", + "authpassphrase": "authpass123", + "privprotocol": "AES", + "privpassphrase": "privpass123", + "contextname": "context1" + } + } + } + + self.agent_context = { + "zabbix": { + "interface_type": 1, + "interface_port": "10050" + } + } + + def test_init(self): + """Test initialization of ZabbixInterface.""" + interface = ZabbixInterface(self.empty_context, self.test_ip) + + # Check basic properties + 
self.assertEqual(interface.ip, self.test_ip) + self.assertEqual(interface.context, self.empty_context) + self.assertEqual(interface.interface["ip"], self.test_ip) + self.assertEqual(interface.interface["main"], "1") + self.assertEqual(interface.interface["useip"], "1") + self.assertEqual(interface.interface["dns"], "") + + def test_get_context_empty(self): + """Test get_context with empty context.""" + interface = ZabbixInterface(self.empty_context, self.test_ip) + result = interface.get_context() + self.assertFalse(result) + + def test_get_context_with_interface_type(self): + """Test get_context with interface_type but no port.""" + context = {"zabbix": {"interface_type": 2}} + interface = ZabbixInterface(context, self.test_ip) + + # Should set type and default port + result = interface.get_context() + self.assertTrue(result) + self.assertEqual(interface.interface["type"], 2) + self.assertEqual(interface.interface["port"], "161") # Default port for SNMP + + def test_get_context_with_interface_type_and_port(self): + """Test get_context with both interface_type and port.""" + context = {"zabbix": {"interface_type": 1, "interface_port": "12345"}} + interface = ZabbixInterface(context, self.test_ip) + + # Should set type and specified port + result = interface.get_context() + self.assertTrue(result) + self.assertEqual(interface.interface["type"], 1) + self.assertEqual(interface.interface["port"], "12345") + + def test_set_default_port(self): + """Test _set_default_port for different interface types.""" + interface = ZabbixInterface(self.empty_context, self.test_ip) + + # Test for agent type (1) + interface.interface["type"] = 1 + interface._set_default_port() # pylint: disable=protected-access + self.assertEqual(interface.interface["port"], "10050") + + # Test for SNMP type (2) + interface.interface["type"] = 2 + interface._set_default_port() # pylint: disable=protected-access + self.assertEqual(interface.interface["port"], "161") + + # Test for IPMI type (3) + interface.interface["type"] = 3 + interface._set_default_port() # pylint: disable=protected-access + self.assertEqual(interface.interface["port"], "623") + + # Test for JMX type (4) + interface.interface["type"] = 4 + interface._set_default_port() # pylint: disable=protected-access + self.assertEqual(interface.interface["port"], "12345") + + # Test for unsupported type + interface.interface["type"] = 99 + result = interface._set_default_port() # pylint: disable=protected-access + self.assertFalse(result) + + def test_set_snmp_v2(self): + """Test set_snmp with SNMPv2 configuration.""" + interface = ZabbixInterface(self.snmpv2_context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp + interface.set_snmp() + + # Check SNMP details + self.assertEqual(interface.interface["details"]["version"], "2") + self.assertEqual(interface.interface["details"]["community"], "public") + self.assertEqual(interface.interface["details"]["bulk"], "1") + + def test_set_snmp_v3(self): + """Test set_snmp with SNMPv3 configuration.""" + interface = ZabbixInterface(self.snmpv3_context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp + interface.set_snmp() + + # Check SNMP details + self.assertEqual(interface.interface["details"]["version"], "3") + self.assertEqual(interface.interface["details"]["securityname"], "snmpuser") + self.assertEqual(interface.interface["details"]["securitylevel"], "authPriv") + self.assertEqual(interface.interface["details"]["authprotocol"], "SHA") + 
self.assertEqual(interface.interface["details"]["authpassphrase"], "authpass123") + self.assertEqual(interface.interface["details"]["privprotocol"], "AES") + self.assertEqual(interface.interface["details"]["privpassphrase"], "privpass123") + self.assertEqual(interface.interface["details"]["contextname"], "context1") + + def test_set_snmp_no_snmp_config(self): + """Test set_snmp with missing SNMP configuration.""" + # Create context with interface type but no SNMP config + context = {"zabbix": {"interface_type": 2}} + interface = ZabbixInterface(context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp - should raise exception + with self.assertRaises(InterfaceConfigError): + interface.set_snmp() + + def test_set_snmp_unsupported_version(self): + """Test set_snmp with unsupported SNMP version.""" + # Create context with invalid SNMP version + context = { + "zabbix": { + "interface_type": 2, + "snmp": { + "version": 4 # Invalid version + } + } + } + interface = ZabbixInterface(context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp - should raise exception + with self.assertRaises(InterfaceConfigError): + interface.set_snmp() + + def test_set_snmp_no_version(self): + """Test set_snmp with missing SNMP version.""" + # Create context without SNMP version + context = { + "zabbix": { + "interface_type": 2, + "snmp": { + "community": "public" # No version specified + } + } + } + interface = ZabbixInterface(context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp - should raise exception + with self.assertRaises(InterfaceConfigError): + interface.set_snmp() + + def test_set_snmp_non_snmp_interface(self): + """Test set_snmp with non-SNMP interface type.""" + interface = ZabbixInterface(self.agent_context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp - should raise exception + with self.assertRaises(InterfaceConfigError): + interface.set_snmp() + + def test_set_default_snmp(self): + """Test set_default_snmp method.""" + interface = ZabbixInterface(self.empty_context, self.test_ip) + interface.set_default_snmp() + + # Check interface properties + self.assertEqual(interface.interface["type"], "2") + self.assertEqual(interface.interface["port"], "161") + self.assertEqual(interface.interface["details"]["version"], "2") + self.assertEqual(interface.interface["details"]["community"], "{$SNMP_COMMUNITY}") + self.assertEqual(interface.interface["details"]["bulk"], "1") + + def test_set_default_agent(self): + """Test set_default_agent method.""" + interface = ZabbixInterface(self.empty_context, self.test_ip) + interface.set_default_agent() + + # Check interface properties + self.assertEqual(interface.interface["type"], "1") + self.assertEqual(interface.interface["port"], "10050") + + def test_snmpv2_no_community(self): + """Test SNMPv2 with no community string specified.""" + # Create context with SNMPv2 but no community + context = { + "zabbix": { + "interface_type": 2, + "snmp": { + "version": 2 + } + } + } + interface = ZabbixInterface(context, self.test_ip) + interface.get_context() # Set the interface type + + # Call set_snmp + interface.set_snmp() + + # Should use default community string + self.assertEqual(interface.interface["details"]["community"], "{$SNMP_COMMUNITY}") diff --git a/tests/test_physical_device.py b/tests/test_physical_device.py new file mode 100644 index 0000000..4fe8ce3 --- /dev/null +++ b/tests/test_physical_device.py @@ -0,0 +1,373 
@@ +"""Tests for the PhysicalDevice class in the device module.""" +import unittest +from unittest.mock import MagicMock, patch +from modules.device import PhysicalDevice +from modules.exceptions import TemplateError, SyncInventoryError + + +class TestPhysicalDevice(unittest.TestCase): + """Test class for PhysicalDevice functionality.""" + + def setUp(self): + """Set up test fixtures.""" + # Create mock NetBox device + self.mock_nb_device = MagicMock() + self.mock_nb_device.id = 123 + self.mock_nb_device.name = "test-device" + self.mock_nb_device.status.label = "Active" + self.mock_nb_device.custom_fields = {"zabbix_hostid": None} + self.mock_nb_device.config_context = {} + + # Set up a primary IP + primary_ip = MagicMock() + primary_ip.address = "192.168.1.1/24" + self.mock_nb_device.primary_ip = primary_ip + + # Create mock Zabbix API + self.mock_zabbix = MagicMock() + self.mock_zabbix.version = "6.0" + + # Mock NetBox journal class + self.mock_nb_journal = MagicMock() + + # Create logger mock + self.mock_logger = MagicMock() + + # Create PhysicalDevice instance with mocks + with patch('modules.device.config', + {"device_cf": "zabbix_hostid", + "template_cf": "zabbix_template", + "templates_config_context": False, + "templates_config_context_overrule": False, + "traverse_regions": False, + "traverse_site_groups": False, + "inventory_mode": "disabled", + "inventory_sync": False, + "inventory_map": {} + }): + self.device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + journal=True, + logger=self.mock_logger + ) + + def test_init(self): + """Test the initialization of the PhysicalDevice class.""" + # Check that basic properties are set correctly + self.assertEqual(self.device.name, "test-device") + self.assertEqual(self.device.id, 123) + self.assertEqual(self.device.status, "Active") + self.assertEqual(self.device.ip, "192.168.1.1") + self.assertEqual(self.device.cidr, "192.168.1.1/24") + + def test_init_no_primary_ip(self): + """Test initialization when device has no primary IP.""" + # Set primary_ip to None + self.mock_nb_device.primary_ip = None + + # Creating device should raise SyncInventoryError + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + with self.assertRaises(SyncInventoryError): + PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + def test_set_basics_with_special_characters(self): + """Test _setBasics when device name contains special characters.""" + # Set name with special characters that + # will actually trigger the special character detection + self.mock_nb_device.name = "test-devïce" + + # We need to patch the search function to simulate finding special characters + with patch('modules.device.search') as mock_search, \ + patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + # Make the search function return True to simulate special characters + mock_search.return_value = True + + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # With the mocked search function, the name should be changed to NETBOX_ID format + self.assertEqual(device.name, f"NETBOX_ID{self.mock_nb_device.id}") + # And visible_name should be set to the original name + self.assertEqual(device.visible_name, "test-devïce") + # use_visible_name flag should be set + self.assertTrue(device.use_visible_name) + + def test_get_templates_context(self): + """Test 
get_templates_context with valid config.""" + # Set up config_context with valid template data + self.mock_nb_device.config_context = { + "zabbix": { + "templates": ["Template1", "Template2"] + } + } + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Test that templates are returned correctly + templates = device.get_templates_context() + self.assertEqual(templates, ["Template1", "Template2"]) + + def test_get_templates_context_with_string(self): + """Test get_templates_context with a string instead of list.""" + # Set up config_context with a string template + self.mock_nb_device.config_context = { + "zabbix": { + "templates": "Template1" + } + } + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Test that template is wrapped in a list + templates = device.get_templates_context() + self.assertEqual(templates, ["Template1"]) + + def test_get_templates_context_no_zabbix_key(self): + """Test get_templates_context when zabbix key is missing.""" + # Set up config_context without zabbix key + self.mock_nb_device.config_context = {} + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Test that TemplateError is raised + with self.assertRaises(TemplateError): + device.get_templates_context() + + def test_get_templates_context_no_templates_key(self): + """Test get_templates_context when templates key is missing.""" + # Set up config_context without templates key + self.mock_nb_device.config_context = {"zabbix": {}} + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Test that TemplateError is raised + with self.assertRaises(TemplateError): + device.get_templates_context() + + def test_set_template_with_config_context(self): + """Test set_template with templates_config_context=True.""" + # Set up config_context with templates + self.mock_nb_device.config_context = { + "zabbix": { + "templates": ["Template1"] + } + } + + # Mock get_templates_context to return expected templates + with patch.object(PhysicalDevice, 'get_templates_context', return_value=["Template1"]): + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call set_template with prefer_config_context=True + result = device.set_template(prefer_config_context=True, overrule_custom=False) + + # Check result and template names + self.assertTrue(result) + self.assertEqual(device.zbx_template_names, ["Template1"]) + + def test_set_inventory_disabled_mode(self): + """Test set_inventory with inventory_mode=disabled.""" + # Configure with disabled inventory mode + config_patch = { + "device_cf": "zabbix_hostid", + "inventory_mode": "disabled", + "inventory_sync": False + } + + with 
patch('modules.device.config', config_patch): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call set_inventory with the config patch still active + with patch('modules.device.config', config_patch): + result = device.set_inventory({}) + + # Check result + self.assertTrue(result) + # Default value for disabled inventory + self.assertEqual(device.inventory_mode, -1) + + def test_set_inventory_manual_mode(self): + """Test set_inventory with inventory_mode=manual.""" + # Configure with manual inventory mode + config_patch = { + "device_cf": "zabbix_hostid", + "inventory_mode": "manual", + "inventory_sync": False + } + + with patch('modules.device.config', config_patch): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call set_inventory with the config patch still active + with patch('modules.device.config', config_patch): + result = device.set_inventory({}) + + # Check result + self.assertTrue(result) + self.assertEqual(device.inventory_mode, 0) # Manual mode + + def test_set_inventory_automatic_mode(self): + """Test set_inventory with inventory_mode=automatic.""" + # Configure with automatic inventory mode + config_patch = { + "device_cf": "zabbix_hostid", + "inventory_mode": "automatic", + "inventory_sync": False + } + + with patch('modules.device.config', config_patch): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call set_inventory with the config patch still active + with patch('modules.device.config', config_patch): + result = device.set_inventory({}) + + # Check result + self.assertTrue(result) + self.assertEqual(device.inventory_mode, 1) # Automatic mode + + def test_set_inventory_with_inventory_sync(self): + """Test set_inventory with inventory_sync=True.""" + # Configure with inventory sync enabled + config_patch = { + "device_cf": "zabbix_hostid", + "inventory_mode": "manual", + "inventory_sync": True, + "inventory_map": { + "name": "name", + "serial": "serialno_a" + } + } + + with patch('modules.device.config', config_patch): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Create a mock device with the required attributes + mock_device_data = { + "name": "test-device", + "serial": "ABC123" + } + + # Call set_inventory with the config patch still active + with patch('modules.device.config', config_patch): + result = device.set_inventory(mock_device_data) + + # Check result + self.assertTrue(result) + self.assertEqual(device.inventory_mode, 0) # Manual mode + self.assertEqual(device.inventory, { + "name": "test-device", + "serialno_a": "ABC123" + }) + + def test_iscluster_true(self): + """Test isCluster when device is part of a cluster.""" + # Set up virtual_chassis + self.mock_nb_device.virtual_chassis = MagicMock() + + # Create device with the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Check isCluster result + self.assertTrue(device.isCluster()) + + def test_is_cluster_false(self): + """Test isCluster when device is not part of a cluster.""" + # Set virtual_chassis to None + self.mock_nb_device.virtual_chassis = None + + # Create device with 
the updated mock + with patch('modules.device.config', {"device_cf": "zabbix_hostid"}): + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Check isCluster result + self.assertFalse(device.isCluster()) From d60eb1cb2dfb6fdf3971290da9eed9c2f033e768 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 22:18:59 +0200 Subject: [PATCH 62/93] Removed python test files for linter. --- .github/workflows/quality.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml index 745f948..4421765 100644 --- a/.github/workflows/quality.yml +++ b/.github/workflows/quality.yml @@ -24,4 +24,4 @@ jobs: pip install -r requirements.txt - name: Analysing the code with pylint run: | - pylint --module-naming-style=any $(git ls-files '*.py') + pylint --module-naming-style=any modules/* netbox_zabbix_sync.py From 2998dfde549a8f9b6ec11e286d94df458287c19d Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 22:21:30 +0200 Subject: [PATCH 63/93] Specifiek Python version in pipeline test step --- .github/workflows/run_tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 6413d9c..9093c96 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -12,6 +12,8 @@ jobs: - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 + with: + python-version: 3.12 - name: Install dependencies run: | python -m pip install --upgrade pip From bbe28d97053aff1940e3b2e76524dec92a9c9ad2 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 22:31:36 +0200 Subject: [PATCH 64/93] Added all default config statements and added warning to any curious users. --- modules/config.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/modules/config.py b/modules/config.py index 3eca1d8..6528d67 100644 --- a/modules/config.py +++ b/modules/config.py @@ -8,6 +8,8 @@ from logging import getLogger logger = getLogger(__name__) +# PLEASE NOTE: This is a sample config file. 
You should create your own config.py + DEFAULT_CONFIG = { "templates_config_context": False, "templates_config_context_overrule": False, @@ -17,9 +19,32 @@ DEFAULT_CONFIG = { "create_hostgroups": True, "create_journal": False, "sync_vms": False, + "vm_hostgroup_format": "cluster_type/cluster/role", + "full_proxy_sync": False, "zabbix_device_removal": ["Decommissioning", "Inventory"], "zabbix_device_disable": ["Offline", "Planned", "Staged", "Failed"], - "inventory_mode": "disabled" + "hostgroup_format": "site/manufacturer/role", + "traverse_regions": False, + "traverse_site_groups": False, + "nb_device_filter": {"name__n": "null"}, + "nb_vm_filter": {"name__n": "null"}, + "inventory_mode": "disabled", + "inventory_sync": False, + "inventory_map": { + "asset_tag": "asset_tag", + "virtual_chassis/name": "chassis", + "status/label": "deployment_status", + "location/name": "location", + "latitude": "location_lat", + "longitude": "location_lon", + "comments": "notes", + "name": "name", + "rack/name": "site_rack", + "serial": "serialno_a", + "device_type/model": "type", + "device_type/manufacturer/name": "vendor", + "oob_ip/address": "oob_ip" + } } From 539ad64c8d517a9981a1fd294d0419692ddb744a Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 28 Apr 2025 22:49:04 +0200 Subject: [PATCH 65/93] Adds 2 new tests for virtual chassis --- tests/test_physical_device.py | 56 +++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/tests/test_physical_device.py b/tests/test_physical_device.py index 4fe8ce3..d0ba43b 100644 --- a/tests/test_physical_device.py +++ b/tests/test_physical_device.py @@ -371,3 +371,59 @@ class TestPhysicalDevice(unittest.TestCase): # Check isCluster result self.assertFalse(device.isCluster()) + + + def test_promote_master_device_primary(self): + """Test promoteMasterDevice when device is primary in cluster.""" + # Set up virtual chassis with master device + mock_vc = MagicMock() + mock_vc.name = "virtual-chassis-1" + mock_master = MagicMock() + mock_master.id = self.mock_nb_device.id # Set master ID to match the current device + mock_vc.master = mock_master + self.mock_nb_device.virtual_chassis = mock_vc + + # Create device with the updated mock + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call promoteMasterDevice and check the result + result = device.promoteMasterDevice() + + # Should return True for primary device + self.assertTrue(result) + # Device name should be updated to virtual chassis name + self.assertEqual(device.name, "virtual-chassis-1") + + + def test_promote_master_device_secondary(self): + """Test promoteMasterDevice when device is secondary in cluster.""" + # Set up virtual chassis with a different master device + mock_vc = MagicMock() + mock_vc.name = "virtual-chassis-1" + mock_master = MagicMock() + mock_master.id = self.mock_nb_device.id + 1 # Different ID than the current device + mock_vc.master = mock_master + self.mock_nb_device.virtual_chassis = mock_vc + + # Create device with the updated mock + device = PhysicalDevice( + self.mock_nb_device, + self.mock_zabbix, + self.mock_nb_journal, + "3.0", + logger=self.mock_logger + ) + + # Call promoteMasterDevice and check the result + result = device.promoteMasterDevice() + + # Should return False for secondary device + self.assertFalse(result) + # Device name should not be modified + self.assertEqual(device.name, "test-device") From bc53737e02017a972cdbe01085128ce0a5ee7a29 Mon Sep 17 
00:00:00 2001 From: Raymond Kuiper Date: Wed, 4 Jun 2025 14:23:01 +0200 Subject: [PATCH 66/93] first supoport of multiple hostgroups --- modules/.device.py.swp | Bin 0 -> 57344 bytes modules/device.py | 92 ++++++++++++++++++++++--------------- modules/hostgroups.py | 1 - modules/virtual_machine.py | 5 +- netbox_zabbix_sync.py | 14 ++++-- 5 files changed, 68 insertions(+), 44 deletions(-) create mode 100644 modules/.device.py.swp diff --git a/modules/.device.py.swp b/modules/.device.py.swp new file mode 100644 index 0000000000000000000000000000000000000000..62a997b799a5c93df7d70bce7a66d87b8f631d7d GIT binary patch literal 57344 zcmeI53zTJ7S?4bb1SAmy>ZrVqw-~!R>8|c1kU&cTNq5p|$jhXgSJRfa>Yl2)>8g8+ zdvA4jbx#AM2o8ge3KGf4Gf`9^0wbcl9UoByWo1CnF)kS(EMd$5iit2d^ZS2$@3Z%L z-0n`0y3kLL%+wjb}z z&-adPA8qs(cB~G%>nr(aM>jvxTgYeDHcp@XW{6D+Y*Jtn1=h!l+n;pKfr~D@z=rx% z`vX_O$uyMV3Puy6xgJ|CIvPrut|ac2PrUKJG=EF>h*2DZb9RJ zfd4<-ALjqx;_t^R@6Yl7U*zw%R^FfO|Nk+6|MSZG2m1dH`TNB_|FQBOlZjnd`ups+yB{Ha|8FbA z|H;aGA8qrtNr6oYY*Jv80-F@rq`)QxHYu=4flUf*Qecw;n-ut#P@vOowWM<&kUq=$ zf6)IIAKq%sgNwip@IBz3?`*X`3;q<`2Hpl<0)7I_gDb(s;BnwA@DCUTz5@Oj{AX|! z%z|^k+2Aa2H->@#4&D!b8N3V(z_Y;~a3RRRIpAK55PuCm1b!8~3A_}X0GEKzpoHHE zJ_EPU>;lzo(R4l+>0^b4)88;6F32`2iw6p;7b@2 zJ_g_-^o3>__*@n~WQ`FYIsy4kFY(x0DMSRaiCt8(AXslLP->yq9nOr_R(j*<_Kx<}gF7RER0GRH zVM2H)>oc0`t<7}0-C;f&71==?SE@8rO2g~(@%pfz<*I%{%I+W^QHL=!J(8z2DN=5? zW2J7$$D?e%GwLnSZV-L7Gwu!g*-GzlPVF;asF_;aZoZh!jq>qjiaMHBNw@BbO*M37 z^POVcS(;nztfh(IXXLv~wyYK)NJfxRD4V74>~Iysxe#+vGL-_u{Uni z&`JFdI->uj5s)qA#1DHBc*clo`B#nN!FzNShYjI6{mJ3fDG zZk(^Ktx(R>y|-3s3IwG(YrVN6c>HP18|nN%<)T=b=vM649@6WGfcQ_qyfh{dtRF58+#xzeL+@ z6la?+uYQNV<$9An@kH7d`p~Qjwat$b`^QprgRs}{jTyF%HwN~$@0#ll?BA75Ujqjn z91EP#`s=prdFE<)>2>=u{~&bQ#g)#|w(OeALcZ?38-CZrFY*yH;wU50ql4jLpQ_{C zA>*lRZDXa^AMeV#y-^3c&W;Cz?W>*s#`a#HM(8i(+dJd&us6R>&)Sw1Z#%=K^;IRG zJi|S-r|F`zvb}iSvNC7aEpIR}bXNA5izqXLQDn3+n#qqbKkjsej+~=?-AdkpyDSWI z4ZZPljv%m-XH0EF1_~{#M{Ydfa7rp2dcO}e9wcw#+}7T0LvnrS<}^#MNWrGa#ld>N zD}iOT!=G+%m@i18@RL51bALSD%8xDNYvb%{b9%jIFup>OOz3bOCsv|hadUE$-DXu* z&l(Sziie|o4szvlZcsP89p+0AbCkUCBOF;PdB|jXq_eV~mmgJfB#@|94zn^inh)cn z$XLsf<8v|G7~aq3v6f8OwYPrGu02l@ZHN^rxHufF`nJq^t80VdIGb*bhZ}Std*wZk zXNoe6LNGUI4g^_XR1Qx!W^(W;nuMttd<|;Qy;cWD^11Fhg4=@FX(gs+Gau3Av7aa9 zFm#iAxY$|9%K`U=$K}uoWB2Jw2DM(%a(!vPZ+Gitj*@kWH$K1o7}+xjli<&A^I`I& zD2y8v{!NLfuA{39hI+uYEQI6UbL-+F=ZZ6zK4)NDLIv zr8F#PG|D@}g=P12P~-@QaE~N%Ahd^c3aP`rT^Ouwgjd}>U(^3B@i@nCve8<;&|5^P zUGDU|ISg;fGz!md;TNtcElQmw(pNhxz3z4!LB`feU^_DP_La`@jf4TN3}A6^L|@qL zi->3nY8_&2!0Ze))9}51iK(uw&(rC)ujb?BL6>xjPPl!6k=K&`|3vh|9y+A-|Gi!z zyc>Q0jbJ}`JosMlRdo7~f{%b-0RI783ciXS|F_^{;1=*a@I>%MboRdh?*q4jRq#~s zXmAhu`rm?2fj5I|!A0OZz~|7@-vW+;99#u10T+XRMnC@wcsr2pz6_oMq_>{~zKDMQ zyWrJe8Ege#M=yUj=zw#;U!!~fGI$}_3mypWM$i5@kS_jG@H}uW_%`q%X!C0DLLfRV zgUxG`0-F@L4-}A)qRXr8SHRSwAf3F}KU^XbVVHM6LsT89M zWqq8Z6s{qD3cb#u*XZ7{PoSer)A zXb$c=6-Fw03zbJRD$36sug+%ui~gDPgCb0{wWU>@%JvtX%Jt*ydJ_>{9z_Nw7+HTd zb?p?X((a+D+97(c+dic3Ws1BSvGgWtq@iSq$uuN^ZIrR1k=6>KbeH86f}z5y7m~F4 zLf%JtLomx5dU-jsscf$(hv8vqIm%f25K^(!({M!sIkqHdRn3D*;?jNc;Brk;bU zp4dNvCabz$dtkrm(U&3K!!$DSryXVP#S+KdJy>1qpp9F2UsKDq(o?ddy|E|qfb@rv zV}?q~gpk^LFfUosLo)*+S=M+~-3VKzhpIV|&Ff|JNQ2#21B=yQBZrJG5xDG${A4q; zXdmdy!hj7SsP(~+fgJ(sx;jujX3AoM<-VnYh0E*XsfvSXU(CL;;-Lt>sfe!cjT>^}9Obxy9MpVFy5Ew&yOZ~h%H<-T>Lk9*u3(vApuX_f;#~W)o z5!&5_-YU#rTYuZ-XrXyA{C?0a+gc21Z_C<#-q24USwFj-J_}3jLy^O&nkkQF>ItRRv}|Q=1zK2+ zkloWOrV@s|0v0{YxWr)H7IdV@GMrWh16I$%6fYy0CZf^8u!o_F={Y0#+Ike9ZeZet znf3FdnIAUH2pWQ<<+|}qhUr8yjS==pFF&fRI>zbZJQ>O-w&Xf>VI8&fJXZ=e-xj)S zx-_J^-JeywylI)QJeHJn`|3`Ay`xDFn}b#e(ZJyXFIdO8*KOz?i|x#$Agw@-2V-I) 
z{>1GhN|fCw>Pb<>gYDnDRlXm;_gnV&efIYy_V?BH_m}MN>y!1aa6`|0u)0wDD zh?Y*FxPeXD`%d{uP%HjvW@Kv(4b`9>NCVha7Tr&{5m`w5UJh>C{{Jbo&i6|HMF0OW zuYul;zJDvw+5dCkDd6GYcJzJi0k|4y@BhCA4+Po+@H1clt^)U>GyD;egA(6bmpJV{eL|8EA;w51@8gB1Kt2$1(v`;FaxCjp93BM z{u-TMegp3SuLlQ!zT@{8pl<`*0d4_52QCBmqVIn}IzMt?yyp zQnA@4cHM#fyULui@`d4;Qqw7a*W0&kgY~z_mi<6ctPeTrBpKRdzgJpysGrV)744Gk zwCR-S#rZNjOKCmHIOH!DxZDz~08aI(2bx%cg_XEDOQ zG{Nh9SAm)M&{3WY8z5Th z)&OA*ytlAyyN@bet!#~>aSL0>M3O~iScJGg1Pkj36`IybwrDhCIndY6_lah)x%jXv zorBI>8ls|U=za^Gm4$Vz%_wZU&X^j#(Qem!Q-_AoBSnWWf&#^T3-8+#dwK}24qO|T z6MCB&u=V|RAt4Dy%#ggc*z<NJaHY1Yw5ABx>8&wfo<@j-z@n6LQ`5xbCF$Zpw~uX8xTaP0bES8RHqz@7m2^l}PhBzFklWUS^#-T6al(6Uz3f@{`fii1KmWtyoWkNFI z6nS~6G_Z_Gle=q>u(B3cDdQIwSZZ&H9^vv`;-Hc5jH1W*B3yKNIr9`?c;jJWwMaAu zW+%pgXz7lQ94aUbE7DB`cQb7e+-;fR41^A9XUnX`N~wt#Vae(~ueTP8$eAy>YW;r; zdho;0kEQ=}sE}R%2VMWY;0<5{JPCX+co6si`u=YYRO@7K8iKLw71XM*nle}R7gXW;EX>;FFs z4uhRQ`vvZ%4sQk<;4runSiO9mHgB60*rdQF1vV+LNr6oY)F=?-FJ!1qmosc!4bo$j z&=HIGZn~Upx|}77f~=^UE@$!-os*Gi)8)*$&6+M48Asr<#$D3&w^I^r)8(x2wlaGD Z|L$^DwaNs|-y3Bb{p7Y)eG+A|`M literal 0 HcmV?d00001 diff --git a/modules/device.py b/modules/device.py index b8d038d..0d9bca9 100644 --- a/modules/device.py +++ b/modules/device.py @@ -6,6 +6,7 @@ from copy import deepcopy from logging import getLogger from os import sys from re import search +from operator import itemgetter from zabbix_utils import APIRequestError @@ -64,11 +65,11 @@ class PhysicalDevice: self.status = nb.status.label self.zabbix = zabbix self.zabbix_id = None - self.group_id = None + self.group_ids = [] self.nb_api_version = nb_version self.zbx_template_names = [] self.zbx_templates = [] - self.hostgroup = None + self.hostgroups = [] self.tenant = nb.tenant self.config_context = nb.config_context self.zbxproxy = None @@ -152,7 +153,10 @@ class PhysicalDevice: nb_regions=nb_regions, ) # Generate hostgroup based on hostgroup format - self.hostgroup = hg.generate(hg_format) + if isinstance(hg_format, list): + self.hostgroups = [hg.generate(f) for f in hg_format] + else: + self.hostgroups.append(hg.generate(hg_format)) def set_template(self, prefer_config_context, overrule_custom): """Set Template""" @@ -333,12 +337,16 @@ class PhysicalDevice: OUTPUT: True / False """ # Go through all groups - for group in groups: - if group["name"] == self.hostgroup: - self.group_id = group["groupid"] - e = f"Host {self.name}: matched group {group['name']}" - self.logger.debug(e) - return True + self.logger.debug(self.hostgroups) + + for hg in self.hostgroups: + for group in groups: + if group["name"] == hg: + self.group_ids.append({"groupid": group["groupid"]}) + e = f"Host {self.name}: matched group {group['name']}" + self.logger.debug(e) + if self.group_ids: + return True return False def cleanup(self): @@ -514,7 +522,8 @@ class PhysicalDevice: templateids.append({"templateid": template["templateid"]}) # Set interface, group and template configuration interfaces = self.setInterfaceDetails() - groups = [{"groupid": self.group_id}] + + groups = self.group_ids # Set Zabbix proxy if defined self.setProxy(proxies) # Set basic data for host creation @@ -567,25 +576,26 @@ class PhysicalDevice: """ final_data = [] # Check if the hostgroup is in a nested format and check each parent - for pos in range(len(self.hostgroup.split("/"))): - zabbix_hg = self.hostgroup.rsplit("/", pos)[0] - if self.lookupZabbixHostgroup(hostgroups, zabbix_hg): - # Hostgroup already exists - continue - # Create new group - try: - # API call to Zabbix - groupid = self.zabbix.hostgroup.create(name=zabbix_hg) - e = f"Hostgroup 
'{zabbix_hg}': created in Zabbix." - self.logger.info(e) - # Add group to final data - final_data.append( - {"groupid": groupid["groupids"][0], "name": zabbix_hg} - ) - except APIRequestError as e: - msg = f"Hostgroup '{zabbix_hg}': unable to create. Zabbix returned {str(e)}." - self.logger.error(msg) - raise SyncExternalError(msg) from e + for hostgroup in self.hostgroups: + for pos in range(len(hostgroup.split("/"))): + zabbix_hg = hostgroup.rsplit("/", pos)[0] + if self.lookupZabbixHostgroup(hostgroups, zabbix_hg): + # Hostgroup already exists + continue + # Create new group + try: + # API call to Zabbix + groupid = self.zabbix.hostgroup.create(name=zabbix_hg) + e = f"Hostgroup '{zabbix_hg}': created in Zabbix." + self.logger.info(e) + # Add group to final data + final_data.append( + {"groupid": groupid["groupids"][0], "name": zabbix_hg} + ) + except APIRequestError as e: + msg = f"Hostgroup '{zabbix_hg}': unable to create. Zabbix returned {str(e)}." + self.logger.error(msg) + raise SyncExternalError(msg) from e return final_data def lookupZabbixHostgroup(self, group_list, lookup_group): @@ -625,7 +635,7 @@ class PhysicalDevice: Checks if Zabbix object is still valid with NetBox parameters. """ # If group is found or if the hostgroup is nested - if not self.setZabbixGroupID(groups) or len(self.hostgroup.split("/")) > 1: + if not self.setZabbixGroupID(groups): # or len(self.hostgroups.split("/")) > 1: if create_hostgroups: # Script is allowed to create a new hostgroup new_groups = self.createZabbixHostgroup(groups) @@ -633,7 +643,7 @@ class PhysicalDevice: # Add all new groups to the list of groups groups.append(group) # check if the initial group was not already found (and this is a nested folder check) - if not self.group_id: + if not self.group_ids: # Function returns true / false but also sets GroupID if not self.setZabbixGroupID(groups) and not create_hostgroups: e = ( @@ -642,6 +652,9 @@ class PhysicalDevice: ) self.logger.warning(e) raise SyncInventoryError(e) + #if self.group_ids: + # self.group_ids.append(self.pri_group_id) + # Prepare templates and proxy config self.zbxTemplatePrepper(templates) self.setProxy(proxies) @@ -680,6 +693,7 @@ class PhysicalDevice: f"Received value: {host['host']}" ) self.updateZabbixHost(host=self.name) + # Execute check depending on wether the name is special or not if self.use_visible_name: if host["name"] == self.visible_name: @@ -709,18 +723,20 @@ class PhysicalDevice: group_dictname = "hostgroups" if str(self.zabbix.version).startswith(("6", "5")): group_dictname = "groups" - for group in host[group_dictname]: - if group["groupid"] == self.group_id: - self.logger.debug(f"Host {self.name}: hostgroup in-sync.") - break - self.logger.warning(f"Host {self.name}: hostgroup OUT of sync.") - self.updateZabbixHost(groups={"groupid": self.group_id}) + # Check if hostgroups match + if (sorted(host[group_dictname], key=itemgetter('groupid')) == + sorted(self.group_ids, key=itemgetter('groupid'))): + self.logger.debug(f"Host {self.name}: hostgroups in-sync.") + else: + self.logger.warning(f"Host {self.name}: hostgroups OUT of sync.") + self.updateZabbixHost(groups=self.group_ids) if int(host["status"]) == self.zabbix_state: self.logger.debug(f"Host {self.name}: status in-sync.") else: self.logger.warning(f"Host {self.name}: status OUT of sync.") self.updateZabbixHost(status=str(self.zabbix_state)) + # Check if a proxy has been defined if self.zbxproxy: # Check if proxy or proxy group is defined @@ -882,7 +898,7 @@ class PhysicalDevice: e = ( f"Host 
{self.name} has unsupported interface configuration." f" Host has total of {len(host['interfaces'])} interfaces. " - "Manual interfention required." + "Manual intervention required." ) self.logger.error(e) raise SyncInventoryError(e) diff --git a/modules/hostgroups.py b/modules/hostgroups.py index d1350bd..68b0bb1 100644 --- a/modules/hostgroups.py +++ b/modules/hostgroups.py @@ -91,7 +91,6 @@ class Hostgroup: if self.nb.cluster: format_options["cluster"] = self.nb.cluster.name format_options["cluster_type"] = self.nb.cluster.type.name - self.format_options = format_options def set_nesting( diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index 34e3394..8915832 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -58,7 +58,10 @@ class VirtualMachine(PhysicalDevice): nb_regions=nb_regions, ) # Generate hostgroup based on hostgroup format - self.hostgroup = hg.generate(hg_format) + if isinstance(hg_format, list): + self.hostgroups = [hg.generate(f) for f in hg_format] + else: + self.hostgroups.append(hg.generate(hg_format)) def set_vm_template(self): """Set Template for VMs. Overwrites default class diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 79d8a20..08e3036 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -85,7 +85,13 @@ def main(arguments): # Set NetBox API netbox = api(netbox_host, token=netbox_token, threading=True) # Check if the provided Hostgroup layout is valid - hg_objects = hostgroup_format.split("/") + hg_objects = [] + if isinstance(hostgroup_format,list): + for l in hostgroup_format: + hg_objects = hg_objects + l.split("/") + else: + hg_objects = hostgroup_format.split("/") + hg_objects = sorted(set(hg_objects)) allowed_objects = [ "location", "role", @@ -116,7 +122,7 @@ def main(arguments): if hg_object not in allowed_objects: e = ( f"Hostgroup item {hg_object} is not valid. Make sure you" - " use valid items and seperate them with '/'." + " use valid items and separate them with '/'." ) logger.error(e) raise HostgroupError(e) @@ -215,7 +221,7 @@ def main(arguments): create_hostgroups, ) continue - # Add hostgroup is config is set + # Add hostgroup if config is set if create_hostgroups: # Create new hostgroup. Potentially multiple groups if nested hostgroups = vm.createZabbixHostgroup(zabbix_groups) @@ -243,7 +249,7 @@ def main(arguments): continue device.set_hostgroup(hostgroup_format, netbox_site_groups, netbox_regions) # Check if a valid hostgroup has been found for this VM. 
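# A worked example of the new list handling (values are hypothetical, for illustration only):
# with config such as
#     hostgroup_format = ["site/manufacturer/role", "tenant/site"]
# set_hostgroup() now generates one group name per format string, e.g.
#     device.hostgroups == ["Amsterdam/Cisco/Router", "Acme/Amsterdam"]
# while a plain string still yields a single-element list, so the check below
# only skips a device when no hostgroup could be generated at all.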
- if not device.hostgroup: + if not device.hostgroups: continue device.set_inventory(nb_device) device.set_usermacros() From f7eb47a8a83a450d918a544218992b32eb13ced3 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 4 Jun 2025 14:23:46 +0200 Subject: [PATCH 67/93] removed scratch file --- modules/.device.py.swp | Bin 57344 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 modules/.device.py.swp diff --git a/modules/.device.py.swp b/modules/.device.py.swp deleted file mode 100644 index 62a997b799a5c93df7d70bce7a66d87b8f631d7d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 57344 zcmeI53zTJ7S?4bb1SAmy>ZrVqw-~!R>8|c1kU&cTNq5p|$jhXgSJRfa>Yl2)>8g8+ zdvA4jbx#AM2o8ge3KGf4Gf`9^0wbcl9UoByWo1CnF)kS(EMd$5iit2d^ZS2$@3Z%L z-0n`0y3kLL%+wjb}z z&-adPA8qs(cB~G%>nr(aM>jvxTgYeDHcp@XW{6D+Y*Jtn1=h!l+n;pKfr~D@z=rx% z`vX_O$uyMV3Puy6xgJ|CIvPrut|ac2PrUKJG=EF>h*2DZb9RJ zfd4<-ALjqx;_t^R@6Yl7U*zw%R^FfO|Nk+6|MSZG2m1dH`TNB_|FQBOlZjnd`ups+yB{Ha|8FbA z|H;aGA8qrtNr6oYY*Jv80-F@rq`)QxHYu=4flUf*Qecw;n-ut#P@vOowWM<&kUq=$ zf6)IIAKq%sgNwip@IBz3?`*X`3;q<`2Hpl<0)7I_gDb(s;BnwA@DCUTz5@Oj{AX|! z%z|^k+2Aa2H->@#4&D!b8N3V(z_Y;~a3RRRIpAK55PuCm1b!8~3A_}X0GEKzpoHHE zJ_EPU>;lzo(R4l+>0^b4)88;6F32`2iw6p;7b@2 zJ_g_-^o3>__*@n~WQ`FYIsy4kFY(x0DMSRaiCt8(AXslLP->yq9nOr_R(j*<_Kx<}gF7RER0GRH zVM2H)>oc0`t<7}0-C;f&71==?SE@8rO2g~(@%pfz<*I%{%I+W^QHL=!J(8z2DN=5? zW2J7$$D?e%GwLnSZV-L7Gwu!g*-GzlPVF;asF_;aZoZh!jq>qjiaMHBNw@BbO*M37 z^POVcS(;nztfh(IXXLv~wyYK)NJfxRD4V74>~Iysxe#+vGL-_u{Uni z&`JFdI->uj5s)qA#1DHBc*clo`B#nN!FzNShYjI6{mJ3fDG zZk(^Ktx(R>y|-3s3IwG(YrVN6c>HP18|nN%<)T=b=vM649@6WGfcQ_qyfh{dtRF58+#xzeL+@ z6la?+uYQNV<$9An@kH7d`p~Qjwat$b`^QprgRs}{jTyF%HwN~$@0#ll?BA75Ujqjn z91EP#`s=prdFE<)>2>=u{~&bQ#g)#|w(OeALcZ?38-CZrFY*yH;wU50ql4jLpQ_{C zA>*lRZDXa^AMeV#y-^3c&W;Cz?W>*s#`a#HM(8i(+dJd&us6R>&)Sw1Z#%=K^;IRG zJi|S-r|F`zvb}iSvNC7aEpIR}bXNA5izqXLQDn3+n#qqbKkjsej+~=?-AdkpyDSWI z4ZZPljv%m-XH0EF1_~{#M{Ydfa7rp2dcO}e9wcw#+}7T0LvnrS<}^#MNWrGa#ld>N zD}iOT!=G+%m@i18@RL51bALSD%8xDNYvb%{b9%jIFup>OOz3bOCsv|hadUE$-DXu* z&l(Sziie|o4szvlZcsP89p+0AbCkUCBOF;PdB|jXq_eV~mmgJfB#@|94zn^inh)cn z$XLsf<8v|G7~aq3v6f8OwYPrGu02l@ZHN^rxHufF`nJq^t80VdIGb*bhZ}Std*wZk zXNoe6LNGUI4g^_XR1Qx!W^(W;nuMttd<|;Qy;cWD^11Fhg4=@FX(gs+Gau3Av7aa9 zFm#iAxY$|9%K`U=$K}uoWB2Jw2DM(%a(!vPZ+Gitj*@kWH$K1o7}+xjli<&A^I`I& zD2y8v{!NLfuA{39hI+uYEQI6UbL-+F=ZZ6zK4)NDLIv zr8F#PG|D@}g=P12P~-@QaE~N%Ahd^c3aP`rT^Ouwgjd}>U(^3B@i@nCve8<;&|5^P zUGDU|ISg;fGz!md;TNtcElQmw(pNhxz3z4!LB`feU^_DP_La`@jf4TN3}A6^L|@qL zi->3nY8_&2!0Ze))9}51iK(uw&(rC)ujb?BL6>xjPPl!6k=K&`|3vh|9y+A-|Gi!z zyc>Q0jbJ}`JosMlRdo7~f{%b-0RI783ciXS|F_^{;1=*a@I>%MboRdh?*q4jRq#~s zXmAhu`rm?2fj5I|!A0OZz~|7@-vW+;99#u10T+XRMnC@wcsr2pz6_oMq_>{~zKDMQ zyWrJe8Ege#M=yUj=zw#;U!!~fGI$}_3mypWM$i5@kS_jG@H}uW_%`q%X!C0DLLfRV zgUxG`0-F@L4-}A)qRXr8SHRSwAf3F}KU^XbVVHM6LsT89M zWqq8Z6s{qD3cb#u*XZ7{PoSer)A zXb$c=6-Fw03zbJRD$36sug+%ui~gDPgCb0{wWU>@%JvtX%Jt*ydJ_>{9z_Nw7+HTd zb?p?X((a+D+97(c+dic3Ws1BSvGgWtq@iSq$uuN^ZIrR1k=6>KbeH86f}z5y7m~F4 zLf%JtLomx5dU-jsscf$(hv8vqIm%f25K^(!({M!sIkqHdRn3D*;?jNc;Brk;bU zp4dNvCabz$dtkrm(U&3K!!$DSryXVP#S+KdJy>1qpp9F2UsKDq(o?ddy|E|qfb@rv zV}?q~gpk^LFfUosLo)*+S=M+~-3VKzhpIV|&Ff|JNQ2#21B=yQBZrJG5xDG${A4q; zXdmdy!hj7SsP(~+fgJ(sx;jujX3AoM<-VnYh0E*XsfvSXU(CL;;-Lt>sfe!cjT>^}9Obxy9MpVFy5Ew&yOZ~h%H<-T>Lk9*u3(vApuX_f;#~W)o z5!&5_-YU#rTYuZ-XrXyA{C?0a+gc21Z_C<#-q24USwFj-J_}3jLy^O&nkkQF>ItRRv}|Q=1zK2+ zkloWOrV@s|0v0{YxWr)H7IdV@GMrWh16I$%6fYy0CZf^8u!o_F={Y0#+Ike9ZeZet znf3FdnIAUH2pWQ<<+|}qhUr8yjS==pFF&fRI>zbZJQ>O-w&Xf>VI8&fJXZ=e-xj)S 
z{>1GhN|fCw>Pb<>gYDnDRlXm;_gnV&efIYy_V?BH_m}MN>y!1aa6`|0u)0wDD zh?Y*FxPeXD`%d{uP%HjvW@Kv(4b`9>NCVha7Tr&{5m`w5UJh>C{{Jbo&i6|HMF0OW zuYul;zJDvw+5dCkDd6GYcJzJi0k|4y@BhCA4+Po+@H1clt^)U>GyD;egA(6bmpJV{eL|8EA;w51@8gB1Kt2$1(v`;FaxCjp93BM z{u-TMegp3SuLlQ!zT@{8pl<`*0d4_52QCBmqVIn}IzMt?yyp zQnA@4cHM#fyULui@`d4;Qqw7a*W0&kgY~z_mi<6ctPeTrBpKRdzgJpysGrV)744Gk zwCR-S#rZNjOKCmHIOH!DxZDz~08aI(2bx%cg_XEDOQ zG{Nh9SAm)M&{3WY8z5Th z)&OA*ytlAyyN@bet!#~>aSL0>M3O~iScJGg1Pkj36`IybwrDhCIndY6_lah)x%jXv zorBI>8ls|U=za^Gm4$Vz%_wZU&X^j#(Qem!Q-_AoBSnWWf&#^T3-8+#dwK}24qO|T z6MCB&u=V|RAt4Dy%#ggc*z<NJaHY1Yw5ABx>8&wfo<@j-z@n6LQ`5xbCF$Zpw~uX8xTaP0bES8RHqz@7m2^l}PhBzFklWUS^#-T6al(6Uz3f@{`fii1KmWtyoWkNFI z6nS~6G_Z_Gle=q>u(B3cDdQIwSZZ&H9^vv`;-Hc5jH1W*B3yKNIr9`?c;jJWwMaAu zW+%pgXz7lQ94aUbE7DB`cQb7e+-;fR41^A9XUnX`N~wt#Vae(~ueTP8$eAy>YW;r; zdho;0kEQ=}sE}R%2VMWY;0<5{JPCX+co6si`u=YYRO@7K8iKLw71XM*nle}R7gXW;EX>;FFs z4uhRQ`vvZ%4sQk<;4runSiO9mHgB60*rdQF1vV+LNr6oY)F=?-FJ!1qmosc!4bo$j z&=HIGZn~Upx|}77f~=^UE@$!-os*Gi)8)*$&6+M48Asr<#$D3&w^I^r)8(x2wlaGD Z|L$^DwaNs|-y3Bb{p7Y)eG+A|`M From 27ee4c341fee51acd44aec5377652cc0e388d526 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Wed, 4 Jun 2025 15:18:27 +0200 Subject: [PATCH 68/93] Fixed multiple hostgroups for devices --- modules/device.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/modules/device.py b/modules/device.py index 0d9bca9..d5bb635 100644 --- a/modules/device.py +++ b/modules/device.py @@ -337,15 +337,13 @@ class PhysicalDevice: OUTPUT: True / False """ # Go through all groups - self.logger.debug(self.hostgroups) - for hg in self.hostgroups: for group in groups: if group["name"] == hg: self.group_ids.append({"groupid": group["groupid"]}) - e = f"Host {self.name}: matched group {group['name']}" + e = f"Host {self.name}: matched group \"{group['name']}\" (ID:{group['groupid']})" self.logger.debug(e) - if self.group_ids: + if len(self.group_ids) == len(self.hostgroups): return True return False From 77b0798b65c86ec2026b936c88734c7464f79e1c Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 5 Jun 2025 11:39:42 +0200 Subject: [PATCH 69/93] Added verify of vm_hostgroup_format (moved fucntion to tools.py) --- modules/tools.py | 46 ++++++++++++++++++++++++++++++++++++++++++- netbox_zabbix_sync.py | 34 ++++++-------------------------- 2 files changed, 51 insertions(+), 29 deletions(-) diff --git a/modules/tools.py b/modules/tools.py index 791025d..e49cf13 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -1,5 +1,5 @@ """A collection of tools used by several classes""" - +from modules.exceptions import HostgroupError def convert_recordset(recordset): """Converts netbox RedcordSet to list of dicts.""" @@ -99,3 +99,47 @@ def remove_duplicates(input_list, sortkey=None): if sortkey and isinstance(sortkey, str): output_list.sort(key=lambda x: x[sortkey]) return output_list + +def verify_hg_format(hg_format, hg_type="dev", logger=None): + """ + Verifies hostgroup field format + """ + allowed_objects = {"dev": ["location", + "rack", + "role", + "manufacturer", + "region", + "site", + "site_group", + "tenant", + "tenant_group", + "platform", + "cluster"] + ,"vm": ["location", + "role", + "manufacturer", + "region", + "site", + "site_group", + "tenant", + "tenant_group", + "cluster", + "device", + "platform"] + } + hg_objects = [] + if isinstance(hg_format,list): + for f in hg_format: + hg_objects = hg_objects + f.split("/") + else: + hg_objects = hg_format.split("/") + hg_objects = sorted(set(hg_objects)) + for hg_object in hg_objects: + if hg_object not in 
allowed_objects[hg_type]: + e = ( + f"Hostgroup item {hg_object} is not valid. Make sure you" + " use valid items and separate them with '/'." + ) + logger.error(e) + raise HostgroupError(e) + diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 08e3036..340fabc 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -13,9 +13,9 @@ from requests.exceptions import ConnectionError as RequestsConnectionError from zabbix_utils import APIRequestError, ProcessingError, ZabbixAPI from modules.device import PhysicalDevice -from modules.exceptions import EnvironmentVarError, HostgroupError, SyncError +from modules.exceptions import EnvironmentVarError, SyncError from modules.logging import get_logger, set_log_levels, setup_logger -from modules.tools import convert_recordset, proxy_prepper +from modules.tools import convert_recordset, proxy_prepper, verify_hg_format from modules.virtual_machine import VirtualMachine try: @@ -84,24 +84,6 @@ def main(arguments): netbox_token = environ.get("NETBOX_TOKEN") # Set NetBox API netbox = api(netbox_host, token=netbox_token, threading=True) - # Check if the provided Hostgroup layout is valid - hg_objects = [] - if isinstance(hostgroup_format,list): - for l in hostgroup_format: - hg_objects = hg_objects + l.split("/") - else: - hg_objects = hostgroup_format.split("/") - hg_objects = sorted(set(hg_objects)) - allowed_objects = [ - "location", - "role", - "manufacturer", - "region", - "site", - "site_group", - "tenant", - "tenant_group", - ] # Create API call to get all custom fields which are on the device objects try: device_cfs = list( @@ -118,14 +100,10 @@ def main(arguments): sys.exit(1) for cf in device_cfs: allowed_objects.append(cf.name) - for hg_object in hg_objects: - if hg_object not in allowed_objects: - e = ( - f"Hostgroup item {hg_object} is not valid. Make sure you" - " use valid items and separate them with '/'." - ) - logger.error(e) - raise HostgroupError(e) + # Check if the provided Hostgroup layout is valid + verify_hg_format(hostgroup_format, hg_type="dev", logger=logger) + verify_hg_format(vm_hostgroup_format, hg_type="vm", logger=logger) + # Set Zabbix API try: ssl_ctx = ssl.create_default_context() From 298e6c4370433971c572e1f471ffa7e0fb9191f3 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Thu, 5 Jun 2025 11:53:17 +0200 Subject: [PATCH 70/93] support multiple hostgroups for vm --- netbox_zabbix_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 340fabc..9cd0eda 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -166,7 +166,7 @@ def main(arguments): continue vm.set_hostgroup(vm_hostgroup_format, netbox_site_groups, netbox_regions) # Check if a valid hostgroup has been found for this VM. 
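# Usage sketch for the verify_hg_format() helper introduced above (the format strings are
# hypothetical; validation follows the allowed_objects table in modules/tools.py):
#     verify_hg_format("site/manufacturer/role", hg_type="dev", logger=logger)   # valid
#     verify_hg_format(["cluster/role", "device"], hg_type="vm", logger=logger)  # lists are accepted
#     verify_hg_format("site/bogus_item", hg_type="vm", logger=logger)           # raises HostgroupError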
- if not vm.hostgroup: + if not vm.hostgroups: continue vm.set_inventory(nb_vm) vm.set_usermacros() From 9e1a90833d984ff19aa39d5a9e862f1e2da5d46e Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Sun, 8 Jun 2025 21:45:45 +0200 Subject: [PATCH 71/93] Added new config parameters to base template --- modules/config.py | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/modules/config.py b/modules/config.py index 6528d67..586227d 100644 --- a/modules/config.py +++ b/modules/config.py @@ -1,5 +1,5 @@ """ -Module for parsing configuration from the top level config.yaml file +Module for parsing configuration from the top level config.py file """ from pathlib import Path from importlib import util @@ -8,7 +8,8 @@ from logging import getLogger logger = getLogger(__name__) -# PLEASE NOTE: This is a sample config file. You should create your own config.py +# PLEASE NOTE: This is a sample config file. Please do NOT make any edits in this file! +# You should create your own config.py and it will overwrite the default config. DEFAULT_CONFIG = { "templates_config_context": False, @@ -30,7 +31,7 @@ DEFAULT_CONFIG = { "nb_vm_filter": {"name__n": "null"}, "inventory_mode": "disabled", "inventory_sync": False, - "inventory_map": { + "device_inventory_map": { "asset_tag": "asset_tag", "virtual_chassis/name": "chassis", "status/label": "deployment_status", @@ -44,6 +45,38 @@ DEFAULT_CONFIG = { "device_type/model": "type", "device_type/manufacturer/name": "vendor", "oob_ip/address": "oob_ip" + }, + "vm_inventory_map": { + "status/label": "deployment_status", + "comments": "notes", + "name": "name" + }, + "usermacro_sync": False, + "device_usermacro_map": { + "serial": "{$HW_SERIAL}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}" + }, + "vm_usermacro_map": { + "memory": "{$TOTAL_MEMORY}", + "role/name": "{$DEV_ROLE}", + "url": "{$NB_URL}", + "id": "{$NB_ID}" + }, + "tag_sync": False, + "tag_lower": True, + "tag_name": 'NetBox', + "tag_value": "name", + "device_tag_map": { + "site/name": "site", + "rack/name": "rack", + "platform/name": "target" + }, + "vm_tag_map": { + "site/name": "site", + "cluster/name": "cluster", + "platform/name": "target" } } From a325863aecb99067e16137cc4913976d8604733d Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Sun, 8 Jun 2025 22:13:36 +0200 Subject: [PATCH 72/93] Fixed several config errors, double exception imports, Linter stuff and edited test for new device_inventory_map key --- modules/device.py | 28 ++++++++++++---------------- modules/virtual_machine.py | 13 +++++-------- tests/test_physical_device.py | 4 ++-- 3 files changed, 19 insertions(+), 26 deletions(-) diff --git a/modules/device.py b/modules/device.py index 8bea73d..971a5b3 100644 --- a/modules/device.py +++ b/modules/device.py @@ -124,8 +124,8 @@ class PhysicalDevice: self.nb, self.nb_api_version, logger=self.logger, - nested_sitegroup_flag=traverse_site_groups, - nested_region_flag=traverse_regions, + nested_sitegroup_flag=config['traverse_site_groups'], + nested_region_flag=config['traverse_regions'], nb_groups=nb_site_groups, nb_regions=nb_regions, ) @@ -166,7 +166,7 @@ class PhysicalDevice: return [device_type_cfs[config["template_cf"]]] # Custom field not found, return error e = ( - f"Custom field {template_cf} not " + f"Custom field {config['template_cf']} not " f"found for {self.nb.device_type.manufacturer.name}" f" - {self.nb.device_type.display}." 
) @@ -394,7 +394,7 @@ class PhysicalDevice: macros = ZabbixUsermacros( self.nb, self._usermacro_map(), - usermacro_sync, + config['usermacro_sync'], logger=self.logger, host=self.name, ) @@ -411,10 +411,10 @@ class PhysicalDevice: tags = ZabbixTags( self.nb, self._tag_map(), - tag_sync, - tag_lower, - tag_name=tag_name, - tag_value=tag_value, + config['tag_sync'], + config['tag_lower'], + tag_name=config['tag_name'], + tag_value=config['tag_value'], logger=self.logger, host=self.name, ) @@ -771,15 +771,11 @@ class PhysicalDevice: self.updateZabbixHost(inventory=self.inventory) # Check host usermacros - if usermacro_sync: + if config['usermacro_sync']: macros_filtered = [] # Do not re-sync secret usermacros unless sync is set to 'full' - if str(usermacro_sync).lower() != "full": - for m in - - - - (self.usermacros): + if str(config['usermacro_sync']).lower() != "full": + for m in deepcopy(self.usermacros): if m["type"] == str(1): # Remove the value as the api doesn't return it # this will allow us to only update usermacros that don't exist @@ -792,7 +788,7 @@ class PhysicalDevice: self.updateZabbixHost(macros=self.usermacros) # Check host usermacros - if tag_sync: + if config['tag_sync']: if remove_duplicates(host["tags"], sortkey="tag") == self.tags: self.logger.debug(f"Host {self.name}: tags in-sync.") else: diff --git a/modules/virtual_machine.py b/modules/virtual_machine.py index acabd1d..7ee6659 100644 --- a/modules/virtual_machine.py +++ b/modules/virtual_machine.py @@ -4,9 +4,6 @@ from modules.device import PhysicalDevice from modules.exceptions import InterfaceConfigError, SyncInventoryError, TemplateError from modules.hostgroups import Hostgroup from modules.interface import ZabbixInterface - -from modules.exceptions import (TemplateError, InterfaceConfigError, - SyncInventoryError) from modules.config import load_config # Load config config = load_config() @@ -22,15 +19,15 @@ class VirtualMachine(PhysicalDevice): def _inventory_map(self): """use VM inventory maps""" - return vm_inventory_map + return config["vm_inventory_map"] def _usermacro_map(self): """use VM usermacro maps""" - return vm_usermacro_map + return config["vm_usermacro_map"] def _tag_map(self): """use VM tag maps""" - return vm_tag_map + return config["vm_tag_map"] def set_hostgroup(self, hg_format, nb_site_groups, nb_regions): """Set the hostgroup for this device""" @@ -40,8 +37,8 @@ class VirtualMachine(PhysicalDevice): self.nb, self.nb_api_version, logger=self.logger, - nested_sitegroup_flag=traverse_site_groups, - nested_region_flag=traverse_regions, + nested_sitegroup_flag=config["traverse_site_groups"], + nested_region_flag=config["traverse_regions"], nb_groups=nb_site_groups, nb_regions=nb_regions, ) diff --git a/tests/test_physical_device.py b/tests/test_physical_device.py index d0ba43b..1b79ad8 100644 --- a/tests/test_physical_device.py +++ b/tests/test_physical_device.py @@ -43,7 +43,7 @@ class TestPhysicalDevice(unittest.TestCase): "traverse_site_groups": False, "inventory_mode": "disabled", "inventory_sync": False, - "inventory_map": {} + "device_inventory_map": {} }): self.device = PhysicalDevice( self.mock_nb_device, @@ -303,7 +303,7 @@ class TestPhysicalDevice(unittest.TestCase): "device_cf": "zabbix_hostid", "inventory_mode": "manual", "inventory_sync": True, - "inventory_map": { + "device_inventory_map": { "name": "name", "serial": "serialno_a" } From 8df17f208ce8494469bc4a079c83f5cd4e7cc788 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Wed, 11 Jun 2025 20:09:53 +0000 Subject: [PATCH 73/93] 
Fixed small typo in Readme, Updated zabbix-utils, Added Devcontainer, Fixed logging and class description in usermacros module, fixed Zabbix consistencycheck for Usermacros and added unit tests for usermacros. --- .devcontainer/devcontainer.json | 22 ++++++ README.md | 2 +- modules/device.py | 34 ++++++--- modules/usermacros.py | 10 ++- requirements.txt | 4 +- tests/test_usermacros.py | 125 ++++++++++++++++++++++++++++++++ 6 files changed, 178 insertions(+), 19 deletions(-) create mode 100644 .devcontainer/devcontainer.json create mode 100644 tests/test_usermacros.py diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..99322f6 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,22 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/python +{ + "name": "Python 3", + // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile + "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye", + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "pip3 install --user -r requirements.txt && pip3 install --user pylint pytest" + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "root" +} diff --git a/README.md b/README.md index 62a6673..86bb532 100644 --- a/README.md +++ b/README.md @@ -720,7 +720,7 @@ I would recommend using usermacros for sensitive data such as community strings since the data in NetBox is plain-text. > **_NOTE:_** Not all SNMP data is required for a working configuration. -> [The following parameters are allowed](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed")but +> [The following parameters are allowed](https://www.zabbix.com/documentation/current/manual/api/reference/hostinterface/object#details_tag "The following parameters are allowed") but > are not all required, depending on your environment. 
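As a usage sketch of the usermacro handling these patches refine (the macro name and values
below are hypothetical, and the config-context layout mirrors the one exercised by the new
unit tests), a sensitive value such as an SNMP community can be supplied as a secret
usermacro on the NetBox object:

    # Hypothetical NetBox config context, shown here as a Python dict.
    # "type": "secret" maps to Zabbix usermacro type 1; its value is omitted from the
    # consistency-check comparison below unless usermacro_sync is set to "full".
    config_context = {
        "zabbix": {
            "usermacros": {
                "{$SNMP_COMMUNITY}": {
                    "value": "public",
                    "type": "secret",
                    "description": "SNMP community used by the host interface",
                }
            }
        }
    }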
diff --git a/modules/device.py b/modules/device.py index 971a5b3..b6903fe 100644 --- a/modules/device.py +++ b/modules/device.py @@ -55,7 +55,7 @@ class PhysicalDevice: self.nb_journals = nb_journal_class self.inventory_mode = -1 self.inventory = {} - self.usermacros = {} + self.usermacros = [] self.tags = {} self.logger = logger if logger else getLogger(__name__) self._setBasics() @@ -400,6 +400,7 @@ class PhysicalDevice: ) if macros.sync is False: self.usermacros = [] + return True self.usermacros = macros.generate() return True @@ -772,22 +773,31 @@ class PhysicalDevice: # Check host usermacros if config['usermacro_sync']: - macros_filtered = [] - # Do not re-sync secret usermacros unless sync is set to 'full' - if str(config['usermacro_sync']).lower() != "full": - for m in deepcopy(self.usermacros): - if m["type"] == str(1): - # Remove the value as the api doesn't return it - # this will allow us to only update usermacros that don't exist - m.pop("value") - macros_filtered.append(m) - if host["macros"] == self.usermacros or host["macros"] == macros_filtered: + # Make a full copy synce we dont want to lose the original value + # of secret type macros from Netbox + netbox_macros = deepcopy(self.usermacros) + # Set the sync bit + full_sync_bit = bool(str(config['usermacro_sync']).lower() == "full") + for macro in netbox_macros: + # If the Macro is a secret and full sync is NOT activated + if macro["type"] == str(1) and not full_sync_bit: + # Remove the value as the Zabbix api does not return the value key + # This is required when you want to do a diff between both lists + macro.pop("value") + # Sort all lists + def filter_with_macros(macro): + return macro["macro"] + host["macros"].sort(key=filter_with_macros) + netbox_macros.sort(key=filter_with_macros) + # Check if both lists are the same + if host["macros"] == netbox_macros: self.logger.debug(f"Host {self.name}: usermacros in-sync.") else: self.logger.warning(f"Host {self.name}: usermacros OUT of sync.") + # Update Zabbix with NetBox usermacros self.updateZabbixHost(macros=self.usermacros) - # Check host usermacros + # Check host tags if config['tag_sync']: if remove_duplicates(host["tags"], sortkey="tag") == self.tags: self.logger.debug(f"Host {self.name}: tags in-sync.") diff --git a/modules/usermacros.py b/modules/usermacros.py index c1d783b..6d396c8 100644 --- a/modules/usermacros.py +++ b/modules/usermacros.py @@ -10,7 +10,7 @@ from modules.tools import field_mapper class ZabbixUsermacros: - """Class that represents a Zabbix interface.""" + """Class that represents Zabbix usermacros.""" def __init__(self, nb, usermacro_map, usermacro_sync, logger=None, host=None): self.nb = nb @@ -57,7 +57,8 @@ class ZabbixUsermacros: macro["macro"] = str(macro_name) if isinstance(macro_properties, dict): if not "value" in macro_properties: - self.logger.warning(f"Usermacro {macro_name} has no value, skipping.") + self.logger.warning(f"Host {self.name}: Usermacro {macro_name} has " + "no value in Netbox, skipping.") return False macro["value"] = macro_properties["value"] @@ -82,11 +83,12 @@ class ZabbixUsermacros: macro["description"] = "" else: - self.logger.warning(f"Usermacro {macro_name} has no value, skipping.") + self.logger.warning(f"Host {self.name}: Usermacro {macro_name} " + "has no value, skipping.") return False else: self.logger.error( - f"Usermacro {macro_name} is not a valid usermacro name, skipping." + f"Host {self.name}: Usermacro {macro_name} is not a valid usermacro name, skipping." 
) return False return macro diff --git a/requirements.txt b/requirements.txt index 8da5ce5..295b59f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -pynetbox -zabbix-utils==2.0.1 \ No newline at end of file +pynetbox==7.4.1 +zabbix-utils==2.0.2 diff --git a/tests/test_usermacros.py b/tests/test_usermacros.py new file mode 100644 index 0000000..28305af --- /dev/null +++ b/tests/test_usermacros.py @@ -0,0 +1,125 @@ +import unittest +from unittest.mock import MagicMock, patch +from modules.device import PhysicalDevice +from modules.usermacros import ZabbixUsermacros + +class DummyNB: + def __init__(self, name="dummy", config_context=None, **kwargs): + self.name = name + self.config_context = config_context or {} + for k, v in kwargs.items(): + setattr(self, k, v) + + def __getitem__(self, key): + # Allow dict-style access for test compatibility + if hasattr(self, key): + return getattr(self, key) + if key in self.config_context: + return self.config_context[key] + raise KeyError(key) + +class TestUsermacroSync(unittest.TestCase): + def setUp(self): + self.nb = DummyNB(serial="1234") + self.logger = MagicMock() + self.usermacro_map = {"serial": "{$HW_SERIAL}"} + + @patch("modules.device.config", {"usermacro_sync": False}) + def test_usermacro_sync_false(self): + device = PhysicalDevice.__new__(PhysicalDevice) + device.nb = self.nb + device.logger = self.logger + device.name = "dummy" + device._usermacro_map = MagicMock(return_value=self.usermacro_map) + # call set_usermacros + result = device.set_usermacros() + self.assertEqual(device.usermacros, []) + self.assertTrue(result is True or result is None) + + @patch("modules.device.config", {"usermacro_sync": True}) + def test_usermacro_sync_true(self): + device = PhysicalDevice.__new__(PhysicalDevice) + device.nb = self.nb + device.logger = self.logger + device.name = "dummy" + device._usermacro_map = MagicMock(return_value=self.usermacro_map) + result = device.set_usermacros() + self.assertIsInstance(device.usermacros, list) + self.assertGreater(len(device.usermacros), 0) + + @patch("modules.device.config", {"usermacro_sync": "full"}) + def test_usermacro_sync_full(self): + device = PhysicalDevice.__new__(PhysicalDevice) + device.nb = self.nb + device.logger = self.logger + device.name = "dummy" + device._usermacro_map = MagicMock(return_value=self.usermacro_map) + result = device.set_usermacros() + self.assertIsInstance(device.usermacros, list) + self.assertGreater(len(device.usermacros), 0) + +class TestZabbixUsermacros(unittest.TestCase): + def setUp(self): + self.nb = DummyNB() + self.logger = MagicMock() + + def test_validate_macro_valid(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + self.assertTrue(macros.validate_macro("{$TEST_MACRO}")) + self.assertTrue(macros.validate_macro("{$A1_2.3}")) + self.assertTrue(macros.validate_macro("{$FOO:bar}")) + + def test_validate_macro_invalid(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + self.assertFalse(macros.validate_macro("$TEST_MACRO")) + self.assertFalse(macros.validate_macro("{TEST_MACRO}")) + self.assertFalse(macros.validate_macro("{$test}")) # lower-case not allowed + self.assertFalse(macros.validate_macro("")) + + def test_render_macro_dict(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + macro = macros.render_macro("{$FOO}", {"value": "bar", "type": "secret", "description": "desc"}) + self.assertEqual(macro["macro"], "{$FOO}") + self.assertEqual(macro["value"], "bar") + 
self.assertEqual(macro["type"], "1") + self.assertEqual(macro["description"], "desc") + + def test_render_macro_dict_missing_value(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + result = macros.render_macro("{$FOO}", {"type": "text"}) + self.assertFalse(result) + self.logger.warning.assert_called() + + def test_render_macro_str(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + macro = macros.render_macro("{$FOO}", "bar") + self.assertEqual(macro["macro"], "{$FOO}") + self.assertEqual(macro["value"], "bar") + self.assertEqual(macro["type"], "0") + self.assertEqual(macro["description"], "") + + def test_render_macro_invalid_name(self): + macros = ZabbixUsermacros(self.nb, {}, False, logger=self.logger) + result = macros.render_macro("FOO", "bar") + self.assertFalse(result) + self.logger.error.assert_called() + + def test_generate_from_map(self): + nb = DummyNB(memory="bar", role="baz") + usermacro_map = {"memory": "{$FOO}", "role": "{$BAR}"} + macros = ZabbixUsermacros(nb, usermacro_map, True, logger=self.logger) + result = macros.generate() + self.assertEqual(len(result), 2) + self.assertEqual(result[0]["macro"], "{$FOO}") + self.assertEqual(result[1]["macro"], "{$BAR}") + + def test_generate_from_config_context(self): + config_context = {"zabbix": {"usermacros": {"{$FOO}": {"value": "bar"}}}} + nb = DummyNB(config_context=config_context) + macros = ZabbixUsermacros(nb, {}, True, logger=self.logger) + result = macros.generate() + self.assertEqual(len(result), 1) + self.assertEqual(result[0]["macro"], "{$FOO}") + +if __name__ == "__main__": + unittest.main() From b705e1341fed27f4dae08e4f17839b1e313d2b2b Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Wed, 11 Jun 2025 20:15:02 +0000 Subject: [PATCH 74/93] Fixed publish image workflow --- .github/workflows/publish-image.yml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/.github/workflows/publish-image.yml b/.github/workflows/publish-image.yml index 4531303..069fc69 100644 --- a/.github/workflows/publish-image.yml +++ b/.github/workflows/publish-image.yml @@ -1,17 +1,13 @@ +--- name: Build and Push Docker Image on: push: branches: - main -permissions: - contents: read - packages: write - -on: - release: - types: [published] pull_request: + branches: + - main types: [opened, synchronize] jobs: From 7b8827fa944001769766b2545a377b09cc25bf40 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Thu, 12 Jun 2025 10:56:30 +0200 Subject: [PATCH 75/93] Added Zabbix logout --- netbox_zabbix_sync.py | 1 + 1 file changed, 1 insertion(+) diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index ec14b4e..448e3cd 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -283,6 +283,7 @@ def main(arguments): device.createInZabbix(zabbix_groups, zabbix_templates, zabbix_proxy_list) except SyncError: pass + zabbix.logout() if __name__ == "__main__": From c8fda04ce83795824e89735f0d06feba4d1f5626 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Thu, 12 Jun 2025 11:08:21 +0000 Subject: [PATCH 76/93] Fixed config bug and #111 --- modules/config.py | 5 +---- modules/device.py | 4 ++-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/modules/config.py b/modules/config.py index 586227d..4a44aab 100644 --- a/modules/config.py +++ b/modules/config.py @@ -118,7 +118,4 @@ def load_config_file(config_default, config_file="config.py"): if hasattr(config_module, key): dconf[key] = getattr(config_module, key) return dconf - logger.warning( - "Config file %s not 
found. Using default config " - "and environment variables.", config_file) - return None + return config_default \ No newline at end of file diff --git a/modules/device.py b/modules/device.py index b6903fe..b8bda3f 100644 --- a/modules/device.py +++ b/modules/device.py @@ -8,10 +8,10 @@ from logging import getLogger from re import search from zabbix_utils import APIRequestError +from pynetbox import RequestError as NetboxRequestError from modules.exceptions import ( InterfaceConfigError, - JournalError, SyncExternalError, SyncInventoryError, TemplateError, @@ -896,7 +896,7 @@ class PhysicalDevice: self.nb_journals.create(journal) self.logger.debug(f"Host {self.name}: Created journal entry in NetBox") return True - except JournalError(e) as e: + except NetboxRequestError as e: self.logger.warning( "Unable to create journal entry for " f"{self.name}: NB returned {e}" From 3115eaa04ee83e2f7b429340dbe018c9d362a2bd Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Thu, 12 Jun 2025 11:14:15 +0000 Subject: [PATCH 77/93] Fixed linter and test for config file --- modules/config.py | 2 +- tests/test_configuration_parsing.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/modules/config.py b/modules/config.py index 4a44aab..9f97c83 100644 --- a/modules/config.py +++ b/modules/config.py @@ -118,4 +118,4 @@ def load_config_file(config_default, config_file="config.py"): if hasattr(config_module, key): dconf[key] = getattr(config_module, key) return dconf - return config_default \ No newline at end of file + return config_default diff --git a/tests/test_configuration_parsing.py b/tests/test_configuration_parsing.py index 23438b4..641b508 100644 --- a/tests/test_configuration_parsing.py +++ b/tests/test_configuration_parsing.py @@ -93,11 +93,10 @@ def test_load_config_file_function(): def test_load_config_file_not_found(): """Test load_config_file when the config file doesn't exist""" - # Instead of trying to assert on the logger call, we'll just check the return value - # and verify the function works as expected in this case with patch('pathlib.Path.exists', return_value=False): result = load_config_file(DEFAULT_CONFIG.copy()) - assert result is None + # Should return a dict equal to DEFAULT_CONFIG, not a new object + assert result == DEFAULT_CONFIG def test_load_env_variable_function(): From baf23403a0b97622917e9a1a7e372d265d480da3 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Thu, 12 Jun 2025 11:20:46 +0000 Subject: [PATCH 78/93] Updated documentation after fixing #111 --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 86bb532..25f2407 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,6 @@ # NetBox to Zabbix synchronization -A script to create, update and delete Zabbix hosts using NetBox device objects. -Currently compatible with Zabbix 7.0. Zabbix 7.2 is unfortunately not supported and will result in the script failing. +A script to create, update and delete Zabbix hosts using NetBox device objects. Tested and compabible with all [currently Supported Zabbix Releases](https://www.zabbix.com/life_cycle_and_release_policy). 
## Installation via Docker From c58e5aba1e95413e8b33ef8c1b37a287bc3e19b8 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Thu, 12 Jun 2025 11:51:15 +0000 Subject: [PATCH 79/93] Fixed minor documentation mistake --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 25f2407..641b302 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # NetBox to Zabbix synchronization -A script to create, update and delete Zabbix hosts using NetBox device objects. Tested and compabible with all [currently Supported Zabbix Releases](https://www.zabbix.com/life_cycle_and_release_policy). +A script to create, update and delete Zabbix hosts using NetBox device objects. Tested and compabible with all [currently supported Zabbix releases](https://www.zabbix.com/life_cycle_and_release_policy). ## Installation via Docker From 76723d28238e2826669b8b062cf1baf3a4f37aad Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Thu, 12 Jun 2025 13:51:59 +0200 Subject: [PATCH 80/93] Updated Git workflow. Linter to Python 3.13, Image publisher will only execute when a commit is performed on main. --- .github/workflows/publish-image.yml | 4 ---- .github/workflows/quality.yml | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/publish-image.yml b/.github/workflows/publish-image.yml index 069fc69..c18dc39 100644 --- a/.github/workflows/publish-image.yml +++ b/.github/workflows/publish-image.yml @@ -5,10 +5,6 @@ on: push: branches: - main - pull_request: - branches: - - main - types: [opened, synchronize] jobs: test_quality: diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml index 4421765..af7cc25 100644 --- a/.github/workflows/quality.yml +++ b/.github/workflows/quality.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.11","3.12"] + python-version: ["3.12","3.13"] steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} From 8ce2cab86fad994b687a551b7407263f19d0cdcb Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Thu, 12 Jun 2025 18:35:56 +0000 Subject: [PATCH 81/93] Fixed bug where sync.log was created in the modules directory --- modules/logging.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/logging.py b/modules/logging.py index c36c2c1..e96e6e9 100644 --- a/modules/logging.py +++ b/modules/logging.py @@ -21,9 +21,10 @@ def setup_logger(): """ # Set logging lgout = logging.StreamHandler() - lgfile = logging.FileHandler( - path.join(path.dirname(path.realpath(__file__)), "sync.log") - ) + # Logfile in the project root + project_root = path.dirname(path.dirname(path.realpath(__file__))) + logfile_path = path.join(project_root, "sync.log") + lgfile = logging.FileHandler(logfile_path) logging.basicConfig( format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", From 7394bf8d1d6338905df3514f498f1522d8e01618 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Thu, 12 Jun 2025 19:24:04 +0000 Subject: [PATCH 82/93] Fixed a bunch of typos (how did this happen!?!) --- README.md | 50 ++++++++++++++++++++++---------------------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index 641b302..64e52ee 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # NetBox to Zabbix synchronization -A script to create, update and delete Zabbix hosts using NetBox device objects.
Tested and compabible with all [currently supported Zabbix releases](https://www.zabbix.com/life_cycle_and_release_policy). +A script to create, update and delete Zabbix hosts using NetBox device objects. Tested and compatible with all [currently supported Zabbix releases](https://www.zabbix.com/life_cycle_and_release_policy). ## Installation via Docker @@ -23,10 +23,10 @@ docker run -d -t -i -e ZABBIX_HOST='https://zabbix.local' \ --name netbox-zabbix-sync ghcr.io/thenetworkguy/netbox-zabbix-sync:main ``` -This should run a one-time sync, you can check the sync with +This should run a one-time sync. You can check the sync with `docker logs netbox-zabbix-sync`. -The image uses the default `config.py` for it's configuration, you can use a +The image uses the default `config.py` for its configuration, you can use a volume mount in the docker run command to override with your own config file if needed (see [config file](#config-file)): @@ -118,8 +118,8 @@ the template information then the zabbix_template field is not required): You can make the `zabbix_hostid` field hidden or read-only to prevent human intervention. -This is optional and there is a use case for leaving it read-write in the UI to -manually change the ID. For example to re-run a sync. +This is optional, but there may be cases where you want to leave it +read-write in the UI. For example to manually change or clear the ID and re-run a sync. ## Virtual Machine (VM) Syncing @@ -146,7 +146,7 @@ creation for devices in a new category. I would recommend setting this variable to `True` since leaving it on `False` results in a lot of manual work. The format can be set with the `hostgroup_format` variable for devices and -`vm_hostgroup_format` for devices. +`vm_hostgroup_format` for virtual machines. Any nested parent hostgroups will also be created automatically. For instance the region `Berlin` with parent region `Germany` will create the hostgroup @@ -189,10 +189,10 @@ used: | cluster | VM cluster name | | cluster_type | VM cluster type | -You can specify the value seperated by a "/" like so: +You can specify the value separated by a "/" like so: ```python -hostgroup_format = "tenant/site/dev_location/role" +hostgroup_format = "tenant/site/location/role" ``` **Group traversal** @@ -239,7 +239,7 @@ have a relationship with a tenant. - Site: HQ-AMS ```python -hostgroup_format = "site/tenant/device_role" +hostgroup_format = "site/tenant/role" ``` When running the script like above, the following hostgroup (HG) will be @@ -312,7 +312,7 @@ device_inventory_map = {"custom_fields/mycustomfield/name": "alias"} vm_inventory_map = {"custom_fields/mycustomfield/name": "alias"} ``` -See `config.py.example` for an extensive example map. Any Zabix Inventory fields +See `config.py.example` for an extensive example map. Any Zabbix Inventory fields that are not included in the map will not be touched by the script, so you can safely add manual values or use items to automatically add values to other fields. @@ -367,7 +367,7 @@ SLA calculations and event correlation. Tags can be synced from the following sources: 1. NetBox device/vm tags -2. NetBox config ontext +2. NetBox config context 3. NetBox fields Syncing tags will override any tags that were set manually on the host, @@ -385,10 +385,10 @@ tag_lower = True #### Device tags As NetBox doesn't follow the tag/value pattern for tags, we will need a tag -name set to register the netwbox tags. +name set to register the netbox tags. 
By default the tag name is "NetBox", but you can change this to whatever you want. -The value for the tag can be choosen from 'name', 'display' or 'slug'. +The value for the tag can be set to 'name', 'display', or 'slug', which refers to the property of the NetBox tag object that will be used as the value in Zabbix. ```python tag_name = 'NetBox' @@ -503,8 +503,8 @@ Examples: ``` Please be aware that secret usermacros are only synced _once_ by default. -This is the default behaviour because Zabbix API won't return the value of -secrets so the script cannot compare the values with the ones set in NetBox. +This is the default behavior because Zabbix API won't return the value of +secrets so the script cannot compare the values with those set in NetBox. If you update a secret usermacro value, just remove the value from the host in Zabbix and the new value will be synced during the next run. @@ -601,7 +601,7 @@ You can set the proxy for a device using the 'proxy' key in config context. } ``` -It is now posible to specify proxy groups with the introduction of Proxy groups +It is now possible to specify proxy groups with the introduction of Proxy groups in Zabbix 7. Specifying a group in the config context on older Zabbix releases will have no impact and the script will ignore the statement. @@ -614,9 +614,9 @@ will have no impact and the script will ignore the statement. ``` The script will prefer groups when specifying both a proxy and group. This is -done with the assumption that groups are more resiliant and HA ready, making it +done with the assumption that groups are more resilient and HA ready, making it a more logical choice to use for proxy linkage. This also makes migrating from a -proxy to proxy group easier since the group take priority over the invidivual +proxy to proxy group easier since the group take priority over the individual proxy. ```json @@ -630,13 +630,7 @@ proxy. In the example above the host will use the group on Zabbix 7. On Zabbix 6 and below the host will use the proxy. Zabbix 7 will use the proxy value when -ommiting the proxy_group value. - -Because of the possible amount of destruction when setting up NetBox but -forgetting the proxy command, the sync works a bit different. By default -everything is synced except in a situation where the Zabbix host has a proxy -configured but nothing is configured in NetBox. To force deletion and a full -sync, set the `full_proxy_sync` variable in the config file. +omitting the proxy_group value. ### Set interface parameters within NetBox @@ -653,7 +647,7 @@ Due to Zabbix limitations of changing interface type with a linked template, changing the interface type from within NetBox is not supported and the script will generate an error. -For example when changing a SNMP interface to an Agent interface: +For example, when changing a SNMP interface to an Agent interface: ``` NetBox-Zabbix-sync - WARNING - Device: Interface OUT of sync. @@ -661,11 +655,11 @@ NetBox-Zabbix-sync - ERROR - Device: changing interface type to 1 is not support ``` To configure the interface parameters you'll need to use custom context. Custom -context was used to make this script as customizable as posible for each +context was used to make this script as customizable as possible for each environment. 
For example, you could: - Set the custom context directly on a device -- Set the custom context on a label, which you would add to a device (for +- Set the custom context on a tag, which you would add to a device (for instance, SNMPv3) - Set the custom context on a device role - Set the custom context on a site or region From 7969de50bfadfd07be93bca0cf79d458fa72462f Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Thu, 12 Jun 2025 20:24:29 +0000 Subject: [PATCH 83/93] Adds coverage report to gitignore. Adds extra coverage test to workflow. --- .github/workflows/run_tests.yml | 9 +++++++++ .gitignore | 3 ++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 9093c96..604518d 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -19,7 +19,16 @@ jobs: python -m pip install --upgrade pip pip install pytest pytest-mock pip install -r requirements.txt + - name: Install coverage + run: pip install coverage pytest-cov - name: Testing the code with PyTest run: | cp config.py.example config.py pytest tests + - name: Run tests with coverage + run: | + cp config.py.example config.py + coverage run -m pytest tests + - name: Check coverage percentage + run: | + coverage report --fail-under=70 diff --git a/.gitignore b/.gitignore index 0693f26..c515fe3 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ Pipfile.lock __pycache__/ *.py[cod] .vscode -.flake \ No newline at end of file +.flake +.coverage \ No newline at end of file From 148ce47c105f031e30284373c8d6f3d4a841710c Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Thu, 12 Jun 2025 20:25:54 +0000 Subject: [PATCH 84/93] Set minimum coverage to 60 --- .github/workflows/run_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 604518d..db4456e 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -31,4 +31,4 @@ jobs: coverage run -m pytest tests - name: Check coverage percentage run: | - coverage report --fail-under=70 + coverage report --fail-under=60 From bd4d21c5d8205e4753249370003528ba8572075c Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Fri, 13 Jun 2025 10:24:26 +0200 Subject: [PATCH 85/93] Hostgroup CF checks for VMs --- modules/device.py | 2 +- modules/tools.py | 11 +++++++++-- netbox_zabbix_sync.py | 14 ++++++++------ 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/modules/device.py b/modules/device.py index d5bb635..0293778 100644 --- a/modules/device.py +++ b/modules/device.py @@ -724,7 +724,7 @@ class PhysicalDevice: # Check if hostgroups match if (sorted(host[group_dictname], key=itemgetter('groupid')) == sorted(self.group_ids, key=itemgetter('groupid'))): - self.logger.debug(f"Host {self.name}: hostgroups in-sync.") + self.logger.debug(f"Host {self.name}: hostgroups in-sync.") else: self.logger.warning(f"Host {self.name}: hostgroups OUT of sync.") self.updateZabbixHost(groups=self.group_ids) diff --git a/modules/tools.py b/modules/tools.py index e49cf13..9cdb56f 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -100,7 +100,7 @@ def remove_duplicates(input_list, sortkey=None): output_list.sort(key=lambda x: x[sortkey]) return output_list -def verify_hg_format(hg_format, hg_type="dev", logger=None): +def verify_hg_format(hg_format, device_cfs=[], vm_cfs=[], hg_type="dev", logger=None): """ Verifies hostgroup field format """ @@ -126,7 +126,13 @@ def 
verify_hg_format(hg_format, hg_type="dev", logger=None): "cluster", "device", "platform"] + ,"cfs": {"dev": [], "vm": []} } + for cf in device_cfs: + allowed_objects['cfs']['dev'].append(cf.name) + for cf in vm_cfs: + allowed_objects['cfs']['vm'].append(cf.name) + logger.debug(allowed_objects) hg_objects = [] if isinstance(hg_format,list): for f in hg_format: @@ -135,7 +141,8 @@ def verify_hg_format(hg_format, hg_type="dev", logger=None): hg_objects = hg_format.split("/") hg_objects = sorted(set(hg_objects)) for hg_object in hg_objects: - if hg_object not in allowed_objects[hg_type]: + if (hg_object not in allowed_objects[hg_type] and + hg_object not in allowed_objects['cfs'][hg_type]): e = ( f"Hostgroup item {hg_object} is not valid. Make sure you" " use valid items and separate them with '/'." diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 9cd0eda..5418b50 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -85,9 +85,13 @@ def main(arguments): # Set NetBox API netbox = api(netbox_host, token=netbox_token, threading=True) # Create API call to get all custom fields which are on the device objects + device_cfs = [] try: device_cfs = list( - netbox.extras.custom_fields.filter(type="text", content_type_id=23) + netbox.extras.custom_fields.filter(type="text", content_types="dcim.device") + ) + vm_cfs = list( + netbox.extras.custom_fields.filter(type="text", content_types="virtualization.virtualmachine") ) except RequestsConnectionError: logger.error( @@ -98,12 +102,10 @@ def main(arguments): except NBRequestError as e: logger.error(f"NetBox error: {e}") sys.exit(1) - for cf in device_cfs: - allowed_objects.append(cf.name) + logger.debug(device_cfs) # Check if the provided Hostgroup layout is valid - verify_hg_format(hostgroup_format, hg_type="dev", logger=logger) - verify_hg_format(vm_hostgroup_format, hg_type="vm", logger=logger) - + verify_hg_format(hostgroup_format, device_cfs=device_cfs, hg_type="dev", logger=logger) + verify_hg_format(vm_hostgroup_format, vm_cfs=vm_cfs, hg_type="vm", logger=logger) # Set Zabbix API try: ssl_ctx = ssl.create_default_context() From bfadd88542ea0b285e8c216a9b1bdba4d140c9fa Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Fri, 13 Jun 2025 10:49:40 +0200 Subject: [PATCH 86/93] perform hostgroup creation before consistency check --- netbox_zabbix_sync.py | 57 ++++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 28 deletions(-) diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index 5418b50..3e50aff 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -85,14 +85,10 @@ def main(arguments): # Set NetBox API netbox = api(netbox_host, token=netbox_token, threading=True) # Create API call to get all custom fields which are on the device objects - device_cfs = [] try: - device_cfs = list( - netbox.extras.custom_fields.filter(type="text", content_types="dcim.device") - ) - vm_cfs = list( - netbox.extras.custom_fields.filter(type="text", content_types="virtualization.virtualmachine") - ) + # Get NetBox version + nb_version = netbox.version + logger.debug(f"NetBox version is {nb_version}.") except RequestsConnectionError: logger.error( f"Unable to connect to NetBox with URL {netbox_host}." 
@@ -102,10 +98,18 @@ def main(arguments): except NBRequestError as e: logger.error(f"NetBox error: {e}") sys.exit(1) - logger.debug(device_cfs) # Check if the provided Hostgroup layout is valid + device_cfs = [] + vm_cfs = [] + device_cfs = list( + netbox.extras.custom_fields.filter(type="text", content_types="dcim.device") + ) verify_hg_format(hostgroup_format, device_cfs=device_cfs, hg_type="dev", logger=logger) - verify_hg_format(vm_hostgroup_format, vm_cfs=vm_cfs, hg_type="vm", logger=logger) + if sync_vms: + vm_cfs = list( + netbox.extras.custom_fields.filter(type="text", content_types="virtualization.virtualmachine") + ) + verify_hg_format(vm_hostgroup_format, vm_cfs=vm_cfs, hg_type="vm", logger=logger) # Set Zabbix API try: ssl_ctx = ssl.create_default_context() @@ -152,9 +156,6 @@ def main(arguments): # Prepare list of all proxy and proxy_groups zabbix_proxy_list = proxy_prepper(zabbix_proxies, zabbix_proxygroups) - # Get NetBox API version - nb_version = netbox.version - # Go through all NetBox devices for nb_vm in netbox_vms: try: @@ -191,6 +192,14 @@ def main(arguments): # Check if the VM is in the disabled state if vm.status in zabbix_device_disable: vm.zabbix_state = 1 + # Add hostgroup if config is set + if create_hostgroups: + # Create new hostgroup. Potentially multiple groups if nested + hostgroups = vm.createZabbixHostgroup(zabbix_groups) + # go through all newly created hostgroups + for group in hostgroups: + # Add new hostgroups to zabbix group list + zabbix_groups.append(group) # Check if VM is already in Zabbix if vm.zabbix_id: vm.ConsistencyCheck( @@ -201,14 +210,6 @@ def main(arguments): create_hostgroups, ) continue - # Add hostgroup if config is set - if create_hostgroups: - # Create new hostgroup. Potentially multiple groups if nested - hostgroups = vm.createZabbixHostgroup(zabbix_groups) - # go through all newly created hostgroups - for group in hostgroups: - # Add new hostgroups to zabbix group list - zabbix_groups.append(group) # Add VM to Zabbix vm.createInZabbix(zabbix_groups, zabbix_templates, zabbix_proxy_list) except SyncError: @@ -268,6 +269,14 @@ def main(arguments): # Check if the device is in the disabled state if device.status in zabbix_device_disable: device.zabbix_state = 1 + # Add hostgroup is config is set + if create_hostgroups: + # Create new hostgroup. Potentially multiple groups if nested + hostgroups = device.createZabbixHostgroup(zabbix_groups) + # go through all newly created hostgroups + for group in hostgroups: + # Add new hostgroups to zabbix group list + zabbix_groups.append(group) # Check if device is already in Zabbix if device.zabbix_id: device.ConsistencyCheck( @@ -278,14 +287,6 @@ def main(arguments): create_hostgroups, ) continue - # Add hostgroup is config is set - if create_hostgroups: - # Create new hostgroup. 
Potentially multiple groups if nested - hostgroups = device.createZabbixHostgroup(zabbix_groups) - # go through all newly created hostgroups - for group in hostgroups: - # Add new hostgroups to zabbix group list - zabbix_groups.append(group) # Add device to Zabbix device.createInZabbix(zabbix_groups, zabbix_templates, zabbix_proxy_list) except SyncError: From b62e8203b698697a9b3ea955abaab571abb9330b Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Fri, 13 Jun 2025 15:47:31 +0200 Subject: [PATCH 87/93] removed debug line --- modules/tools.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/tools.py b/modules/tools.py index 9cdb56f..5a7d8d3 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -132,7 +132,6 @@ def verify_hg_format(hg_format, device_cfs=[], vm_cfs=[], hg_type="dev", logger= allowed_objects['cfs']['dev'].append(cf.name) for cf in vm_cfs: allowed_objects['cfs']['vm'].append(cf.name) - logger.debug(allowed_objects) hg_objects = [] if isinstance(hg_format,list): for f in hg_format: From a7a79ea81ea55b01044673b9a53305f6340fa8a2 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Fri, 13 Jun 2025 15:56:21 +0200 Subject: [PATCH 88/93] updated README for multiple hostgroups --- README.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0bd6bfc..a938572 100644 --- a/README.md +++ b/README.md @@ -175,13 +175,14 @@ used: | ------------ | ------------------------ | | location | The device location name | | manufacturer | Device manufacturer name | +| rack | Rack | **Only for VMs** | name | description | | ------------ | --------------- | | cluster | VM cluster name | -| cluster_type | VM cluster type | +| device | parent device | You can specify the value seperated by a "/" like so: ```python hostgroup_format = "tenant/site/dev_location/role" ``` +You can also provice a list of groups like so: + +```python +hostgroup_format = ["region/site_group/site", "role", "tenant_group/tenant"] +``` + + **Group traversal** The default behaviour for `region` is to only use the directly assigned region From 8fe7e5763bf16e207050aa18da5ccfcac9854922 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Sat, 14 Jun 2025 20:15:05 +0000 Subject: [PATCH 89/93] Added sanitizer function for log output.
--- modules/device.py | 6 ++--- modules/tools.py | 36 ++++++++++++++++++++++++++ tests/test_tools.py | 62 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 101 insertions(+), 3 deletions(-) create mode 100644 tests/test_tools.py diff --git a/modules/device.py b/modules/device.py index b8bda3f..95f9d4f 100644 --- a/modules/device.py +++ b/modules/device.py @@ -19,7 +19,7 @@ from modules.exceptions import ( from modules.hostgroups import Hostgroup from modules.interface import ZabbixInterface from modules.tags import ZabbixTags -from modules.tools import field_mapper, remove_duplicates +from modules.tools import field_mapper, remove_duplicates, sanatize_log_output from modules.usermacros import ZabbixUsermacros from modules.config import load_config @@ -594,7 +594,7 @@ class PhysicalDevice: ) self.logger.error(e) raise SyncExternalError(e) from None - self.logger.info(f"Updated host {self.name} with data {kwargs}.") + self.logger.info(f"Host {self.name}: updated with data {sanatize_log_output(kwargs)}.") self.create_journal_entry("info", "Updated host in Zabbix with latest NB data.") def ConsistencyCheck( @@ -854,7 +854,7 @@ class PhysicalDevice: try: # API call to Zabbix self.zabbix.hostinterface.update(updates) - e = f"Host {self.name}: solved interface conflict." + e = f"Host {self.name}: updated interface with data {sanatize_log_output(updates)}." self.logger.info(e) self.create_journal_entry("info", e) except APIRequestError as e: diff --git a/modules/tools.py b/modules/tools.py index 791025d..1b641f2 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -99,3 +99,39 @@ def remove_duplicates(input_list, sortkey=None): if sortkey and isinstance(sortkey, str): output_list.sort(key=lambda x: x[sortkey]) return output_list + +def sanatize_log_output(data): + """ + Used for the update function to Zabbix which + shows the data that its using to update the host. + Removes and sensitive data from the input. + """ + if not isinstance(data, dict): + return data + sanitized_data = data.copy() + # Check if there are any sensitive macros defined in the data + if "macros" in data: + for macro in sanitized_data["macros"]: + # Check if macro is secret type + if not macro["type"] == str(1): + continue + macro["value"] = "********" + # Check for interface data + if "interfaceid" in data: + # Interface tID is a value which is most likely not helpful + # in logging output or for roubleshooting. + del sanitized_data["interfaceid"] + # InterfaceID also hints that this is a interface update. + # A check is required if there are no macro's used for SNMP security parameters. + if not "details" in data: + return sanitized_data + for key, detail in sanitized_data["details"].items(): + # If the detail is a secret, we don't want to log it. + if key in ("authpassphrase", "privpassphrase", "securityname", "community"): + # Check if a macro is used. + # If so then logging the output is not a security issue. + if detail.startswith("{$") and detail.endswith("}"): + continue + # A macro is not used, so we sanitize the value. 
+ sanitized_data["details"][key] = "********" + return sanitized_data diff --git a/tests/test_tools.py b/tests/test_tools.py new file mode 100644 index 0000000..3e6ae24 --- /dev/null +++ b/tests/test_tools.py @@ -0,0 +1,62 @@ +from modules.tools import sanatize_log_output + +def test_sanatize_log_output_secrets(): + data = { + "macros": [ + {"macro": "{$SECRET}", "type": "1", "value": "supersecret"}, + {"macro": "{$PLAIN}", "type": "0", "value": "notsecret"}, + ] + } + sanitized = sanatize_log_output(data) + assert sanitized["macros"][0]["value"] == "********" + assert sanitized["macros"][1]["value"] == "notsecret" + +def test_sanatize_log_output_interface_secrets(): + data = { + "interfaceid": 123, + "details": { + "authpassphrase": "supersecret", + "privpassphrase": "anothersecret", + "securityname": "sensitiveuser", + "community": "public", + "other": "normalvalue" + } + } + sanitized = sanatize_log_output(data) + # Sensitive fields should be sanitized + assert sanitized["details"]["authpassphrase"] == "********" + assert sanitized["details"]["privpassphrase"] == "********" + assert sanitized["details"]["securityname"] == "********" + # Non-sensitive fields should remain + assert sanitized["details"]["community"] == "********" + assert sanitized["details"]["other"] == "normalvalue" + # interfaceid should be removed + assert "interfaceid" not in sanitized + +def test_sanatize_log_output_interface_macros(): + data = { + "interfaceid": 123, + "details": { + "authpassphrase": "{$SECRET_MACRO}", + "privpassphrase": "{$SECRET_MACRO}", + "securityname": "{$USER_MACRO}", + "community": "{$SNNMP_COMMUNITY}", + } + } + sanitized = sanatize_log_output(data) + # Macro values should not be sanitized + assert sanitized["details"]["authpassphrase"] == "{$SECRET_MACRO}" + assert sanitized["details"]["privpassphrase"] == "{$SECRET_MACRO}" + assert sanitized["details"]["securityname"] == "{$USER_MACRO}" + assert sanitized["details"]["community"] == "{$SNNMP_COMMUNITY}" + assert "interfaceid" not in sanitized + +def test_sanatize_log_output_plain_data(): + data = {"foo": "bar", "baz": 123} + sanitized = sanatize_log_output(data) + assert sanitized == data + +def test_sanatize_log_output_non_dict(): + data = [1, 2, 3] + sanitized = sanatize_log_output(data) + assert sanitized == data From ee6d13bfdf7899b3550e6d173e4775e9a56e6f2d Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Sat, 14 Jun 2025 20:17:57 +0000 Subject: [PATCH 90/93] Fixed line too long and updated readme --- README.md | 4 ++-- modules/device.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 64e52ee..65cdf02 100644 --- a/README.md +++ b/README.md @@ -518,8 +518,8 @@ usermacro_sync = "full" This will force a full usermacro sync on every run on hosts that have secret usermacros set. That way, you will know for sure the secret values are always up to date. -Keep in mind that NetBox (and the log output of this script) will show your secrets -in plain text. If true secrecy is required, consider switching to +Keep in mind that NetBox will show your secrets in plain text. +If true secrecy is required, consider switching to [vault](https://www.zabbix.com/documentation/current/en/manual/config/macros/secret_macros#vault-secret) usermacros. 
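The log line being wrapped in the `modules/device.py` hunk below goes through the `sanatize_log_output()` helper added in PATCH 89. A short usage sketch with made-up payload data; the import path matches the one used in `tests/test_tools.py`:

```python
# Sketch only; shows how secret macro values are masked before they reach the log.
from modules.tools import sanatize_log_output

payload = {
    "macros": [
        {"macro": "{$SNMP_COMMUNITY}", "type": "1", "value": "public"},  # secret macro
        {"macro": "{$SITE_CODE}", "type": "0", "value": "AMS01"},        # plain-text macro
    ]
}
print(sanatize_log_output(payload))
# The secret value is replaced with "********"; the plain-text macro is left untouched.
```

Note that the helper works on a shallow copy, so the nested macro dictionaries of the original payload are masked in place as well; this appears harmless in `updateZabbixHost()` because the sanitised output is only built after the API call has been made.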
diff --git a/modules/device.py b/modules/device.py index 95f9d4f..fd7ff59 100644 --- a/modules/device.py +++ b/modules/device.py @@ -854,7 +854,8 @@ class PhysicalDevice: try: # API call to Zabbix self.zabbix.hostinterface.update(updates) - e = f"Host {self.name}: updated interface with data {sanatize_log_output(updates)}." + e = (f"Host {self.name}: updated interface " + f"with data {sanatize_log_output(updates)}.") self.logger.info(e) self.create_journal_entry("info", e) except APIRequestError as e: From 5e390396ba5e2b387380ce65289e90ee119a54c2 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Sat, 14 Jun 2025 23:16:07 +0200 Subject: [PATCH 91/93] Fixed small typo --- modules/tools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/tools.py b/modules/tools.py index 1b641f2..1185c12 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -118,8 +118,8 @@ def sanatize_log_output(data): macro["value"] = "********" # Check for interface data if "interfaceid" in data: - # Interface tID is a value which is most likely not helpful - # in logging output or for roubleshooting. + # Interface ID is a value which is most likely not helpful + # in logging output or for troubleshooting. del sanitized_data["interfaceid"] # InterfaceID also hints that this is a interface update. # A check is required if there are no macro's used for SNMP security parameters. From ba530ecd58402f1599d2bec4bdc2d939cad8a680 Mon Sep 17 00:00:00 2001 From: Raymond Kuiper Date: Mon, 16 Jun 2025 10:28:17 +0200 Subject: [PATCH 92/93] corrected linting errors --- modules/device.py | 12 +++++++----- modules/tools.py | 12 ++++++++---- netbox_zabbix_sync.py | 14 ++++++++------ 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/modules/device.py b/modules/device.py index f02f35b..e61cede 100644 --- a/modules/device.py +++ b/modules/device.py @@ -320,7 +320,10 @@ class PhysicalDevice: for group in groups: if group["name"] == hg: self.group_ids.append({"groupid": group["groupid"]}) - e = f"Host {self.name}: matched group \"{group['name']}\" (ID:{group['groupid']})" + e = ( + f"Host {self.name}: matched group " + f"\"{group['name']}\" (ID:{group['groupid']})" + ) self.logger.debug(e) if len(self.group_ids) == len(self.hostgroups): return True @@ -500,7 +503,6 @@ class PhysicalDevice: templateids.append({"templateid": template["templateid"]}) # Set interface, group and template configuration interfaces = self.setInterfaceDetails() - groups = self.group_ids # Set Zabbix proxy if defined self.setProxy(proxies) @@ -702,9 +704,9 @@ class PhysicalDevice: if str(self.zabbix.version).startswith(("6", "5")): group_dictname = "groups" # Check if hostgroups match - if (sorted(host[group_dictname], key=itemgetter('groupid')) == - sorted(self.group_ids, key=itemgetter('groupid'))): - self.logger.debug(f"Host {self.name}: hostgroups in-sync.") + if (sorted(host[group_dictname], key=itemgetter('groupid')) == + sorted(self.group_ids, key=itemgetter('groupid'))): + self.logger.debug(f"Host {self.name}: hostgroups in-sync.") else: self.logger.warning(f"Host {self.name}: hostgroups OUT of sync.") self.updateZabbixHost(groups=self.group_ids) diff --git a/modules/tools.py b/modules/tools.py index 3f7bf81..0b600c2 100644 --- a/modules/tools.py +++ b/modules/tools.py @@ -101,10 +101,14 @@ def remove_duplicates(input_list, sortkey=None): return output_list -def verify_hg_format(hg_format, device_cfs=[], vm_cfs=[], hg_type="dev", logger=None): +def verify_hg_format(hg_format, device_cfs=None, vm_cfs=None, 
hg_type="dev", logger=None): """ Verifies hostgroup field format """ + if not device_cfs: + device_cfs = [] + if not vm_cfs: + vm_cfs = [] allowed_objects = {"dev": ["location", "rack", "role", @@ -130,9 +134,9 @@ def verify_hg_format(hg_format, device_cfs=[], vm_cfs=[], hg_type="dev", logger= ,"cfs": {"dev": [], "vm": []} } for cf in device_cfs: - allowed_objects['cfs']['dev'].append(cf.name) + allowed_objects['cfs']['dev'].append(cf.name) for cf in vm_cfs: - allowed_objects['cfs']['vm'].append(cf.name) + allowed_objects['cfs']['vm'].append(cf.name) hg_objects = [] if isinstance(hg_format,list): for f in hg_format: @@ -185,4 +189,4 @@ def sanatize_log_output(data): continue # A macro is not used, so we sanitize the value. sanitized_data["details"][key] = "********" - return sanitized_data \ No newline at end of file + return sanitized_data diff --git a/netbox_zabbix_sync.py b/netbox_zabbix_sync.py index afab914..d9ff71b 100755 --- a/netbox_zabbix_sync.py +++ b/netbox_zabbix_sync.py @@ -83,12 +83,14 @@ def main(arguments): device_cfs = list( netbox.extras.custom_fields.filter(type="text", content_types="dcim.device") ) - verify_hg_format(hostgroup_format, device_cfs=device_cfs, hg_type="dev", logger=logger) - if sync_vms: + verify_hg_format(config["hostgroup_format"], + device_cfs=device_cfs, hg_type="dev", logger=logger) + if config["sync_vms"]: vm_cfs = list( - netbox.extras.custom_fields.filter(type="text", content_types="virtualization.virtualmachine") + netbox.extras.custom_fields.filter(type="text", + content_types="virtualization.virtualmachine") ) - verify_hg_format(vm_hostgroup_format, vm_cfs=vm_cfs, hg_type="vm", logger=logger) + verify_hg_format(config["vm_hostgroup_format"], vm_cfs=vm_cfs, hg_type="vm", logger=logger) # Set Zabbix API try: ssl_ctx = ssl.create_default_context() @@ -173,7 +175,7 @@ def main(arguments): if vm.status in config["zabbix_device_disable"]: vm.zabbix_state = 1 # Add hostgroup if config is set - if create_hostgroups: + if config["create_hostgroups"]: # Create new hostgroup. Potentially multiple groups if nested hostgroups = vm.createZabbixHostgroup(zabbix_groups) # go through all newly created hostgroups @@ -249,7 +251,7 @@ def main(arguments): if device.status in config["zabbix_device_disable"]: device.zabbix_state = 1 # Add hostgroup is config is set - if create_hostgroups: + if config["create_hostgroups"]: # Create new hostgroup. Potentially multiple groups if nested hostgroups = device.createZabbixHostgroup(zabbix_groups) # go through all newly created hostgroups From a5be9538d9f4b985a082ef01ea0163d7b23a3ca4 Mon Sep 17 00:00:00 2001 From: TheNetworkGuy Date: Mon, 16 Jun 2025 11:15:52 +0200 Subject: [PATCH 93/93] Made the pytest file a bit cleaner and removed a redundant step --- .github/workflows/run_tests.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index db4456e..c434213 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -17,10 +17,8 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install pytest pytest-mock + pip install pytest pytest-mock coverage pytest-cov pip install -r requirements.txt - - name: Install coverage - run: pip install coverage pytest-cov - name: Testing the code with PyTest run: | cp config.py.example config.py
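Finally, a sketch of how the hostgroup-format validation from the last few patches can be exercised on its own. The custom-field objects below are stand-ins for the pynetbox records returned by `netbox.extras.custom_fields.filter(...)`, the `zabbix_group` custom-field name is purely hypothetical, and valid formats are assumed to pass the check without raising:

```python
# Sketch only; illustrative use of verify_hg_format() with device custom fields.
from logging import getLogger
from types import SimpleNamespace

from modules.tools import verify_hg_format

logger = getLogger("hg-check")

# Stand-in for the custom-field records fetched with
# netbox.extras.custom_fields.filter(type="text", content_types="dcim.device").
device_cfs = [SimpleNamespace(name="zabbix_group")]  # hypothetical CF name

# A plain format string that mixes built-in items with the custom field ...
verify_hg_format("site/tenant/zabbix_group", device_cfs=device_cfs,
                 hg_type="dev", logger=logger)
# ... and the list form documented in PATCH 88.
verify_hg_format(["region/site_group/site", "role"], hg_type="dev", logger=logger)
```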