From 9a9660a765f46054660aff83917c1a47c5a46888 Mon Sep 17 00:00:00 2001
From: Jeremy Stretch
Date: Mon, 26 Aug 2019 11:59:38 -0400
Subject: [PATCH] Fix errant changelog entries when executing a script without
 committing

---
 netbox/extras/middleware.py | 26 +++++++++++++++++++++++++-
 netbox/extras/scripts.py    |  3 +++
 netbox/extras/signals.py    | 12 ++++++++++++
 3 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/netbox/extras/middleware.py b/netbox/extras/middleware.py
index b0b5a014d..79d543907 100644
--- a/netbox/extras/middleware.py
+++ b/netbox/extras/middleware.py
@@ -9,11 +9,12 @@ from django.utils import timezone
 from django.utils.functional import curry
 from django_prometheus.models import model_deletes, model_inserts, model_updates
 
-from extras.webhooks import enqueue_webhooks
 from .constants import (
     OBJECTCHANGE_ACTION_CREATE, OBJECTCHANGE_ACTION_DELETE, OBJECTCHANGE_ACTION_UPDATE,
 )
 from .models import ObjectChange
+from .signals import purge_changelog
+from .webhooks import enqueue_webhooks
 
 _thread_locals = threading.local()
 
@@ -30,6 +31,10 @@ def cache_changed_object(instance, **kwargs):
 
 def _record_object_deleted(request, instance, **kwargs):
 
+    # TODO: Can we cache deletions for later processing like we do for saves? Currently this will trigger an exception
+    # when trying to serialize ManyToMany relations after the object has been deleted. This should be doable if we alter
+    # log_change() to return ObjectChanges to be saved rather than saving them directly.
+
     # Record that the object was deleted
     if hasattr(instance, 'log_change'):
         instance.log_change(request.user, request.id, OBJECTCHANGE_ACTION_DELETE)
@@ -41,6 +46,13 @@ def _record_object_deleted(request, instance, **kwargs):
     model_deletes.labels(instance._meta.model_name).inc()
 
 
+def purge_objectchange_cache(sender, **kwargs):
+    """
+    Delete any queued object changes waiting to be written.
+    """
+    _thread_locals.changed_objects = None
+
+
 class ObjectChangeMiddleware(object):
     """
     This middleware performs three functions in response to an object being created, updated, or deleted:
@@ -74,9 +86,21 @@ class ObjectChangeMiddleware(object):
         post_save.connect(cache_changed_object, dispatch_uid='record_object_saved')
         post_delete.connect(record_object_deleted, dispatch_uid='record_object_deleted')
 
+        # Provide a hook for purging the change cache
+        purge_changelog.connect(purge_objectchange_cache)
+
         # Process the request
         response = self.get_response(request)
 
+        # If the change cache has been purged (e.g. due to an exception) abort the logging of all changes resulting from
+        # this request.
+        if _thread_locals.changed_objects is None:
+
+            # Delete ObjectChanges representing deletions, since these have already been written
+            ObjectChange.objects.filter(request_id=request.id).delete()
+
+            return response
+
         # Create records for any cached objects that were created/updated.
         for obj, action in _thread_locals.changed_objects:
 
diff --git a/netbox/extras/scripts.py b/netbox/extras/scripts.py
index 0bad2fe06..9462ee5bd 100644
--- a/netbox/extras/scripts.py
+++ b/netbox/extras/scripts.py
@@ -18,6 +18,7 @@ from ipam.formfields import IPFormField
 from utilities.exceptions import AbortTransaction
 from .constants import LOG_DEFAULT, LOG_FAILURE, LOG_INFO, LOG_SUCCESS, LOG_WARNING
 from .forms import ScriptForm
+from .signals import purge_changelog
 
 
 __all__ = [
@@ -310,6 +311,8 @@ def run_script(script, data, files, commit=True):
         commit = False
     finally:
         if not commit:
+            # Delete all pending changelog entries
+            purge_changelog.send(Script)
             script.log_info(
                 "Database changes have been reverted automatically."
             )
diff --git a/netbox/extras/signals.py b/netbox/extras/signals.py
index aa173b437..1c20ba6dc 100644
--- a/netbox/extras/signals.py
+++ b/netbox/extras/signals.py
@@ -1,7 +1,12 @@
 from cacheops.signals import cache_invalidated, cache_read
+from django.dispatch import Signal
 from prometheus_client import Counter
 
 
+#
+# Caching
+#
+
 cacheops_cache_hit = Counter('cacheops_cache_hit', 'Number of cache hits')
 cacheops_cache_miss = Counter('cacheops_cache_miss', 'Number of cache misses')
 cacheops_cache_invalidated = Counter('cacheops_cache_invalidated', 'Number of cache invalidations')
@@ -20,3 +25,10 @@ def cache_invalidated_collector(sender, obj_dict, **kwargs):
 
 cache_read.connect(cache_read_collector)
 cache_invalidated.connect(cache_invalidated_collector)
+
+
+#
+# Change logging
+#
+
+purge_changelog = Signal()
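The fix hinges on a small signalling pattern: extras/signals.py defines the custom purge_changelog signal, the middleware connects purge_objectchange_cache() to it while handling each request, and run_script() sends the signal whenever commit is False, so the thread-local change cache is discarded before the middleware writes anything. The following is a minimal standalone sketch of that flow, assuming only that django.dispatch is importable; DemoScript and run_demo_script are hypothetical stand-ins for illustration, not NetBox code.

import threading

from django.dispatch import Signal

# Custom signal, mirroring the one added to extras/signals.py
purge_changelog = Signal()

_thread_locals = threading.local()


def purge_objectchange_cache(sender, **kwargs):
    """
    Delete any queued object changes waiting to be written.
    """
    _thread_locals.changed_objects = None


# The middleware connects the receiver while handling a request...
purge_changelog.connect(purge_objectchange_cache)

# ...and caches pending changes in thread-local storage as save signals fire.
# These two entries are made-up placeholders for cached (object, action) pairs.
_thread_locals.changed_objects = [('device-1', 'update'), ('device-2', 'create')]


class DemoScript:
    """Hypothetical stand-in for extras.scripts.Script."""


def run_demo_script(commit=False):
    # When the script runs without committing, the database transaction is
    # rolled back, so the cached changes must be discarded as well.
    if not commit:
        purge_changelog.send(DemoScript)


run_demo_script(commit=False)

# A purged cache (None) tells the middleware to skip writing ObjectChange
# records and to delete any deletion records already written for the request.
assert _thread_locals.changed_objects is None

Sending a signal instead of importing the middleware keeps scripts.py decoupled from the middleware's thread-local bookkeeping: the middleware remains the only component that knows how cached changes are stored and what a purged cache (None) means.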