diff --git a/docs/development/models.md b/docs/development/models.md index 19b7be6de..a762409b6 100644 --- a/docs/development/models.md +++ b/docs/development/models.md @@ -18,7 +18,7 @@ Depending on its classification, each NetBox model may support various features | [Custom links](../customization/custom-links.md) | `CustomLinksMixin` | `custom_links` | These models support the assignment of custom links | | [Custom validation](../customization/custom-validation.md) | `CustomValidationMixin` | - | Supports the enforcement of custom validation rules | | [Export templates](../customization/export-templates.md) | `ExportTemplatesMixin` | `export_templates` | Users can create custom export templates for these models | -| [Job results](../features/background-jobs.md) | `JobsMixin` | `jobs` | Users can create custom export templates for these models | +| [Job results](../features/background-jobs.md) | `JobsMixin` | `jobs` | Background jobs can be scheduled for these models | | [Journaling](../features/journaling.md) | `JournalingMixin` | `journaling` | These models support persistent historical commentary | | [Synchronized data](../integrations/synchronized-data.md) | `SyncedDataMixin` | `synced_data` | Certain model data can be automatically synchronized from a remote data source | | [Tagging](../models/extras/tag.md) | `TagsMixin` | `tags` | The models can be tagged with user-defined tags | diff --git a/docs/plugins/development/background-jobs.md b/docs/plugins/development/background-jobs.md new file mode 100644 index 000000000..e642fe585 --- /dev/null +++ b/docs/plugins/development/background-jobs.md @@ -0,0 +1,97 @@ +# Background Jobs + +NetBox plugins can defer certain operations by enqueuing [background jobs](../../features/background-jobs.md), which are executed asynchronously by background workers. This is helpful for decoupling long-running processes from the user-facing request-response cycle. 
+ +For example, your plugin might need to fetch data from a remote system. Depending on the amount of data and the responsiveness of the remote server, this could take a few minutes. Deferring this task to a queued job ensures that it can be completed in the background, without interrupting the user. The data it fetches can be made available once the job has completed. + +## Job Runners + +A background job implements a basic [Job](../../models/core/job.md) executor for all kinds of tasks. It has logic implemented to handle the management of the associated job object, rescheduling of periodic jobs in the given interval and error handling. Adding custom jobs is done by subclassing NetBox's `JobRunner` class. + +::: utilities.jobs.JobRunner + +#### Example + +```python title="jobs.py" +from utilities.jobs import JobRunner + + +class MyTestJob(JobRunner): + class Meta: + name = "My Test Job" + + def run(self, *args, **kwargs): + obj = self.job.object + # your logic goes here +``` + +You can schedule the background job from within your code (e.g. from a model's `save()` method or a view) by calling `MyTestJob.enqueue()`. This method passes through all arguments to `Job.enqueue()`. However, no `name` argument must be passed, as the background job name will be used instead. + +### Attributes + +`JobRunner` attributes are defined under a class named `Meta` within the job. These are optional, but encouraged. + +#### `name` + +This is the human-friendly name of your background job. If omitted, the class name will be used. + +### Scheduled Jobs + +As described above, jobs can be scheduled for immediate execution or at any later time using the `enqueue()` method. However, for management purposes, the `enqueue_once()` method allows a job to be scheduled exactly once, avoiding duplicates. If a job is already scheduled for a particular instance, a second one won't be scheduled, respecting thread safety. 
An example use case would be to schedule a periodic task that is bound to an instance in general, but not to any event of that instance (such as updates). The parameters of the `enqueue_once()` method are identical to those of `enqueue()`. + +!!! tip + It is not forbidden to `enqueue()` additional jobs while an interval schedule is active. An example use of this would be to schedule a periodic daily synchronization, but also trigger additional synchronizations on demand when the user presses a button. + +#### Example + +```python title="jobs.py" +from utilities.jobs import JobRunner + + +class MyHousekeepingJob(JobRunner): + class Meta: + name = "Housekeeping" + + def run(self, *args, **kwargs): + # your logic goes here +``` + +```python title="__init__.py" +from netbox.plugins import PluginConfig + +class MyPluginConfig(PluginConfig): + def ready(self): + from .jobs import MyHousekeepingJob + MyHousekeepingJob.setup(interval=60) +``` + +## Task queues + +Three task queues of differing priority are defined by default: + +* High +* Default +* Low + +Any tasks in the "high" queue are completed before the default queue is checked, and any tasks in the default queue are completed before those in the "low" queue. + +Plugins can also add custom queues for their own needs by setting the `queues` attribute under the PluginConfig class. An example is included below: + +```python +class MyPluginConfig(PluginConfig): + name = 'myplugin' + ... + queues = [ + 'foo', + 'bar', + ] +``` + +The `PluginConfig` above creates two custom queues with the following names `my_plugin.foo` and `my_plugin.bar`. (The plugin's name is prepended to each queue to avoid conflicts between plugins.) + +!!! warning "Configuring the RQ worker process" + By default, NetBox's RQ worker process only services the high, default, and low queues. Plugins which introduce custom queues should advise users to either reconfigure the default worker, or run a dedicated worker specifying the necessary queues. 
For example: + + ``` + python manage.py rqworker my_plugin.foo my_plugin.bar + ``` diff --git a/docs/plugins/development/background-tasks.md b/docs/plugins/development/background-tasks.md deleted file mode 100644 index 5ed05752a..000000000 --- a/docs/plugins/development/background-tasks.md +++ /dev/null @@ -1,30 +0,0 @@ -# Background Tasks - -NetBox supports the queuing of tasks that need to be performed in the background, decoupled from the request-response cycle, using the [Python RQ](https://python-rq.org/) library. Three task queues of differing priority are defined by default: - -* High -* Default -* Low - -Any tasks in the "high" queue are completed before the default queue is checked, and any tasks in the default queue are completed before those in the "low" queue. - -Plugins can also add custom queues for their own needs by setting the `queues` attribute under the PluginConfig class. An example is included below: - -```python -class MyPluginConfig(PluginConfig): - name = 'myplugin' - ... - queues = [ - 'foo', - 'bar', - ] -``` - -The PluginConfig above creates two custom queues with the following names `my_plugin.foo` and `my_plugin.bar`. (The plugin's name is prepended to each queue to avoid conflicts between plugins.) - -!!! warning "Configuring the RQ worker process" - By default, NetBox's RQ worker process only services the high, default, and low queues. Plugins which introduce custom queues should advise users to either reconfigure the default worker, or run a dedicated worker specifying the necessary queues. 
For example: - - ``` - python manage.py rqworker my_plugin.foo my_plugin.bar - ``` diff --git a/docs/plugins/development/index.md b/docs/plugins/development/index.md index c042be6ec..f3f9a3e4f 100644 --- a/docs/plugins/development/index.md +++ b/docs/plugins/development/index.md @@ -47,6 +47,7 @@ project-name/ - __init__.py - filtersets.py - graphql.py + - jobs.py - models.py - middleware.py - navigation.py diff --git a/docs/plugins/development/models.md b/docs/plugins/development/models.md index 902ee9c82..03cedda16 100644 --- a/docs/plugins/development/models.md +++ b/docs/plugins/development/models.md @@ -130,6 +130,8 @@ For more information about database migrations, see the [Django documentation](h ::: netbox.models.features.ExportTemplatesMixin +::: netbox.models.features.JobsMixin + ::: netbox.models.features.JournalingMixin ::: netbox.models.features.TagsMixin diff --git a/mkdocs.yml b/mkdocs.yml index 841a9df47..ea93ee584 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -146,7 +146,7 @@ nav: - Data Backends: 'plugins/development/data-backends.md' - REST API: 'plugins/development/rest-api.md' - GraphQL API: 'plugins/development/graphql-api.md' - - Background Tasks: 'plugins/development/background-tasks.md' + - Background Jobs: 'plugins/development/background-jobs.md' - Dashboard Widgets: 'plugins/development/dashboard-widgets.md' - Staged Changes: 'plugins/development/staged-changes.md' - Exceptions: 'plugins/development/exceptions.md' diff --git a/netbox/circuits/forms/model_forms.py b/netbox/circuits/forms/model_forms.py index 554f2af5a..9a54fdccb 100644 --- a/netbox/circuits/forms/model_forms.py +++ b/netbox/circuits/forms/model_forms.py @@ -198,6 +198,7 @@ class CircuitGroupAssignmentForm(NetBoxModelForm): circuit = DynamicModelChoiceField( label=_('Circuit'), queryset=Circuit.objects.all(), + selector=True ) class Meta: diff --git a/netbox/circuits/migrations/0044_circuitgroup_circuitgroupassignment_and_more.py 
b/netbox/circuits/migrations/0044_circuit_groups.py similarity index 98% rename from netbox/circuits/migrations/0044_circuitgroup_circuitgroupassignment_and_more.py rename to netbox/circuits/migrations/0044_circuit_groups.py index 40ea5bd1e..f3984877f 100644 --- a/netbox/circuits/migrations/0044_circuitgroup_circuitgroupassignment_and_more.py +++ b/netbox/circuits/migrations/0044_circuit_groups.py @@ -78,7 +78,7 @@ class Migration(migrations.Migration): options={ 'verbose_name': 'Circuit group assignment', 'verbose_name_plural': 'Circuit group assignments', - 'ordering': ('circuit', 'priority', 'pk'), + 'ordering': ('group', 'circuit', 'priority', 'pk'), }, ), migrations.AddConstraint( diff --git a/netbox/circuits/models/circuits.py b/netbox/circuits/models/circuits.py index 7c5e5f2b5..68c938aa9 100644 --- a/netbox/circuits/models/circuits.py +++ b/netbox/circuits/models/circuits.py @@ -203,7 +203,7 @@ class CircuitGroupAssignment(CustomFieldsMixin, ExportTemplatesMixin, TagsMixin, ) class Meta: - ordering = ('circuit', 'priority', 'pk') + ordering = ('group', 'circuit', 'priority', 'pk') constraints = ( models.UniqueConstraint( fields=('circuit', 'group'), diff --git a/netbox/circuits/tables/circuits.py b/netbox/circuits/tables/circuits.py index 3145df43e..1c9791283 100644 --- a/netbox/circuits/tables/circuits.py +++ b/netbox/circuits/tables/circuits.py @@ -77,18 +77,22 @@ class CircuitTable(TenancyColumnsMixin, ContactsColumnMixin, NetBoxTable): verbose_name=_('Commit Rate') ) comments = columns.MarkdownColumn( - verbose_name=_('Comments'), + verbose_name=_('Comments') ) tags = columns.TagColumn( url_name='circuits:circuit_list' ) + assignments = columns.ManyToManyColumn( + verbose_name=_('Assignments'), + linkify_item=True + ) class Meta(NetBoxTable.Meta): model = Circuit fields = ( 'pk', 'id', 'cid', 'provider', 'provider_account', 'type', 'status', 'tenant', 'tenant_group', 'termination_a', 'termination_z', 'install_date', 'termination_date', 'commit_rate', 
'description', - 'comments', 'contacts', 'tags', 'created', 'last_updated', + 'comments', 'contacts', 'tags', 'created', 'last_updated', 'assignments', ) default_columns = ( 'pk', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'description', diff --git a/netbox/core/api/views.py b/netbox/core/api/views.py index ff488e3cd..b3a024c02 100644 --- a/netbox/core/api/views.py +++ b/netbox/core/api/views.py @@ -7,6 +7,8 @@ from rest_framework.routers import APIRootView from rest_framework.viewsets import ReadOnlyModelViewSet from core import filtersets +from core.choices import DataSourceStatusChoices +from core.jobs import SyncDataSourceJob from core.models import * from netbox.api.metadata import ContentTypeMetadata from netbox.api.viewsets import NetBoxModelViewSet, NetBoxReadOnlyModelViewSet @@ -36,7 +38,11 @@ class DataSourceViewSet(NetBoxModelViewSet): if not request.user.has_perm('core.sync_datasource', obj=datasource): raise PermissionDenied(_("This user does not have permission to synchronize this data source.")) - datasource.enqueue_sync_job(request) + # Enqueue the sync job & update the DataSource's status + SyncDataSourceJob.enqueue(instance=datasource, user=request.user) + datasource.status = DataSourceStatusChoices.QUEUED + DataSource.objects.filter(pk=datasource.pk).update(status=datasource.status) + serializer = serializers.DataSourceSerializer(datasource, context={'request': request}) return Response(serializer.data) diff --git a/netbox/core/choices.py b/netbox/core/choices.py index ee0febaff..01a072ce1 100644 --- a/netbox/core/choices.py +++ b/netbox/core/choices.py @@ -59,6 +59,12 @@ class JobStatusChoices(ChoiceSet): (STATUS_FAILED, _('Failed'), 'red'), ) + ENQUEUED_STATE_CHOICES = ( + STATUS_PENDING, + STATUS_SCHEDULED, + STATUS_RUNNING, + ) + TERMINAL_STATE_CHOICES = ( STATUS_COMPLETED, STATUS_ERRORED, diff --git a/netbox/core/jobs.py b/netbox/core/jobs.py index 264313e62..1c38cf61f 100644 --- a/netbox/core/jobs.py 
+++ b/netbox/core/jobs.py @@ -1,33 +1,33 @@ import logging from netbox.search.backends import search_backend -from .choices import * +from utilities.jobs import JobRunner +from .choices import DataSourceStatusChoices from .exceptions import SyncError from .models import DataSource -from rq.timeouts import JobTimeoutException logger = logging.getLogger(__name__) -def sync_datasource(job, *args, **kwargs): +class SyncDataSourceJob(JobRunner): """ Call sync() on a DataSource. """ - datasource = DataSource.objects.get(pk=job.object_id) - try: - job.start() - datasource.sync() + class Meta: + name = 'Synchronization' - # Update the search cache for DataFiles belonging to this source - search_backend.cache(datasource.datafiles.iterator()) + def run(self, *args, **kwargs): + datasource = DataSource.objects.get(pk=self.job.object_id) - job.terminate() + try: + datasource.sync() - except Exception as e: - job.terminate(status=JobStatusChoices.STATUS_ERRORED, error=repr(e)) - DataSource.objects.filter(pk=datasource.pk).update(status=DataSourceStatusChoices.FAILED) - if type(e) in (SyncError, JobTimeoutException): - logging.error(e) - else: + # Update the search cache for DataFiles belonging to this source + search_backend.cache(datasource.datafiles.iterator()) + + except Exception as e: + DataSource.objects.filter(pk=datasource.pk).update(status=DataSourceStatusChoices.FAILED) + if type(e) is SyncError: + logging.error(e) raise e diff --git a/netbox/core/migrations/0012_job_object_type_optional.py b/netbox/core/migrations/0012_job_object_type_optional.py new file mode 100644 index 000000000..3c6664afc --- /dev/null +++ b/netbox/core/migrations/0012_job_object_type_optional.py @@ -0,0 +1,24 @@ +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('contenttypes', '0002_remove_content_type_name'), + ('core', '0011_move_objectchange'), + ] + + operations = [ + migrations.AlterField( + 
model_name='job', + name='object_type', + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name='jobs', + to='contenttypes.contenttype' + ), + ), + ] diff --git a/netbox/core/models/data.py b/netbox/core/models/data.py index 48fa2ff71..a8e90ec3f 100644 --- a/netbox/core/models/data.py +++ b/netbox/core/models/data.py @@ -1,10 +1,10 @@ import hashlib import logging import os -import yaml from fnmatch import fnmatchcase from urllib.parse import urlparse +import yaml from django.conf import settings from django.contrib.contenttypes.fields import GenericForeignKey from django.core.exceptions import ValidationError @@ -12,7 +12,6 @@ from django.core.validators import RegexValidator from django.db import models from django.urls import reverse from django.utils import timezone -from django.utils.module_loading import import_string from django.utils.translation import gettext as _ from netbox.constants import CENSOR_TOKEN, CENSOR_TOKEN_CHANGED @@ -23,7 +22,6 @@ from utilities.querysets import RestrictedQuerySet from ..choices import * from ..exceptions import SyncError from ..signals import post_sync, pre_sync -from .jobs import Job __all__ = ( 'AutoSyncRecord', @@ -153,21 +151,6 @@ class DataSource(JobsMixin, PrimaryModel): return objectchange - def enqueue_sync_job(self, request): - """ - Enqueue a background job to synchronize the DataSource by calling sync(). 
- """ - # Set the status to "syncing" - self.status = DataSourceStatusChoices.QUEUED - DataSource.objects.filter(pk=self.pk).update(status=self.status) - - # Enqueue a sync job - return Job.enqueue( - import_string('core.jobs.sync_datasource'), - instance=self, - user=request.user - ) - def get_backend(self): backend_params = self.parameters or {} return self.backend_class(self.source_url, **backend_params) diff --git a/netbox/core/models/jobs.py b/netbox/core/models/jobs.py index c5fbb918c..1d0e7fdeb 100644 --- a/netbox/core/models/jobs.py +++ b/netbox/core/models/jobs.py @@ -31,6 +31,8 @@ class Job(models.Model): to='contenttypes.ContentType', related_name='jobs', on_delete=models.CASCADE, + blank=True, + null=True ) object_id = models.PositiveBigIntegerField( blank=True, @@ -197,25 +199,34 @@ class Job(models.Model): job_end.send(self) @classmethod - def enqueue(cls, func, instance, name='', user=None, schedule_at=None, interval=None, **kwargs): + def enqueue(cls, func, instance=None, name='', user=None, schedule_at=None, interval=None, immediate=False, **kwargs): """ Create a Job instance and enqueue a job using the given callable Args: func: The callable object to be enqueued for execution - instance: The NetBox object to which this job pertains + instance: The NetBox object to which this job pertains (optional) name: Name for the job (optional) user: The user responsible for running the job schedule_at: Schedule the job to be executed at the passed date and time interval: Recurrence interval (in minutes) + immediate: Run the job immediately without scheduling it in the background. Should be used for interactive + management commands only. 
""" - object_type = ObjectType.objects.get_for_model(instance, for_concrete_model=False) - rq_queue_name = get_queue_for_model(object_type.model) + if schedule_at and immediate: + raise ValueError("enqueue() cannot be called with values for both schedule_at and immediate.") + + if instance: + object_type = ObjectType.objects.get_for_model(instance, for_concrete_model=False) + object_id = instance.pk + else: + object_type = object_id = None + rq_queue_name = get_queue_for_model(object_type.model if object_type else None) queue = django_rq.get_queue(rq_queue_name) status = JobStatusChoices.STATUS_SCHEDULED if schedule_at else JobStatusChoices.STATUS_PENDING job = Job.objects.create( object_type=object_type, - object_id=instance.pk, + object_id=object_id, name=name, status=status, scheduled=schedule_at, @@ -224,8 +235,16 @@ class Job(models.Model): job_id=uuid.uuid4() ) - if schedule_at: + # Run the job immediately, rather than enqueuing it as a background task. Note that this is a synchronous + # (blocking) operation, and execution will pause until the job completes. + if immediate: + func(job_id=str(job.job_id), job=job, **kwargs) + + # Schedule the job to run at a specific date & time. + elif schedule_at: queue.enqueue_at(schedule_at, func, job_id=str(job.job_id), job=job, **kwargs) + + # Schedule the job to run asynchronously at this first available opportunity. 
else: queue.enqueue(func, job_id=str(job.job_id), job=job, **kwargs) diff --git a/netbox/core/plugins.py b/netbox/core/plugins.py index ab8fd97a1..374cfbe9a 100644 --- a/netbox/core/plugins.py +++ b/netbox/core/plugins.py @@ -155,7 +155,6 @@ def get_catalog_plugins(): # Populate author (if any) if data['author']: - print(data['author']) author = PluginAuthor( name=data['author']['name'], org_id=data['author']['org_id'], diff --git a/netbox/core/tables/plugins.py b/netbox/core/tables/plugins.py index 529fe60f4..f89a886d2 100644 --- a/netbox/core/tables/plugins.py +++ b/netbox/core/tables/plugins.py @@ -44,7 +44,7 @@ class CatalogPluginTable(BaseTable): verbose_name=_('Name') ) author = tables.Column( - accessor=tables.A('author.name'), + accessor=tables.A('author__name'), verbose_name=_('Author') ) is_local = columns.BooleanColumn( diff --git a/netbox/core/views.py b/netbox/core/views.py index 79ef33e1d..06c8b00d0 100644 --- a/netbox/core/views.py +++ b/netbox/core/views.py @@ -34,6 +34,8 @@ from utilities.htmx import htmx_partial from utilities.query import count_related from utilities.views import ContentTypePermissionRequiredMixin, GetRelatedModelsMixin, register_model_view from . 
import filtersets, forms, tables +from .choices import DataSourceStatusChoices +from .jobs import SyncDataSourceJob from .models import * from .plugins import get_plugins from .tables import CatalogPluginTable, PluginVersionTable @@ -76,7 +78,11 @@ class DataSourceSyncView(BaseObjectView): def post(self, request, pk): datasource = get_object_or_404(self.queryset, pk=pk) - job = datasource.enqueue_sync_job(request) + + # Enqueue the sync job & update the DataSource's status + job = SyncDataSourceJob.enqueue(instance=datasource, user=request.user) + datasource.status = DataSourceStatusChoices.QUEUED + DataSource.objects.filter(pk=datasource.pk).update(status=datasource.status) messages.success(request, f"Queued job #{job.pk} to sync {datasource}") return redirect(datasource.get_absolute_url()) diff --git a/netbox/extras/api/views.py b/netbox/extras/api/views.py index 2369e8f10..142be1b8a 100644 --- a/netbox/extras/api/views.py +++ b/netbox/extras/api/views.py @@ -1,5 +1,6 @@ from django.http import Http404 from django.shortcuts import get_object_or_404 +from django.utils.module_loading import import_string from django_rq.queues import get_connection from rest_framework import status from rest_framework.decorators import action @@ -11,10 +12,10 @@ from rest_framework.routers import APIRootView from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet from rq import Worker -from core.models import Job, ObjectType +from core.models import ObjectType from extras import filtersets from extras.models import * -from extras.scripts import run_script +from extras.jobs import ScriptJob from netbox.api.authentication import IsAuthenticatedOrLoginNotRequired from netbox.api.features import SyncedDataMixin from netbox.api.metadata import ContentTypeMetadata @@ -273,10 +274,8 @@ class ScriptViewSet(ModelViewSet): raise RQWorkerNotRunningException() if input_serializer.is_valid(): - Job.enqueue( - run_script, + ScriptJob.enqueue( instance=script, - 
name=script.python_class.class_name, user=request.user, data=input_serializer.data['data'], request=copy_safe_request(request), diff --git a/netbox/extras/choices.py b/netbox/extras/choices.py index 387716c85..4525d8689 100644 --- a/netbox/extras/choices.py +++ b/netbox/extras/choices.py @@ -156,16 +156,16 @@ class LogLevelChoices(ChoiceSet): LOG_DEBUG = 'debug' LOG_DEFAULT = 'default' - LOG_SUCCESS = 'success' LOG_INFO = 'info' + LOG_SUCCESS = 'success' LOG_WARNING = 'warning' LOG_FAILURE = 'failure' CHOICES = ( (LOG_DEBUG, _('Debug'), 'teal'), (LOG_DEFAULT, _('Default'), 'gray'), - (LOG_SUCCESS, _('Success'), 'green'), (LOG_INFO, _('Info'), 'cyan'), + (LOG_SUCCESS, _('Success'), 'green'), (LOG_WARNING, _('Warning'), 'yellow'), (LOG_FAILURE, _('Failure'), 'red'), ) @@ -173,8 +173,8 @@ class LogLevelChoices(ChoiceSet): SYSTEM_LEVELS = { LOG_DEBUG: logging.DEBUG, LOG_DEFAULT: logging.INFO, - LOG_SUCCESS: logging.INFO, LOG_INFO: logging.INFO, + LOG_SUCCESS: logging.INFO, LOG_WARNING: logging.WARNING, LOG_FAILURE: logging.ERROR, } @@ -191,35 +191,6 @@ class DurationChoices(ChoiceSet): ) -# -# Job results -# - -class JobResultStatusChoices(ChoiceSet): - - STATUS_PENDING = 'pending' - STATUS_SCHEDULED = 'scheduled' - STATUS_RUNNING = 'running' - STATUS_COMPLETED = 'completed' - STATUS_ERRORED = 'errored' - STATUS_FAILED = 'failed' - - CHOICES = ( - (STATUS_PENDING, _('Pending'), 'cyan'), - (STATUS_SCHEDULED, _('Scheduled'), 'gray'), - (STATUS_RUNNING, _('Running'), 'blue'), - (STATUS_COMPLETED, _('Completed'), 'green'), - (STATUS_ERRORED, _('Errored'), 'red'), - (STATUS_FAILED, _('Failed'), 'red'), - ) - - TERMINAL_STATE_CHOICES = ( - STATUS_COMPLETED, - STATUS_ERRORED, - STATUS_FAILED, - ) - - # # Webhooks # diff --git a/netbox/extras/constants.py b/netbox/extras/constants.py index e8e2c6d8a..3bfe3b21b 100644 --- a/netbox/extras/constants.py +++ b/netbox/extras/constants.py @@ -136,10 +136,10 @@ DEFAULT_DASHBOARD = [ ] LOG_LEVEL_RANK = { - LogLevelChoices.LOG_DEFAULT: 
0, - LogLevelChoices.LOG_DEBUG: 1, - LogLevelChoices.LOG_SUCCESS: 2, - LogLevelChoices.LOG_INFO: 3, + LogLevelChoices.LOG_DEBUG: 0, + LogLevelChoices.LOG_DEFAULT: 1, + LogLevelChoices.LOG_INFO: 2, + LogLevelChoices.LOG_SUCCESS: 3, LogLevelChoices.LOG_WARNING: 4, LogLevelChoices.LOG_FAILURE: 5, } diff --git a/netbox/extras/events.py b/netbox/extras/events.py index de6a88c1b..e80f4a558 100644 --- a/netbox/extras/events.py +++ b/netbox/extras/events.py @@ -1,5 +1,6 @@ from collections import defaultdict import logging +from collections import defaultdict from django.conf import settings from django.contrib.auth import get_user_model @@ -10,7 +11,6 @@ from django.utils.translation import gettext as _ from django_rq import get_queue from core.events import * -from core.models import Job from netbox.config import get_config from netbox.constants import RQ_QUEUE_DEFAULT from netbox.registry import registry @@ -126,8 +126,8 @@ def process_event_rules(event_rules, object_type, event_type, data, username=Non script = event_rule.action_object.python_class() # Enqueue a Job to record the script's execution - Job.enqueue( - "extras.scripts.run_script", + from extras.jobs import ScriptJob + ScriptJob.enqueue( instance=event_rule.action_object, name=script.name, user=user, diff --git a/netbox/extras/jobs.py b/netbox/extras/jobs.py new file mode 100644 index 000000000..62f8f6959 --- /dev/null +++ b/netbox/extras/jobs.py @@ -0,0 +1,107 @@ +import logging +import traceback +from contextlib import nullcontext + +from django.db import transaction +from django.utils.translation import gettext as _ + +from extras.models import Script as ScriptModel +from extras.signals import clear_events +from netbox.context_managers import event_tracking +from utilities.exceptions import AbortScript, AbortTransaction +from utilities.jobs import JobRunner +from .utils import is_report + + +class ScriptJob(JobRunner): + """ + Script execution job. + + A wrapper for calling Script.run(). 
This performs error handling and provides a hook for committing changes. It + exists outside the Script class to ensure it cannot be overridden by a script author. + """ + + class Meta: + # An explicit job name is not set because it doesn't make sense in this context. Currently, there's no scenario + # where jobs other than this one are used. Therefore, it is hidden, resulting in a cleaner job table overview. + name = '' + + def run_script(self, script, request, data, commit): + """ + Core script execution task. We capture this within a method to allow for conditionally wrapping it with the + event_tracking context manager (which is bypassed if commit == False). + + Args: + request: The WSGI request associated with this execution (if any) + data: A dictionary of data to be passed to the script upon execution + commit: Passed through to Script.run() + """ + logger = logging.getLogger(f"netbox.scripts.{script.full_name}") + logger.info(f"Running script (commit={commit})") + + try: + try: + with transaction.atomic(): + script.output = script.run(data, commit) + if not commit: + raise AbortTransaction() + except AbortTransaction: + script.log_info(message=_("Database changes have been reverted automatically.")) + if script.failed: + logger.warning(f"Script failed") + raise + + except Exception as e: + if type(e) is AbortScript: + msg = _("Script aborted with error: ") + str(e) + if is_report(type(script)): + script.log_failure(message=msg) + else: + script.log_failure(msg) + logger.error(f"Script aborted with error: {e}") + + else: + stacktrace = traceback.format_exc() + script.log_failure( + message=_("An exception occurred: ") + f"`{type(e).__name__}: {e}`\n```\n{stacktrace}\n```" + ) + logger.error(f"Exception raised during script execution: {e}") + + if type(e) is not AbortTransaction: + script.log_info(message=_("Database changes have been reverted due to error.")) + + # Clear all pending events. 
Job termination (including setting the status) is handled by the job framework. + if request: + clear_events.send(request) + raise + + # Update the job data regardless of the execution status of the job. Successes should be reported as well as + # failures. + finally: + self.job.data = script.get_job_data() + + def run(self, data, request=None, commit=True, **kwargs): + """ + Run the script. + + Args: + job: The Job associated with this execution + data: A dictionary of data to be passed to the script upon execution + request: The WSGI request associated with this execution (if any) + commit: Passed through to Script.run() + """ + script = ScriptModel.objects.get(pk=self.job.object_id).python_class() + + # Add files to form data + if request: + files = request.FILES + for field_name, fileobj in files.items(): + data[field_name] = fileobj + + # Add the current request as a property of the script + script.request = request + + # Execute the script. If commit is True, wrap it with the event_tracking context manager to ensure we process + # change logging, event rules, etc. 
+ with event_tracking(request) if commit else nullcontext(): + self.run_script(script, request, data, commit) diff --git a/netbox/extras/management/commands/runscript.py b/netbox/extras/management/commands/runscript.py index dbfbb40d9..b6d6810ac 100644 --- a/netbox/extras/management/commands/runscript.py +++ b/netbox/extras/management/commands/runscript.py @@ -1,19 +1,14 @@ import json import logging import sys -import traceback import uuid from django.contrib.auth import get_user_model from django.core.management.base import BaseCommand, CommandError -from django.db import transaction +from django.utils.module_loading import import_string -from core.choices import JobStatusChoices -from core.models import Job +from extras.jobs import ScriptJob from extras.scripts import get_module_and_script -from extras.signals import clear_events -from netbox.context_managers import event_tracking -from utilities.exceptions import AbortTransaction from utilities.request import NetBoxFakeRequest @@ -33,44 +28,6 @@ class Command(BaseCommand): parser.add_argument('script', help="Script to run") def handle(self, *args, **options): - - def _run_script(): - """ - Core script execution task. We capture this within a subfunction to allow for conditionally wrapping it with - the event_tracking context manager (which is bypassed if commit == False). 
- """ - try: - try: - with transaction.atomic(): - script.output = script.run(data=data, commit=commit) - if not commit: - raise AbortTransaction() - except AbortTransaction: - script.log_info("Database changes have been reverted automatically.") - clear_events.send(request) - job.data = script.get_job_data() - job.terminate() - except Exception as e: - stacktrace = traceback.format_exc() - script.log_failure( - f"An exception occurred: `{type(e).__name__}: {e}`\n```\n{stacktrace}\n```" - ) - script.log_info("Database changes have been reverted due to error.") - logger.error(f"Exception raised during script execution: {e}") - clear_events.send(request) - job.data = script.get_job_data() - job.terminate(status=JobStatusChoices.STATUS_ERRORED, error=repr(e)) - - # Print any test method results - for test_name, attrs in job.data['tests'].items(): - self.stdout.write( - "\t{}: {} success, {} info, {} warning, {} failure".format( - test_name, attrs['success'], attrs['info'], attrs['warning'], attrs['failure'] - ) - ) - - logger.info(f"Script completed in {job.duration}") - User = get_user_model() # Params @@ -84,8 +41,8 @@ class Command(BaseCommand): data = {} module_name, script_name = script.split('.', 1) - module, script = get_module_and_script(module_name, script_name) - script = script.python_class + module, script_obj = get_module_and_script(module_name, script_name) + script = script_obj.python_class # Take user from command line if provided and exists, other if options['user']: @@ -120,40 +77,29 @@ class Command(BaseCommand): # Initialize the script form script = script() form = script.as_form(data, None) - - # Create the job - job = Job.objects.create( - object=module, - name=script.class_name, - user=User.objects.filter(is_superuser=True).order_by('pk')[0], - job_id=uuid.uuid4() - ) - - request = NetBoxFakeRequest({ - 'META': {}, - 'POST': data, - 'GET': {}, - 'FILES': {}, - 'user': user, - 'path': '', - 'id': job.job_id - }) - - if form.is_valid(): - 
job.status = JobStatusChoices.STATUS_RUNNING - job.save() - - logger.info(f"Running script (commit={commit})") - script.request = request - - # Execute the script. If commit is True, wrap it with the event_tracking context manager to ensure we process - # change logging, webhooks, etc. - with event_tracking(request): - _run_script() - else: + if not form.is_valid(): logger.error('Data is not valid:') for field, errors in form.errors.get_json_data().items(): for error in errors: logger.error(f'\t{field}: {error.get("message")}') - job.status = JobStatusChoices.STATUS_ERRORED - job.save() + raise CommandError() + + # Execute the script. + job = ScriptJob.enqueue( + instance=script_obj, + user=user, + immediate=True, + data=data, + request=NetBoxFakeRequest({ + 'META': {}, + 'POST': data, + 'GET': {}, + 'FILES': {}, + 'user': user, + 'path': '', + 'id': uuid.uuid4() + }), + commit=commit, + ) + + logger.info(f"Script completed in {job.duration}") diff --git a/netbox/extras/scripts.py b/netbox/extras/scripts.py index f6cc2bad0..96d08d8af 100644 --- a/netbox/extras/scripts.py +++ b/netbox/extras/scripts.py @@ -2,32 +2,23 @@ import inspect import json import logging import os -import traceback -from datetime import timedelta import yaml from django import forms from django.conf import settings from django.core.validators import RegexValidator -from django.db import transaction from django.utils import timezone from django.utils.functional import classproperty from django.utils.translation import gettext as _ -from core.choices import JobStatusChoices -from core.models import Job from extras.choices import LogLevelChoices -from extras.models import ScriptModule, Script as ScriptModel -from extras.signals import clear_events +from extras.models import ScriptModule from ipam.formfields import IPAddressFormField, IPNetworkFormField from ipam.validators import MaxPrefixLengthValidator, MinPrefixLengthValidator, prefix_validator -from netbox.context_managers import 
event_tracking -from utilities.exceptions import AbortScript, AbortTransaction from utilities.forms import add_blank_choice from utilities.forms.fields import DynamicModelChoiceField, DynamicModelMultipleChoiceField from utilities.forms.widgets import DatePicker, DateTimePicker from .forms import ScriptForm -from .utils import is_report __all__ = ( @@ -48,7 +39,6 @@ __all__ = ( 'StringVar', 'TextVar', 'get_module_and_script', - 'run_script', ) @@ -613,111 +603,3 @@ def get_module_and_script(module_name, script_name): module = ScriptModule.objects.get(file_path=f'{module_name}.py') script = module.scripts.get(name=script_name) return module, script - - -def run_script(data, job, request=None, commit=True, **kwargs): - """ - A wrapper for calling Script.run(). This performs error handling and provides a hook for committing changes. It - exists outside the Script class to ensure it cannot be overridden by a script author. - - Args: - data: A dictionary of data to be passed to the script upon execution - job: The Job associated with this execution - request: The WSGI request associated with this execution (if any) - commit: Passed through to Script.run() - """ - job.start() - - script = ScriptModel.objects.get(pk=job.object_id).python_class() - - logger = logging.getLogger(f"netbox.scripts.{script.full_name}") - logger.info(f"Running script (commit={commit})") - - # Add files to form data - if request: - files = request.FILES - for field_name, fileobj in files.items(): - data[field_name] = fileobj - - # Add the current request as a property of the script - script.request = request - - def set_job_data(script): - job.data = { - 'log': script.messages, - 'output': script.output, - 'tests': script.tests, - } - - return job - - def _run_script(job): - """ - Core script execution task. We capture this within a subfunction to allow for conditionally wrapping it with - the event_tracking context manager (which is bypassed if commit == False). 
- """ - try: - try: - with transaction.atomic(): - script.output = script.run(data, commit) - if not commit: - raise AbortTransaction() - except AbortTransaction: - script.log_info(message=_("Database changes have been reverted automatically.")) - if request: - clear_events.send(request) - - job.data = script.get_job_data() - if script.failed: - logger.warning(f"Script failed") - job.terminate(status=JobStatusChoices.STATUS_FAILED) - else: - job.terminate() - - except Exception as e: - if type(e) is AbortScript: - msg = _("Script aborted with error: ") + str(e) - if is_report(type(script)): - script.log_failure(message=msg) - else: - script.log_failure(msg) - - logger.error(f"Script aborted with error: {e}") - else: - stacktrace = traceback.format_exc() - script.log_failure( - message=_("An exception occurred: ") + f"`{type(e).__name__}: {e}`\n```\n{stacktrace}\n```" - ) - logger.error(f"Exception raised during script execution: {e}") - script.log_info(message=_("Database changes have been reverted due to error.")) - - job.data = script.get_job_data() - job.terminate(status=JobStatusChoices.STATUS_ERRORED, error=repr(e)) - if request: - clear_events.send(request) - - logger.info(f"Script completed in {job.duration}") - - # Execute the script. If commit is True, wrap it with the event_tracking context manager to ensure we process - # change logging, event rules, etc. 
- if commit: - with event_tracking(request): - _run_script(job) - else: - _run_script(job) - - # Schedule the next job if an interval has been set - if job.interval: - new_scheduled_time = job.scheduled + timedelta(minutes=job.interval) - Job.enqueue( - run_script, - instance=job.object, - name=job.name, - user=job.user, - schedule_at=new_scheduled_time, - interval=job.interval, - job_timeout=script.job_timeout, - data=data, - request=request, - commit=commit - ) diff --git a/netbox/extras/views.py b/netbox/extras/views.py index 2ade79c96..54f00265a 100644 --- a/netbox/extras/views.py +++ b/netbox/extras/views.py @@ -6,6 +6,7 @@ from django.db.models import Count, Q from django.http import HttpResponseBadRequest, HttpResponseForbidden, HttpResponse from django.shortcuts import get_object_or_404, redirect, render from django.urls import reverse +from django.utils.module_loading import import_string from django.utils import timezone from django.utils.translation import gettext as _ from django.views.generic import View @@ -35,7 +36,6 @@ from virtualization.models import VirtualMachine from . 
import filtersets, forms, tables from .constants import LOG_LEVEL_RANK from .models import * -from .scripts import run_script from .tables import ReportResultsTable, ScriptResultsTable @@ -1175,10 +1175,9 @@ class ScriptView(BaseScriptView): if not get_workers_for_queue('default'): messages.error(request, _("Unable to run script: RQ worker process not running.")) elif form.is_valid(): - job = Job.enqueue( - run_script, + ScriptJob = import_string("extras.jobs.ScriptJob") + job = ScriptJob.enqueue( instance=script, - name=script_class.class_name, user=request.user, schedule_at=form.cleaned_data.pop('_schedule_at'), interval=form.cleaned_data.pop('_interval'), @@ -1246,7 +1245,10 @@ class ScriptResultView(TableMixin, generic.ObjectView): table = None index = 0 - log_threshold = LOG_LEVEL_RANK.get(request.GET.get('log_threshold', LogLevelChoices.LOG_DEFAULT)) + try: + log_threshold = LOG_LEVEL_RANK[request.GET.get('log_threshold', LogLevelChoices.LOG_DEBUG)] + except KeyError: + log_threshold = LOG_LEVEL_RANK[LogLevelChoices.LOG_DEBUG] if job.data: if 'log' in job.data: @@ -1303,12 +1305,16 @@ class ScriptResultView(TableMixin, generic.ObjectView): if job.completed: table = self.get_table(job, request, bulk_actions=False) + log_threshold = request.GET.get('log_threshold', LogLevelChoices.LOG_DEBUG) + if log_threshold not in LOG_LEVEL_RANK: + log_threshold = LogLevelChoices.LOG_DEBUG + context = { 'script': job.object, 'job': job, 'table': table, 'log_levels': dict(LogLevelChoices), - 'log_threshold': request.GET.get('log_threshold', LogLevelChoices.LOG_DEFAULT) + 'log_threshold': log_threshold, } if job.data and 'log' in job.data: diff --git a/netbox/netbox/constants.py b/netbox/netbox/constants.py index cdeda583d..b8c679ec0 100644 --- a/netbox/netbox/constants.py +++ b/netbox/netbox/constants.py @@ -23,6 +23,9 @@ ADVISORY_LOCK_KEYS = { 'wirelesslangroup': 105600, 'inventoryitem': 105700, 'inventoryitemtemplate': 105800, + + # Jobs + 'job-schedules': 110100, } # 
Default view action permission mapping diff --git a/netbox/netbox/models/features.py b/netbox/netbox/models/features.py index b270382d3..15957b547 100644 --- a/netbox/netbox/models/features.py +++ b/netbox/netbox/models/features.py @@ -289,7 +289,7 @@ class CustomFieldsMixin(models.Model): # Validate uniqueness if enforced if custom_fields[field_name].validation_unique and value not in CUSTOMFIELD_EMPTY_VALUES: - if self._meta.model.objects.filter(**{ + if self._meta.model.objects.exclude(pk=self.pk).filter(**{ f'custom_field_data__{field_name}': value }).exists(): raise ValidationError(_("Custom field '{name}' must have a unique value.").format( diff --git a/netbox/templates/circuits/circuitgroupassignment.html b/netbox/templates/circuits/circuitgroupassignment.html index 870e46be8..876bfa76d 100644 --- a/netbox/templates/circuits/circuitgroupassignment.html +++ b/netbox/templates/circuits/circuitgroupassignment.html @@ -20,23 +20,23 @@
{% trans "Group" %} | -{{ object.group }} | +{{ object.group|linkify }} |
---|---|---|
{% trans "Circuit" %} | -{{ object.circuit }} | +{{ object.circuit|linkify }} |
{% trans "Priority" %} | -{{ object.priority }} | +{{ object.get_priority_display }} |