Mirror of https://github.com/netbox-community/netbox.git (synced 2025-12-28 08:07:45 -06:00)

Compare commits: v4.4.1 ... 14884-scri (114 commits)
@@ -15,7 +15,7 @@ body:
attributes:
label: NetBox version
description: What version of NetBox are you currently running?
placeholder: v4.4.1
placeholder: v4.4.3
validations:
required: true
- type: dropdown
6  .github/ISSUE_TEMPLATE/02-bug_report.yaml  (vendored)

@@ -27,7 +27,7 @@ body:
attributes:
label: NetBox Version
description: What version of NetBox are you currently running?
placeholder: v4.4.1
placeholder: v4.4.3
validations:
required: true
- type: dropdown

@@ -35,9 +35,9 @@ body:
label: Python Version
description: What version of Python are you currently running?
options:
- "3.10"
- "3.11"
- "3.12"
- "3.13"
- "3.14"
validations:
required: true
- type: textarea
@@ -25,9 +25,12 @@ body:
- Getting started
- Configuration
- Customization
- Best practices
- Integrations/API
- Plugins
- Administration
- Data model
- Reference
- Development
- Other
validations:
8  .github/codeql/codeql-config.yml  (vendored)

@@ -1,3 +1,11 @@
paths-ignore:
  # Ignore compiled JS
  - netbox/project-static/dist

query-filters:
  # Exclude py/url-redirection: NetBox uses safe_for_redirect() wrapper function
  # which validates all redirects via Django's url_has_allowed_host_and_scheme().
  # CodeQL's taint tracking doesn't recognize wrapper functions without custom
  # query configuration. See #20484.
  - exclude:
      id: py/url-redirection
2  .github/workflows/ci.yml  (vendored)

@@ -31,7 +31,7 @@ jobs:
NETBOX_CONFIGURATION: netbox.configuration_testing
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12']
python-version: ['3.12', '3.13']
node-version: ['20.x']
services:
redis:
@@ -12,9 +12,7 @@ django-cors-headers

# Runtime UI tool for debugging Django
# https://github.com/jazzband/django-debug-toolbar/blob/main/docs/changes.rst
# django-debug-toolbar v6.0.0 raises "Attribute Error at /: 'function' object has no attribute 'set'"
# see https://github.com/netbox-community/netbox/issues/19974
django-debug-toolbar==5.2.0
django-debug-toolbar

# Library for writing reusable URL query filters
# https://github.com/carltongibson/django-filter/blob/main/CHANGES.rst

@@ -30,7 +28,8 @@ django-htmx

# Modified Preorder Tree Traversal (recursive nesting of objects)
# https://github.com/django-mptt/django-mptt/blob/main/CHANGELOG.rst
django-mptt
# v0.18.0 introduces errant migrations which need to be resolved
django-mptt==0.17.0

# Context managers for PostgreSQL advisory locks
# https://github.com/Xof/django-pglocks/blob/master/CHANGES.txt

@@ -70,7 +69,8 @@ django-timezone-field

# A REST API framework for Django projects
# https://www.django-rest-framework.org/community/release-notes/
djangorestframework
# TODO: Re-evaluate the monkey-patch of get_unique_validators() before upgrading
djangorestframework==3.16.1

# Sane and flexible OpenAPI 3 schema generation for Django REST framework.
# https://github.com/tfranzel/drf-spectacular/blob/master/CHANGELOG.rst
@@ -332,14 +332,14 @@
"100base-t1",
"1000base-bx10-d",
"1000base-bx10-u",
"1000base-cx",
"1000base-cwdm",
"1000base-cx",
"1000base-dwdm",
"1000base-ex",
"1000base-sx",
"1000base-lsx",
"1000base-lx",
"1000base-lx10",
"1000base-sx",
"1000base-t",
"1000base-tx",
"1000base-zx",
@@ -374,6 +374,7 @@
"100gbase-cr2",
"100gbase-cr4",
"100gbase-cr10",
"100gbase-cwdm4",
"100gbase-dr",
"100gbase-er4",
"100gbase-fr1",
@@ -387,12 +388,12 @@
"100gbase-zr",
"200gbase-cr2",
"200gbase-cr4",
"200gbase-sr2",
"200gbase-sr4",
"200gbase-dr4",
"200gbase-er4",
"200gbase-fr4",
"200gbase-lr4",
"200gbase-sr2",
"200gbase-sr4",
"200gbase-vr2",
"400gbase-cr4",
"400gbase-dr4",
@@ -415,34 +416,34 @@
"1000base-x-gbic",
"1000base-x-sfp",
"10gbase-x-sfpp",
"10gbase-x-xfp",
"10gbase-x-xenpak",
"10gbase-x-xfp",
"10gbase-x-x2",
"25gbase-x-sfp28",
"50gbase-x-sfp56",
"40gbase-x-qsfpp",
"50gbase-x-sfp28",
"50gbase-x-sfp56",
"100gbase-x-cfp",
"100gbase-x-cfp2",
"200gbase-x-cfp2",
"400gbase-x-cfp2",
"100gbase-x-cfp4",
"100gbase-x-cxp",
"100gbase-x-cpak",
"100gbase-x-dsfp",
"100gbase-x-sfpdd",
"100gbase-x-qsfp28",
"100gbase-x-qsfpdd",
"100gbase-x-sfpdd",
"200gbase-x-cfp2",
"200gbase-x-qsfp56",
"200gbase-x-qsfpdd",
"400gbase-x-qsfp112",
"400gbase-x-qsfpdd",
"400gbase-x-cdfp",
"400gbase-x-cfp2",
"400gbase-x-cfp8",
"400gbase-x-osfp",
"400gbase-x-osfp-rhs",
"400gbase-x-cdfp",
"400gbase-x-cfp8",
"800gbase-x-qsfpdd",
"800gbase-x-osfp",
"800gbase-x-qsfpdd",
"1000base-kx",
"2.5gbase-kx",
"5gbase-kr",
1990  contrib/openapi.json

File diff suppressed because one or more lines are too long
@@ -2,7 +2,7 @@

## Local Authentication

Local user accounts and groups can be created in NetBox under the "Authentication" section in the "Admin" menu. This section is available only to users with the "staff" permission enabled.
Local user accounts and groups can be created in NetBox under the "Authentication" section in the "Admin" menu.

At a minimum, each user account must have a username and password set. User accounts may also denote a first name, last name, and email address. [Permissions](../permissions.md) may also be assigned to individual users and/or groups as needed.
74  docs/best-practices/modeling-pluggable-transceivers.md  (Normal file)

@@ -0,0 +1,74 @@
# Modeling Pluggable Transceivers

## Use Case

Many network devices utilize field-swappable [small-form factor pluggable transceivers (SFPs)](https://en.wikipedia.org/wiki/Small_Form-factor_Pluggable) to enable changing the physical media type of a fixed interface. For example, a 10 Gigabit Ethernet interface might be connected using copper, multimode fiber, or single-mode fiber, each of which requires a different type of SFP+ transceiver.

It can be challenging to model SFPs given their dynamic nature. This guide intends to capture the recommended strategy for modeling SFPs on NetBox v4.4 and later.

## Modeling Strategy

Pluggable transceivers are most accurately represented in NetBox as discrete [modules](../models/dcim/module.md) which are installed within [module bays](../models/dcim/modulebay.md). A module can deliver one or more [interfaces](../models/dcim/interface.md) (or other components) to the device in which it is installed. This approach ensures that a new interface is automatically created on the device when the module is installed, and deleted when the module is removed.

```mermaid
flowchart BT
    interface1[Interface 1/1]--> module1[SFP]
    interface2[Interface 2/1]--> module2[SFP]
    interface3[Interface 3/1] & interface4[Interface 3/2]--> module3[SFP]
    module1 --> modulebay1[Module Bay 1]
    module2 --> modulebay2[Module Bay 2]
    module3 --> modulebay3[Module Bay 3]
    modulebay1 & modulebay2 & modulebay3 --> device[Device]
```

### 1. Create an SFP Module Type Profile

If one has not already been defined, create a [module type profile](../models/dcim/moduletypeprofile.md) for SFPs. This profile will be assigned to all module types which represent a pluggable transceiver. Typically, you will need only one profile for all pluggable transceivers.

You might opt to define custom attributes for the profile by defining a custom [JSON schema](https://json-schema.org/). Profile attributes might be used to define characteristics unique to transceivers, such as optical wavelength and power ranges. Adding profile attributes is optional, and can be done at a later point.

!!! note
    Creating a module type profile is optional, but recommended as it allows for defining custom module attributes.

### 2. Create a Module Type for Each SFP Model in Inventory

Next, create a [module type](../models/dcim/moduletype.md) to represent each unique SFP model present in your network. Each module type should define a manufacturer and a unique model name, and may also include a part number. For example, you might create a module type for each of the following transceivers:

| Manufacturer | Model            | Media Type |
|--------------|------------------|------------|
| Cisco        | SFP-10G-SR       | 10GE MMF   |
| Cisco        | SFP-10G-LR       | 10GE SMF   |
| Juniper      | QFX-QSFP-40G-SR4 | 40GE MMF   |
| Juniper      | JNP-QSFP-DAC-5M  | 40GE DAC   |

### 3. Add an Interface to the Module Type

After creating each module type, create an interface template on it to represent its physical interface. The definition of this interface template will depend on the transceiver's physical media type. (Reference the table above for examples.) When a new module is "installed" within a module bay on a device, its templated interface(s) will be automatically instantiated on that device as child interfaces of the module.

Determining which name to use for the transceiver's interface can be tricky, as the interface name might depend on the type of device in which the SFP is installed. To avoid having to rename interfaces, consider using the `{module}` token in place of a static interface name. The interface's name will inherit the position of the bay in which its parent module is installed. If creating multiple interfaces on a module, be sure to append a unique ID (e.g. `{module}:1`) to ensure each interface gets assigned a unique name.

### 4. Create Device Types

If you haven't already, create a [device type](../models/dcim/devicetype.md) to represent each unique device model in your network.

!!! note
    Skip this step if you've already created the necessary device types.

### 5. Add Module Bays to the Device Type

Once you've created a device type, add the appropriate number of module bays on each device type to represent its SFP slots. For example, a Juniper QFX5110 would have module bays numbered `0/0/0` through `0/0/55`: 48 SFP+ bays and 8 QSFP28 bays (56 total).

Be sure to define both the name **and position** of each module bay with a unique value. The module bay's position will be used to automatically name SFP interfaces.

### 6. Create a Device

Create a new device using the device type added in the previous step. The module bays (and any other components) defined on the device type will be instantiated on the new device automatically.

!!! note
    If you've already created the necessary devices in NetBox, you'll need to add their module bays manually. You can add multiple module bays at once by selecting the desired devices from the device list and selecting **Add Components > Module Bays** at the bottom of the page.

### 7. Add the SFP Modules

Finally, create each SFP in the new device by "installing" a new module of the appropriate type in each module bay. The interface(s) defined on the selected module type will be automatically populated on the new module. If present, the `{module}` token in the name of each interface template will be replaced with the position of the bay in which the module is being installed. For example, an interface template with the name `et-{module}` being created on a module installed in a bay with position `0/0/14` will create an interface named `et-0/0/14`.

When adding many modules at once, you may find it helpful to utilize NetBox's bulk import functionality. This allows you to create many modules at once from CSV, JSON, or YAML data.
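If you prefer to script the import, many modules can also be created in a single REST API call by POSTing a list of objects. The sketch below assumes a legacy `Token` authorization header and illustrative field names (`device`, `module_bay`, `module_type`); check the API schema of your own instance before relying on them.

```python
import requests

NETBOX_URL = "https://netbox.example.com"   # placeholder URL
TOKEN = "0123456789abcdef0123456789abcdef01234567"  # placeholder API token

# Each entry installs one SFP (module type ID 7) into an existing module bay.
# Field names are assumptions for illustration; consult the API schema on your instance.
modules = [
    {"device": 123, "module_bay": 456, "module_type": 7},
    {"device": 123, "module_bay": 457, "module_type": 7},
]

response = requests.post(
    f"{NETBOX_URL}/api/dcim/modules/",
    json=modules,
    headers={"Authorization": f"Token {TOKEN}", "Accept": "application/json"},
)
response.raise_for_status()
print(f"Created {len(response.json())} modules")
```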
187  docs/best-practices/performance-handbook.md  (Normal file)

@@ -0,0 +1,187 @@
# Performance Handbook

The purpose of this handbook is to help users and administrators use NetBox efficiently. It contains assorted recommendations and best practices compiled over time, intending to serve a wide variety of use cases.

## Server Configuration

### WSGI Server Configuration

NetBox operates as a [Web Server Gateway Interface (WSGI)](https://en.wikipedia.org/wiki/Web_Server_Gateway_Interface) application, which sits behind a frontend HTTP server such as nginx or Apache. The HTTP server handles low-level HTTP request processing and serving static assets, and forwards application-level requests to NetBox via WSGI.

A backend WSGI server (typically [Gunicorn](https://gunicorn.org/) or [uWSGI](https://uwsgi-docs.readthedocs.io/en/latest/)) is responsible for running the NetBox application. This is accomplished by initializing a number of WSGI worker processes which accept WSGI requests relayed from the frontend HTTP server.

Tuning your WSGI server is crucial to realizing optimal performance from NetBox. Below are some recommended configuration parameters.

#### Provision Multiple Workers

General guidance is to set the number of worker processes to double the number of CPU cores available, plus one (`2 * CPUs + 1`).

#### Limit the Worker Lifetime

Set a maximum number of requests that a worker can service before being respawned. This helps protect against potential memory leaks.

#### Set a Request Timeout

Limit the time a worker may spend processing any request. This prevents a long-running request from tying up a worker beyond an acceptable threshold. We suggest a limit of 120 seconds as a reasonable safeguard.

#### Bind Using a Unix Socket

When running the HTTP frontend and WSGI server on the same machine, binding via a Unix socket (instead of a TCP socket) may yield slight performance gains.
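The sketch below shows a Gunicorn configuration file applying these four recommendations. The worker count assumes a 4-CPU host, and the socket path and request limits are illustrative values rather than required settings.

```python
# gunicorn.py -- illustrative values; adjust for your environment.

# Bind to a Unix socket shared with the frontend HTTP server.
bind = "unix:/run/netbox/gunicorn.sock"

# 2 * CPUs + 1 workers, assuming a 4-CPU host.
workers = 9

# Recycle each worker after a bounded number of requests to guard against
# memory leaks; the jitter staggers restarts across workers.
max_requests = 5000
max_requests_jitter = 500

# Kill any worker that spends more than two minutes on a single request.
timeout = 120
```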
### NetBox Configuration

NetBox ships with a reasonable default configuration for most environments, but administrators are encouraged to explore all the [available parameters](../configuration/index.md) to tune their installation. Some of the most notable parameters impacting performance are called out below.

#### Reduce the Maximum Page Size

NetBox paginates large result sets to reduce the overall response size. The [`MAX_PAGE_SIZE`](../configuration/miscellaneous.md#max_page_size) parameter specifies the maximum number of results per page that a client can request. This is set to 1,000 by default. Consider lowering this number if you find that API clients are frequently requesting very large result sets.

#### Limit GraphQL Aliases

By default, NetBox restricts a GraphQL query to 10 aliases. Consider reducing this number by setting [`GRAPHQL_MAX_ALIASES`](../configuration/graphql-api.md#graphql_max_aliases) to a lower value.

#### Designate Isolated Deployments

If your NetBox installation does not have Internet access, set [`ISOLATED_DEPLOYMENT`](../configuration/system.md#isolated_deployment) to True. This will prevent the application from attempting routine external requests.

#### Reduce Sentry Sampling

If [Sentry](https://sentry.io/) has been enabled for error reporting and analytics, consider lowering its sampling rate. This can be accomplished by modifying the values for `sample_rate` and `traces_sample_rate` under [`SENTRY_CONFIG`](../configuration/error-reporting.md#sentry_config).

#### Remove Unneeded Event Handlers

Check whether any custom event handlers have been added under [`EVENTS_PIPELINE`](../configuration/miscellaneous.md#events_pipeline). Remove any that are no longer needed.

### Background Task Workers

NetBox defers the execution of certain tasks to Redis queues serviced by one or more background workers. These workers operate asynchronously from the frontend WSGI workers, and process tasks in the order they are enqueued.

NetBox creates three default queues for background tasks: `high`, `default`, and `low`. Additional queues can be configured via the [`QUEUE_MAPPINGS`](../configuration/miscellaneous.md#queue_mappings) configuration parameter.

By default, a background worker (spawned via `manage.py rqworker`) will listen to all available queues. To improve responsiveness to high-priority background tasks, consider dedicating one or more workers to service the `high` queue only:

```
$ ./manage.py rqworker high
19:31:20 Worker 861be45b32214afc95c235beeb19c9fa: started with PID 2300029, version 2.6.0
19:31:20 Worker 861be45b32214afc95c235beeb19c9fa: subscribing to channel rq:pubsub:861be45b32214afc95c235beeb19c9fa
19:31:20 *** Listening on high...
19:31:20 Worker 861be45b32214afc95c235beeb19c9fa: cleaning registries for queue: high
19:31:20 Scheduler for high started with PID 2300096
```

## API Clients

### REST API

NetBox's [REST API](../integrations/rest-api.md) is the primary means of integration with external systems, allowing full create, read, update, and delete (CRUD) operations. There are a few performance considerations to keep in mind when dealing with very large data sets.

#### Use "Brief" Mode for Simple Lists

In cases where you need to retrieve only a minimal representation of objects, append `?brief=True` to the URL. This instructs NetBox to omit all fields except the following:

* ID
* URL
* Display text
* Name (or similar identifier)
* Slug (if present)
* Description
* Counts of notable related objects (where applicable)

For example, a site fetched using brief mode returns only the following:

```json
{
    "id": 2,
    "url": "https://netbox/api/dcim/sites/2/",
    "display": "DM-Akron",
    "name": "DM-Akron",
    "slug": "dm-akron",
    "description": ""
}
```

Omitting all other fields (especially those which fetch and return related objects) often results in much faster queries.
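As a sketch, a client using the Python `requests` library might fetch a brief site list as follows. The URL and token are placeholders, and the `Token` header shown is the legacy format described elsewhere in this documentation.

```python
import requests

NETBOX_URL = "https://netbox.example.com"   # placeholder URL
TOKEN = "0123456789abcdef0123456789abcdef01234567"  # placeholder API token

# Request a minimal representation of every site; brief mode omits most fields.
response = requests.get(
    f"{NETBOX_URL}/api/dcim/sites/",
    params={"brief": "true"},
    headers={"Authorization": f"Token {TOKEN}", "Accept": "application/json"},
)
for site in response.json()["results"]:
    print(site["id"], site["name"])
```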
#### Declare Selected Fields

If you need more flexibility regarding the fields to be returned for an object type, you can specify a list of fields to include using the `fields` query parameter. For example, a request for `/api/dcim/sites/?fields=id,name,status,region` will return the following:

```json
{
    "id": 2,
    "name": "DM-Akron",
    "status": {
        "value": "active",
        "label": "Active"
    },
    "region": {
        "id": 51,
        "url": "https://netbox/api/dcim/regions/51/",
        "display": "Ohio",
        "name": "Ohio",
        "slug": "us-oh",
        "description": "",
        "site_count": 0,
        "_depth": 2
    }
}
```

Like brief mode, this approach can significantly reduce the response time of an API request by omitting unneeded data.

#### Employ Pagination

Like the user interface, the REST API employs pagination to limit the number of objects returned in a single response. If a page size is not specified by the request (i.e. by passing `?limit=10`), NetBox will use the default size defined by [`PAGINATE_COUNT`](../configuration/default-values.md#paginate_count). The default page size is 50.

For some requests, especially those using brief mode or a minimal selection of fields, it may be desirable to specify a higher page size, so that fewer requests are needed to retrieve all objects. Appending `?limit=0` to the request effectively seeks to disable pagination. (Note, however, that the requested page size cannot exceed the value of [`MAX_PAGE_SIZE`](../configuration/miscellaneous.md#max_page_size), which defaults to 1,000.)

Complex API requests, which pull in many related objects, generate a relatively high load on the application, and generally benefit from reduced page size. If you find that your API requests are taking an inordinate amount of time, try reducing the page size from the default value so that fewer objects need to be returned for each request.
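For example, the following sketch walks an entire device list 100 objects at a time by following the `next` URL included in each paginated response (the URL and token are placeholders):

```python
import requests

NETBOX_URL = "https://netbox.example.com"   # placeholder URL
TOKEN = "0123456789abcdef0123456789abcdef01234567"  # placeholder API token
headers = {"Authorization": f"Token {TOKEN}", "Accept": "application/json"}

# Retrieve all devices in brief form, 100 per request, following "next" links.
url = f"{NETBOX_URL}/api/dcim/devices/?brief=true&limit=100"
devices = []
while url:
    payload = requests.get(url, headers=headers).json()
    devices.extend(payload["results"])
    url = payload["next"]  # None once the last page has been fetched

print(f"Retrieved {len(devices)} devices")
```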
### GraphQL API

NetBox's read-only [GraphQL API](../integrations/graphql-api.md) offers an alternative to its REST API, and provides a very flexible means of retrieving data. GraphQL enables the client to request any object from a single endpoint, specifying only the desired attributes and relations. Many users prefer this to the more rigid structure of the REST API, but it's important to understand the trade-offs of crafting complex queries.

#### Request Only the Necessary Fields

For optimal performance, craft your GraphQL queries to return only the fields needed by the client. This will reduce the overall query time, especially when omitting related objects.

#### Avoid Overly Complex Queries

The primary benefit of the GraphQL API is that it allows the client to offload to the server the work of stitching together various related objects, which would require the client to make multiple requests to different endpoints if using the REST API. However, this advantage does not come for free: The more information that is requested in a single query, the more work the server needs to do to fetch the raw data from the database and render it into a GraphQL response. Very complex queries can yield dozens or hundreds of SQL queries on the backend, which increase the time it takes to render a response.

While it can be tempting to pack as much data as possible into a single GraphQL query, realize that there is a balance to be struck between minimizing the number of queries needed and avoiding complexity in the interest of performance. For example, while it is possible to retrieve via a single GraphQL API request all the IP addresses and all attached cables for every device in a site, it is probably more efficient (often _much_ more efficient) to make two or three separate requests and correlate the data locally.

#### Use Filters

You can specify filters when making a GraphQL query to limit the set of objects returned. This works a bit differently from the REST API, as filters are declared inside the query statement rather than appended to the URL, but the concept is the same. For example, to return only active sites:

```graphql
query {
  site_list(
    filters: {
      status: STATUS_ACTIVE
    }
  ) {
    name
  }
}
```

This returns only sites with a status of "active" and avoids needing to parse through all the others. For further information about filters, see the [GraphQL API documentation](../integrations/graphql-api.md).
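A GraphQL query such as the one above is submitted as an HTTP POST to the `/graphql/` endpoint. A minimal sketch using the Python `requests` library (placeholder URL and token) is shown below.

```python
import requests

NETBOX_URL = "https://netbox.example.com"   # placeholder URL
TOKEN = "0123456789abcdef0123456789abcdef01234567"  # placeholder API token

query = """
query {
  site_list(filters: {status: STATUS_ACTIVE}) {
    name
  }
}
"""

# POST the query document to the GraphQL endpoint; the response contains a
# "data" object keyed by the requested query name.
response = requests.post(
    f"{NETBOX_URL}/graphql/",
    json={"query": query},
    headers={"Authorization": f"Token {TOKEN}", "Accept": "application/json"},
)
for site in response.json()["data"]["site_list"]:
    print(site["name"])
```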
#### Employ Pagination

Like the REST API, the GraphQL API supports pagination. Queries which return a large number of objects should employ pagination to limit the size of each response.

```graphql
{
  device_list(
    pagination: {limit: 100}
  ) {
    id
    name
    serial
    status
  }
}
```

@@ -4,7 +4,7 @@

This parameter controls the content and layout of user's default dashboard. Once the dashboard has been created, the user is free to customize it as they please by adding, removing, and reconfiguring widgets.

This parameter must specify an iterable of dictionaries, each representing a discrete dashboard widget and its configuration. The follow widget attributes are supported:
This parameter must specify an iterable of dictionaries, each representing a discrete dashboard widget and its configuration. The following widget attributes are supported:

* `widget`: Dotted path to the Python class (required)
* `width`: Default widget width (between 1 and 12, inclusive)

@@ -63,6 +63,8 @@ DEFAULT_USER_PREFERENCES = {

For a complete list of available preferences, log into NetBox and navigate to `/user/preferences/`. A period in a preference name indicates a level of nesting in the JSON data. The example above maps to `pagination.per_page`.

See also: [Clearing table preferences](../features/user-preferences.md#clearing-table-preferences) for resolving errors caused by saved table columns or ordering.

---

## PAGINATE_COUNT
@@ -1,7 +1,32 @@
# Error Reporting Settings

## SENTRY_CONFIG

A dictionary mapping keyword arguments to values, to be passed to `sentry_sdk.init()`. See the [Sentry Python SDK documentation](https://docs.sentry.io/platforms/python/) for more information on supported parameters.

The default configuration is shown below:

```python
{
    "sample_rate": 1.0,
    "send_default_pii": False,
    "traces_sample_rate": 0,
}
```

Additionally, `http_proxy` and `https_proxy` are set to the HTTP and HTTPS proxies, respectively, configured for NetBox (if any).

## SENTRY_DSN

!!! warning "This parameter will be removed in NetBox v4.5."
    Set this using `SENTRY_CONFIG` instead:

    ```
    SENTRY_CONFIG = {
        "dsn": "https://examplePublicKey@o0.ingest.sentry.io/0",
    }
    ```

Default: `None`

Defines a Sentry data source name (DSN) for automated error reporting. `SENTRY_ENABLED` must be `True` for this parameter to take effect. For example:

@@ -25,6 +50,15 @@ Set to `True` to enable automatic error reporting via [Sentry](https://sentry.io

## SENTRY_SAMPLE_RATE

!!! warning "This parameter will be removed in NetBox v4.5."
    Set this using `SENTRY_CONFIG` instead:

    ```
    SENTRY_CONFIG = {
        "sample_rate": 0.2,
    }
    ```

Default: `1.0` (all)

The sampling rate for errors. Must be a value between 0 (disabled) and 1.0 (report on all errors).

@@ -33,6 +67,15 @@ The sampling rate for errors. Must be a value between 0 (disabled) and 1.0 (repo

## SENTRY_SEND_DEFAULT_PII

!!! warning "This parameter will be removed in NetBox v4.5."
    Set this using `SENTRY_CONFIG` instead:

    ```
    SENTRY_CONFIG = {
        "send_default_pii": True,
    }
    ```

Default: `False`

Maps to the Sentry SDK's [`send_default_pii`](https://docs.sentry.io/platforms/python/configuration/options/#send-default-pii) parameter. If enabled, certain personally identifiable information (PII) is added.

@@ -60,6 +103,15 @@ SENTRY_TAGS = {

## SENTRY_TRACES_SAMPLE_RATE

!!! warning "This parameter will be removed in NetBox v4.5."
    Set this using `SENTRY_CONFIG` instead:

    ```
    SENTRY_CONFIG = {
        "traces_sample_rate": 0.2,
    }
    ```

Default: `0` (disabled)

The sampling rate for transactions. Must be a value between 0 (disabled) and 1.0 (report on all transactions).

@@ -127,19 +127,3 @@ The list of groups that promote an remote User to Superuser on Login. If group i

Default: `[]` (Empty list)

The list of users that get promoted to Superuser on Login. If user isn't present in list on next Login, the Role gets revoked. (Requires `REMOTE_AUTH_ENABLED` and `REMOTE_AUTH_GROUP_SYNC_ENABLED` )

---

## REMOTE_AUTH_STAFF_GROUPS

Default: `[]` (Empty list)

The list of groups that promote an remote User to Staff on Login. If group isn't present on next Login, the Role gets revoked. (Requires `REMOTE_AUTH_ENABLED` and `REMOTE_AUTH_GROUP_SYNC_ENABLED` )

---

## REMOTE_AUTH_STAFF_USERS

Default: `[]` (Empty list)

The list of users that get promoted to Staff on Login. If user isn't present in list on next Login, the Role gets revoked. (Requires `REMOTE_AUTH_ENABLED` and `REMOTE_AUTH_GROUP_SYNC_ENABLED` )
@@ -23,6 +23,31 @@ ALLOWED_HOSTS = ['*']

---

## API_TOKEN_PEPPERS

!!! info "This parameter was introduced in NetBox v4.5."

[Cryptographic peppers](https://en.wikipedia.org/wiki/Pepper_(cryptography)) are employed to generate hashes of sensitive values on the server. This parameter defines the peppers used to hash v2 API tokens in NetBox. You must define at least one pepper before creating a v2 API token. See the [API documentation](../integrations/rest-api.md#authentication) for further information about how peppers are used.

```python
API_TOKEN_PEPPERS = {
    # DO NOT USE THIS EXAMPLE PEPPER IN PRODUCTION
    1: 'kp7ht*76fiQAhUi5dHfASLlYUE_S^gI^(7J^K5M!LfoH@vl&b_',
}
```

!!! warning "Peppers are sensitive"
    Treat pepper values as extremely sensitive. Consider populating peppers from environment variables at initialization time rather than defining them in the configuration file, if feasible.

Peppers must be at least 50 characters in length and should comprise a random string with a diverse character set. Consider using the Python script at `$INSTALL_ROOT/netbox/generate_secret_key.py` to generate a pepper value.

It is recommended to start with a pepper ID of `1`. Additional peppers can be introduced later as needed to begin rotating token hashes.
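For example, the following sketch (with assumed environment variable names) sources two peppers from the environment, keeping the values out of the configuration file while allowing a later rotation from pepper `1` to pepper `2`:

```python
import os

API_TOKEN_PEPPERS = {
    # Variable names are illustrative; keep the actual values out of version control.
    1: os.environ["NETBOX_TOKEN_PEPPER_1"],
    2: os.environ["NETBOX_TOKEN_PEPPER_2"],
}
```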
!!! tip
    Although NetBox will run without `API_TOKEN_PEPPERS` defined, the use of v2 API tokens will be unavailable.

---

## DATABASE

!!! warning "Legacy Configuration Parameter"

@@ -1,16 +1,5 @@
# Security & Authentication Parameters

## ALLOW_TOKEN_RETRIEVAL

Default: `False`

!!! note
    The default value of this parameter changed from `True` to `False` in NetBox v4.3.0.

If disabled, the values of API tokens will not be displayed after each token's initial creation. A user **must** record the value of a token prior to its creation, or it will be lost. Note that this affects _all_ users, regardless of assigned permissions.

---

## ALLOWED_URL_SCHEMES

!!! tip "Dynamic Configuration Parameter"
@@ -131,17 +131,6 @@ self.log_info(f"Running as user {username} (IP: {ip_address})...")

For a complete list of available request parameters, please see the [Django documentation](https://docs.djangoproject.com/en/stable/ref/request-response/).

## Reading Data from Files

The Script class provides two convenience methods for reading data from files:

* `load_yaml`
* `load_json`

These two methods will load data in YAML or JSON format, respectively, from files within the local path (i.e. `SCRIPTS_ROOT`).

**Note:** These convenience methods are deprecated and will be removed in NetBox v4.4. These only work if running scripts within the local path, they will not work if using a storage other than ScriptFileSystemStorage.

## Logging

The Script object provides a set of convenient functions for recording messages at different severity levels:

@@ -7,7 +7,7 @@ Getting started with NetBox development is pretty straightforward, and should fe

* A Linux system or compatible environment
* A PostgreSQL server, which can be installed locally [per the documentation](../installation/1-postgresql.md)
* A Redis server, which can also be [installed locally](../installation/2-redis.md)
* Python 3.10 or later
* Python 3.12 or later

### 1. Fork the Repo

@@ -2,6 +2,8 @@

The `users.UserConfig` model holds individual preferences for each user in the form of JSON data. This page serves as a manifest of all recognized user preferences in NetBox.

For end‑user guidance on resetting saved table layouts, see [Features > User Preferences](../features/user-preferences.md#clearing-table-preferences).

## Available Preferences

| Name | Description |

@@ -8,7 +8,7 @@ NetBox's REST API, powered by the [Django REST Framework](https://www.django-res

```no-highlight
curl -s -X POST \
-H "Authorization: Token $TOKEN" \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
http://netbox/api/ipam/prefixes/ \
--data '{"prefix": "192.0.2.0/24", "site": {"name": "Branch 12"}}'
```

@@ -2,6 +2,8 @@

While NetBox strives to meet the needs of every network, the needs of users to cater to their own unique environments cannot be ignored. NetBox was built with this in mind, and can be customized in many ways to better suit your particular needs.

For end‑user personalization topics (bookmarks, table preferences, language, CSV delimiter, and more), see [Features > User Preferences](../features/user-preferences.md).

## Tags

Most objects in NetBox can be assigned user-created tags to aid with organization and filtering. Tag values are completely arbitrary: They may be used to store data in key-value pairs, or they may be employed simply as labels against which objects can be filtered. Each tag can also be assigned a color for quicker differentiation in the user interface.

@@ -18,10 +20,6 @@ The `tag` filter can be specified multiple times to match only objects which hav
GET /api/dcim/devices/?tag=monitored&tag=deprecated
```

## Bookmarks

Users can bookmark their most commonly visited objects for convenient access. Bookmarks are listed under a user's profile, and can be displayed with custom filtering and ordering on the user's personal dashboard.

## Custom Fields

While NetBox provides a rather extensive data model out of the box, the need may arise to store certain additional data associated with NetBox objects. For example, you might need to record the invoice ID alongside an installed device, or record an approving authority when creating a new IP prefix. NetBox administrators can create custom fields on built-in objects to meet these needs.

@@ -38,7 +36,7 @@ Custom links allow you to conveniently reference external resources related to N
http://server.local/vms/?name={{ object.name }}
```

Now, when viewing a virtual machine in NetBox, a user will see a handy button with the chosen title and link (complete with the name of the VM being viewed). Both the text and URL of custom links can be templatized in this manner, and custom links can be grouped together into dropdowns for more efficient display.
Now, when viewing a virtual machine in NetBox, a user will see a handy button with the chosen title and link (complete with the name of the VM being viewed). Both the text and URL of custom links can be templatized in this manner, and custom links can be grouped together into dropdowns for a more efficient display.

To learn more about this feature, check out the [custom link documentation](../customization/custom-links.md).
60  docs/features/user-preferences.md  (Normal file)

@@ -0,0 +1,60 @@
# User Preferences

NetBox stores per‑user options that control aspects of the web interface and data display. Preferences persist across sessions and can be managed under **User → Preferences**.

## Table configurations

When a list view is configured using **Configure**, NetBox records the selected columns and ordering as per‑user table preferences for that table. These preferences are applied automatically on subsequent visits.

### Clearing table preferences

Saved table preferences may need to be reset, for example, if a table fails to render or after an upgrade that changes available columns.

To clear saved preferences for one or more tables:

1. Click the username in the top‑right corner.
2. Select **Preferences** from the dropdown.
3. Scroll to the **Table Configurations** section.
4. Select the tables to reset.
5. Click **Submit** to clear the selected preferences.

After clearing preferences, reopen the list view and use **Configure** to set the desired columns and ordering.

!!! note
    Per‑user table preferences are distinct from **Table Configs**, which are named, reusable configurations managed under *Customization → Table Configs*. Clearing preferences does not delete any Table Configs. See [Table Configs](../models/extras/tableconfig.md) for details.

## Other preferences

### Language
Selects the user interface language from installed translations (subject to system configuration).

### Page length
Sets the default number of rows displayed on paginated tables.

### Paginator placement
Controls where pagination controls are rendered relative to a table.

### Striped table rows
Toggles alternating row backgrounds on tables.

### Data format (raw views)
Sets the default format (JSON or YAML) when rendering raw data blocks.

### CSV delimiter
Overrides the delimiter used when exporting CSV data.

## Bookmarks

Users can bookmark frequently visited objects for convenient access. Bookmarks appear under the user menu and can be displayed on the personal dashboard using the bookmarks widget. See [Bookmark](../models/extras/bookmark.md) for model details.

## Notifications and subscriptions

Users may subscribe to objects to receive notifications when changes occur. Notifications are listed under the user menu and can be marked as read or deleted. See [Features > Notifications](notifications.md) and the data‑model references for [Subscription](../models/extras/subscription.md) and [Notification](../models/extras/notification.md).

## Admin defaults

Administrators can define defaults for new users via [`DEFAULT_USER_PREFERENCES`](../configuration/default-values.md#default_user_preferences). Users may override these values under their own preferences.
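As a sketch, assuming the `pagination.per_page` preference listed in the development manifest, an administrator could default new users to 100-row tables:

```python
DEFAULT_USER_PREFERENCES = {
    "pagination": {
        "per_page": 100,
    },
}
```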
## See also

- [Development > User Preferences](../development/user-preferences.md) (manifest of recognized preference keys)
@@ -6,8 +6,8 @@ This section of the documentation discusses installing and configuring the NetBo

Begin by installing all system packages required by NetBox and its dependencies.

!!! warning "Python 3.10 or later required"
    NetBox supports Python 3.10, 3.11, and 3.12.
!!! warning "Python 3.12 or later required"
    NetBox supports only Python 3.12 or later.

```no-highlight
sudo apt install -y python3 python3-pip python3-venv python3-dev \
@@ -15,7 +15,7 @@ build-essential libxml2-dev libxslt1-dev libffi-dev libpq-dev \
libssl-dev zlib1g-dev
```

Before continuing, check that your installed Python version is at least 3.10:
Before continuing, check that your installed Python version is at least 3.12:

```no-highlight
python3 -V

@@ -120,6 +120,23 @@ If you are not yet sure what the domain name and/or IP address of the NetBox ins
ALLOWED_HOSTS = ['*']
```

### API_TOKEN_PEPPERS

Define at least one random cryptographic pepper, identified by a numeric ID starting at 1. This will be used to generate SHA256 checksums for API tokens.

```python
API_TOKEN_PEPPERS = {
    # DO NOT USE THIS EXAMPLE PEPPER IN PRODUCTION
    1: 'kp7ht*76fiQAhUi5dHfASLlYUE_S^gI^(7J^K5M!LfoH@vl&b_',
}
```

!!! tip
    As with [`SECRET_KEY`](#secret_key) below, you can use the `generate_secret_key.py` script to generate a random pepper:
    ```no-highlight
    python3 ../generate_secret_key.py
    ```

### DATABASES

This parameter holds the PostgreSQL database configuration details. The default database must be defined; additional databases may be defined as needed e.g. by plugins.

@@ -235,10 +252,10 @@ Once NetBox has been configured, we're ready to proceed with the actual installa
sudo /opt/netbox/upgrade.sh
```

Note that **Python 3.10 or later is required** for NetBox v4.0 and later releases. If the default Python installation on your server is set to a lesser version, pass the path to the supported installation as an environment variable named `PYTHON`. (Note that the environment variable must be passed _after_ the `sudo` command.)
Note that **Python 3.12 or later is required** for NetBox v4.5 and later releases. If the default Python installation on your server is set to a lesser version, pass the path to the supported installation as an environment variable named `PYTHON`. (Note that the environment variable must be passed _after_ the `sudo` command.)

```no-highlight
sudo PYTHON=/usr/bin/python3.10 /opt/netbox/upgrade.sh
sudo PYTHON=/usr/bin/python3.12 /opt/netbox/upgrade.sh
```

!!! note

@@ -60,6 +60,3 @@ You should see output similar to the following:

If the NetBox service fails to start, issue the command `journalctl -eu netbox` to check for log messages that may indicate the problem.

Once you've verified that the WSGI workers are up and running, move on to HTTP server setup.

!!! note
    There is a bug in the current stable release of gunicorn (v21.2.0) where automatic restarts of the worker processes can result in 502 errors under heavy load. (See [gunicorn bug #3038](https://github.com/benoitc/gunicorn/issues/3038) for more detail.) Users who encounter this issue may opt to downgrade to an earlier, unaffected release of gunicorn (`pip install gunicorn==20.1.0`). Note, however, that this earlier release does not officially support Python 3.11.
@@ -121,7 +121,6 @@ AUTH_LDAP_MIRROR_GROUPS = True
# Define special user types using groups. Exercise great caution when assigning superuser status.
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
    "is_active": "cn=active,ou=groups,dc=example,dc=com",
    "is_staff": "cn=staff,ou=groups,dc=example,dc=com",
    "is_superuser": "cn=superuser,ou=groups,dc=example,dc=com"
}

@@ -134,7 +133,6 @@ AUTH_LDAP_CACHE_TIMEOUT = 3600
```

* `is_active` - All users must be mapped to at least this group to enable authentication. Without this, users cannot log in.
* `is_staff` - Users mapped to this group are enabled for access to the administration tools; this is the equivalent of checking the "staff status" box on a manually created user. This doesn't grant any specific permissions.
* `is_superuser` - Users mapped to this group will be granted superuser status. Superusers are implicitly granted all permissions.

!!! warning

@@ -248,7 +246,6 @@ AUTH_LDAP_MIRROR_GROUPS = True
# Define special user types using groups. Exercise great caution when assigning superuser status.
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
    "is_active": "cn=active,ou=groups,dc=example,dc=com",
    "is_staff": "cn=staff,ou=groups,dc=example,dc=com",
    "is_superuser": "cn=superuser,ou=groups,dc=example,dc=com"
}

@@ -27,7 +27,7 @@ The following sections detail how to set up a new instance of NetBox:

| Dependency | Supported Versions |
|------------|--------------------|
| Python     | 3.10, 3.11, 3.12   |
| Python     | 3.12, 3.13, 3.14   |
| PostgreSQL | 14+                |
| Redis      | 4.0+               |

@@ -19,7 +19,7 @@ NetBox requires the following dependencies:

| Dependency | Supported Versions |
|------------|--------------------|
| Python     | 3.10, 3.11, 3.12   |
| Python     | 3.12, 3.13, 3.14   |
| PostgreSQL | 14+                |
| Redis      | 4.0+               |

@@ -27,6 +27,7 @@ NetBox requires the following dependencies:

| NetBox Version | Python min | Python max | PostgreSQL min | Redis min | Documentation |
|:--------------:|:----------:|:----------:|:--------------:|:---------:|:-----------------------------------------------------------------------------------------:|
| 4.5 | 3.12 | 3.14 | 14 | 4.0 | [Link](https://github.com/netbox-community/netbox/blob/v4.5.0/docs/installation/index.md) |
| 4.4 | 3.10 | 3.12 | 14 | 4.0 | [Link](https://github.com/netbox-community/netbox/blob/v4.4.0/docs/installation/index.md) |
| 4.3 | 3.10 | 3.12 | 14 | 4.0 | [Link](https://github.com/netbox-community/netbox/blob/v4.3.0/docs/installation/index.md) |
| 4.2 | 3.10 | 3.12 | 13 | 4.0 | [Link](https://github.com/netbox-community/netbox/blob/v4.2.0/docs/installation/index.md) |

@@ -130,7 +131,7 @@ sudo ./upgrade.sh

If the default version of Python is not at least 3.10, you'll need to pass the path to a supported Python version as an environment variable when calling the upgrade script. For example:

```no-highlight
sudo PYTHON=/usr/bin/python3.10 ./upgrade.sh
sudo PYTHON=/usr/bin/python3.12 ./upgrade.sh
```

!!! note
@@ -80,7 +80,7 @@ Likewise, the site, rack, and device objects are located under the "DCIM" applic

The full hierarchy of available endpoints can be viewed by navigating to the API root in a web browser.

Each model generally has two views associated with it: a list view and a detail view. The list view is used to retrieve a list of multiple objects and to create new objects. The detail view is used to retrieve, update, or delete an single existing object. All objects are referenced by their numeric primary key (`id`).
Each model generally has two views associated with it: a list view and a detail view. The list view is used to retrieve a list of multiple objects and to create new objects. The detail view is used to retrieve, update, or delete a single existing object. All objects are referenced by their numeric primary key (`id`).

* `/api/dcim/devices/` - List existing devices or create a new device
* `/api/dcim/devices/123/` - Retrieve, update, or delete the device with ID 123

@@ -653,18 +653,22 @@ The NetBox REST API primarily employs token-based authentication. For convenienc

### Tokens

A token is a unique identifier mapped to a NetBox user account. Each user may have one or more tokens which he or she can use for authentication when making REST API requests. To create a token, navigate to the API tokens page under your user profile.
A token is a secret, unique identifier mapped to a NetBox user account. Each user may have one or more tokens which he or she can use for authentication when making REST API requests. To create a token, navigate to the API tokens page under your user profile. When creating a token, NetBox will automatically populate a randomly-generated token value.

!!! note "Tokens cannot be retrieved once created"
    Once a token has been created, its plaintext value cannot be retrieved. For this reason, you must take care to securely record the token locally immediately upon its creation. If a token plaintext is lost, it cannot be recovered: A new token must be created.

By default, all users can create and manage their own REST API tokens under the user control panel in the UI or via the REST API. This ability can be disabled by overriding the [`DEFAULT_PERMISSIONS`](../configuration/security.md#default_permissions) configuration parameter.

Each token contains a 160-bit key represented as 40 hexadecimal characters. When creating a token, you'll typically leave the key field blank so that a random key will be automatically generated. However, NetBox allows you to specify a key in case you need to restore a previously deleted token to operation.

Additionally, a token can be set to expire at a specific time. This can be useful if an external client needs to be granted temporary access to NetBox.

!!! info "Restricting Token Retrieval"
    The ability to retrieve the key value of a previously-created API token can be restricted by disabling the [`ALLOW_TOKEN_RETRIEVAL`](../configuration/security.md#allow_token_retrieval) configuration parameter.

#### v1 and v2 Tokens

### Restricting Write Operations
Beginning with NetBox v4.5, two versions of API token are supported, denoted as v1 and v2. Users are strongly encouraged to create only v2 tokens and to discontinue the use of v1 tokens. Support for v1 tokens will be removed in a future NetBox release.

v2 API tokens offer much stronger security. The token plaintext given at creation time is hashed together with a configured [cryptographic pepper](../configuration/required-parameters.md#api_token_peppers) to generate a unique checksum. This checksum is irreversible; the token plaintext is never stored on the server and thus cannot be retrieved even with database-level access.
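Conceptually, this is the familiar pattern of storing only a peppered digest of a secret and recomputing it at verification time. The sketch below is purely illustrative of that general pattern; it is not NetBox's actual implementation.

```python
import hashlib
import hmac

def token_checksum(plaintext: str, pepper: str) -> str:
    # Only this digest would ever be stored; the plaintext itself is discarded.
    return hmac.new(pepper.encode(), plaintext.encode(), hashlib.sha256).hexdigest()

def verify(presented: str, stored_checksum: str, pepper: str) -> bool:
    # Constant-time comparison of the recomputed digest against the stored one.
    return hmac.compare_digest(token_checksum(presented, pepper), stored_checksum)
```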
|
||||
|
||||
#### Restricting Write Operations
|
||||
|
||||
By default, a token can be used to perform all actions via the API that a user would be permitted to do via the web UI. Deselecting the "write enabled" option will restrict API requests made with the token to read operations (e.g. GET) only.
|
||||
|
||||
@@ -681,10 +685,22 @@ It is possible to provision authentication tokens for other users via the REST A
|
||||
|
||||
### Authenticating to the API
|
||||
|
||||
An authentication token is attached to a request by setting the `Authorization` header to the string `Token` followed by a space and the user's token:
|
||||
An authentication token is included with a request in its `Authorization` header. The format of the header value depends on the version of token in use. v2 tokens use the following form, concatenating the token's prefix (`nbt_`) and key with its plaintext value, separated by a period:
|
||||
|
||||
```
|
||||
$ curl -H "Authorization: Token $TOKEN" \
|
||||
Authorization: Bearer nbt_<key>.<token>
|
||||
```
|
||||
|
||||
Legacy v1 tokens use the prefix `Token` rather than `Bearer`, and include only the token plaintext. (v1 tokens do not have a key.)
|
||||
|
||||
```
|
||||
Authorization: Token <token>
|
||||
```
|
||||
|
||||
Below is an example REST API request utilizing a v2 token.
|
||||
|
||||
```
|
||||
$ curl -H "Authorization: Bearer nbt_4F9DAouzURLb.zjebxBPzICiPbWz0Wtx0fTL7bCKXKGTYhNzkgC2S" \
|
||||
-H "Accept: application/json; indent=4" \
|
||||
https://netbox/api/dcim/sites/
|
||||
{

@@ -4,6 +4,9 @@ This object represents the saved configuration of an object table in NetBox. Tab

For example, you might wish to create a table config for the devices list to assist in inventory tasks. This view might show the device name, location, serial number, and asset tag, but omit operational details like IP addresses. Once applied, this table config can be saved for reuse in future audits.

!!! note
    Per‑user table preferences (columns and ordering remembered for an individual user) are distinct from Table Configs. If a list view fails to render due to outdated saved preferences, see [Clearing table preferences](../../features/user-preferences.md#clearing-table-preferences).

## Fields

### Name

@@ -20,7 +23,7 @@ The type of NetBox object to which the table config pertains.

### Table

The name of the specific table to which the table config pertains. (Some NetBox object use multiple tables.)
The name of the specific table to which the table config pertains. (Some NetBox objects use multiple tables.)

### Weight

@@ -1,6 +1,6 @@

# Filters & Filter Sets

Filter sets define the mechanisms available for filtering or searching through a set of objects in NetBox. For instance, sites can be filtered by their parent region or group, status, facility ID, and so on. The same filter set is used consistently for a model whether the request is made via the UI or REST API. (Note that the GraphQL API uses a separate filter class.) NetBox employs the [django-filters2](https://django-tables2.readthedocs.io/en/latest/) library to define filter sets.

Filter sets define the mechanisms available for filtering or searching through a set of objects in NetBox. For instance, sites can be filtered by their parent region or group, status, facility ID, and so on. The same filter set is used consistently for a model whether the request is made via the UI or REST API. (Note that the GraphQL API uses a separate filter class.) NetBox employs the [django-filter](https://django-filter.readthedocs.io/en/stable/) library to define filter sets.
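As a minimal sketch of a django-filter filter set (plain django-filter here; NetBox's own filter sets build on additional base classes such as `NetBoxModelFilterSet`):

```python
import django_filters

from dcim.models import Site


class SiteFilterSet(django_filters.FilterSet):
    # Case-insensitive substring match on the facility ID
    facility = django_filters.CharFilter(lookup_expr='icontains')

    class Meta:
        model = Site
        fields = ('status', 'region', 'group', 'facility')
```

Filtering then works identically from a UI query string or a REST API request, e.g. `?facility=dc1&status=active`.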

## FilterSet Classes

@@ -173,12 +173,12 @@ classifiers=[
|
||||
'Intended Audience :: Developers',
|
||||
'Natural Language :: English',
|
||||
"Programming Language :: Python :: 3 :: Only",
|
||||
'Programming Language :: Python :: 3.10',
|
||||
'Programming Language :: Python :: 3.11',
|
||||
'Programming Language :: Python :: 3.12',
|
||||
'Programming Language :: Python :: 3.13',
|
||||
'Programming Language :: Python :: 3.14',
|
||||
]
|
||||
|
||||
requires-python = ">=3.10.0"
|
||||
requires-python = ">=3.12.0"
|
||||
|
||||
```
|
||||
|
||||
@@ -195,7 +195,7 @@ python3 -m venv ~/.virtualenvs/my_plugin

You can make NetBox available within this environment by creating a path file pointing to its location. This will add NetBox to the Python path upon activation. (Be sure to adjust the command below to specify your actual virtual environment path, Python version, and NetBox installation.)

```shell
echo /opt/netbox/netbox > $VENV/lib/python3.10/site-packages/netbox.pth
echo /opt/netbox/netbox > $VENV/lib/python3.12/site-packages/netbox.pth
```

## Development Installation

@@ -64,14 +64,17 @@ item1 = PluginMenuItem(

A `PluginMenuItem` has the following attributes:

| Attribute       | Required | Description                                                                                               |
|-----------------|----------|-----------------------------------------------------------------------------------------------------------|
| `link`          | Yes      | Name of the URL path to which this menu item links                                                        |
| `link_text`     | Yes      | The text presented to the user                                                                            |
| `permissions`   | -        | A list of permissions required to display this link                                                       |
| `auth_required` | -        | Display only for authenticated users                                                                      |
| `staff_only`    | -        | Display only for users who have `is_staff` set to true (any specified permissions will also be required)  |
| `buttons`       | -        | An iterable of PluginMenuButton instances to include                                                      |

| Attribute       | Required | Description                                            |
|-----------------|----------|--------------------------------------------------------|
| `link`          | Yes      | Name of the URL path to which this menu item links     |
| `link_text`     | Yes      | The text presented to the user                         |
| `permissions`   | -        | A list of permissions required to display this link    |
| `auth_required` | -        | Display only for authenticated users                   |
| `staff_only`    | -        | Display only for superusers                            |
| `buttons`       | -        | An iterable of PluginMenuButton instances to include   |

!!! note "Changed in NetBox v4.5"
In releases prior to NetBox v4.5, `staff_only` restricted display of a menu item to only users with `is_staff` set to True. In NetBox v4.5, the `is_staff` flag was removed from the user model. Menu items with `staff_only` set to True are now displayed only for superusers.
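For illustration, a menu item restricted to superusers might be declared as follows (a sketch; the URL name and permission are placeholders):

```python
from netbox.plugins import PluginMenuItem

item = PluginMenuItem(
    link='plugins:my_plugin:widget_list',   # placeholder URL name
    link_text='Widgets',
    permissions=['my_plugin.view_widget'],  # placeholder permission
    auth_required=True,
    staff_only=True,                        # as of v4.5: shown only to superusers
)
```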
## Menu Buttons
@@ -1,5 +1,61 @@
|
||||
# NetBox v4.4
|
||||
|
||||
## v4.4.3 (2025-10-14)
|
||||
|
||||
### Enhancements
|
||||
|
||||
* [#20426](https://github.com/netbox-community/netbox/issues/20426) - Add a copy-to-clipboard button for custom script output
|
||||
* [#20516](https://github.com/netbox-community/netbox/issues/20516) - Improve rendering of VLAN ID ranges in VLAN group tables
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* [#19302](https://github.com/netbox-community/netbox/issues/19302) - Fix uniqueness validation in REST API for nullable fields
|
||||
* [#19615](https://github.com/netbox-community/netbox/issues/19615) - Fix support for static file parameters in templates when external storage is in use
|
||||
* [#19818](https://github.com/netbox-community/netbox/issues/19818) - Hide primary IP assignment fields when creating a new virtual machine in the UI
|
||||
* [#19825](https://github.com/netbox-community/netbox/issues/19825) - Prevent cache for config revisions from being erroneously overwritten when debugging is enabled
|
||||
* [#20140](https://github.com/netbox-community/netbox/issues/20140) - Changing a site's region or group should update any associated circuit terminations
|
||||
* [#20156](https://github.com/netbox-community/netbox/issues/20156) - Fix display of rack elevation labels
|
||||
* [#20290](https://github.com/netbox-community/netbox/issues/20290) - Fix migration error when upgrading to NetBox v4.4 from releases earlier than v4.3
|
||||
* [#20471](https://github.com/netbox-community/netbox/issues/20471) - Saving an unmodified VLAN group should not generate a change record
|
||||
* [#20475](https://github.com/netbox-community/netbox/issues/20475) - Collapse singleton VLAN IDs in VLAN group display
|
||||
* [#20494](https://github.com/netbox-community/netbox/issues/20494) - Correct OpenAPI schema definition for `IntegerRangeSerializer`
|
||||
* [#20496](https://github.com/netbox-community/netbox/issues/20496) - REST API should always honor `MAX_PAGE_SIZE` value
|
||||
* [#20497](https://github.com/netbox-community/netbox/issues/20497) - Fix filtering of VLAN groups by VLAN ID range in GraphQL API
|
||||
* [#20507](https://github.com/netbox-community/netbox/issues/20507) - Fix support for fetching ASN contacts via GraphQL API
|
||||
* [#20523](https://github.com/netbox-community/netbox/issues/20523) - Hide password change form for users authenticated via SSO
|
||||
* [#20542](https://github.com/netbox-community/netbox/issues/20542) - Fix the creation of MAC addresses using the "quick add" form
|
||||
|
||||
---
|
||||
|
||||
## v4.4.2 (2025-09-30)
|
||||
|
||||
### Enhancements
|
||||
|
||||
* [#17010](https://github.com/netbox-community/netbox/issues/17010) - Show admin navigation menu items only for staff & superusers
|
||||
* [#19590](https://github.com/netbox-community/netbox/issues/19590) - Add columns for device site & location to device component tables
|
||||
* [#19765](https://github.com/netbox-community/netbox/issues/19765) - Linkify assigned object types under saved filter view
|
||||
* [#20308](https://github.com/netbox-community/netbox/issues/20308) - Add a hotkey (`/`) for the global search field
|
||||
* [#20332](https://github.com/netbox-community/netbox/issues/20332) - Add a "none" option to object tag filters
|
||||
* [#20380](https://github.com/netbox-community/netbox/issues/20380) - Introduce the `SENTRY_CONFIG` configuration parameter
|
||||
* [#20412](https://github.com/netbox-community/netbox/issues/20412) - Linkify cluster type on virtual machine detail view
|
||||
* [#20438](https://github.com/netbox-community/netbox/issues/20438) - Add `facility` field to bulk edit forms for sites and locations
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* [#18878](https://github.com/netbox-community/netbox/issues/18878) - Automatically assign a designated primary MAC address upon creation of a new interface
|
||||
* [#20243](https://github.com/netbox-community/netbox/issues/20243) - Prevent scheduled system jobs from re-running multiple times
|
||||
* [#20253](https://github.com/netbox-community/netbox/issues/20253) - Fix support for filtering object contact assignments in GraphQL API
|
||||
* [#20365](https://github.com/netbox-community/netbox/issues/20365) - Address various inaccuracies in generated OpenAPI schema
|
||||
* [#20375](https://github.com/netbox-community/netbox/issues/20375) - Preserve filter parameters when performing bulk operations
|
||||
* [#20390](https://github.com/netbox-community/netbox/issues/20390) - Fix styling of page size selection dropdown
|
||||
* [#20392](https://github.com/netbox-community/netbox/issues/20392) - Clean up ordering of interface type options
|
||||
* [#20398](https://github.com/netbox-community/netbox/issues/20398) - Fix misleading error reporting for min/max custom field values
|
||||
* [#20419](https://github.com/netbox-community/netbox/issues/20419) - Correct action buttons for child object views
|
||||
* [#20425](https://github.com/netbox-community/netbox/issues/20425) - Fix Markdown preview functionality within "quick add" modal
|
||||
* [#20441](https://github.com/netbox-community/netbox/issues/20441) - Fix display of the "groups" column in contact assignments table
|
||||
|
||||
---
|
||||
|
||||
## v4.4.1 (2025-09-16)
|
||||
|
||||
### Enhancements
|
||||
|
||||
@@ -86,6 +86,7 @@ nav:
|
||||
- Change Logging: 'features/change-logging.md'
|
||||
- Journaling: 'features/journaling.md'
|
||||
- Event Rules: 'features/event-rules.md'
|
||||
- User Preferences: 'features/user-preferences.md'
|
||||
- Notifications: 'features/notifications.md'
|
||||
- Background Jobs: 'features/background-jobs.md'
|
||||
- Auth & Permissions: 'features/authentication-permissions.md'
|
||||
@@ -124,6 +125,9 @@ nav:
|
||||
- Export Templates: 'customization/export-templates.md'
|
||||
- Reports: 'customization/reports.md'
|
||||
- Custom Scripts: 'customization/custom-scripts.md'
|
||||
- Best Practices:
|
||||
- Modeling Pluggable Transceivers: 'best-practices/modeling-pluggable-transceivers.md'
|
||||
- Performance Handbook: 'best-practices/performance-handbook.md'
|
||||
- Integrations:
|
||||
- REST API: 'integrations/rest-api.md'
|
||||
- GraphQL API: 'integrations/graphql-api.md'
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
from django.utils.translation import gettext as _
|
||||
|
||||
from account.models import UserToken
|
||||
from netbox.tables import NetBoxTable, columns
|
||||
|
||||
__all__ = (
|
||||
'UserTokenTable',
|
||||
)
|
||||
|
||||
|
||||
TOKEN = """<samp><span id="token_{{ record.pk }}">{{ record }}</span></samp>"""
|
||||
|
||||
ALLOWED_IPS = """{{ value|join:", " }}"""
|
||||
|
||||
COPY_BUTTON = """
|
||||
{% if settings.ALLOW_TOKEN_RETRIEVAL %}
|
||||
{% copy_content record.pk prefix="token_" color="success" %}
|
||||
{% endif %}
|
||||
"""
|
||||
|
||||
|
||||
class UserTokenTable(NetBoxTable):
|
||||
"""
|
||||
Table for users to manager their own API tokens under account views.
|
||||
"""
|
||||
key = columns.TemplateColumn(
|
||||
verbose_name=_('Key'),
|
||||
template_code=TOKEN,
|
||||
)
|
||||
write_enabled = columns.BooleanColumn(
|
||||
verbose_name=_('Write Enabled')
|
||||
)
|
||||
created = columns.DateTimeColumn(
|
||||
timespec='minutes',
|
||||
verbose_name=_('Created'),
|
||||
)
|
||||
expires = columns.DateTimeColumn(
|
||||
timespec='minutes',
|
||||
verbose_name=_('Expires'),
|
||||
)
|
||||
last_used = columns.DateTimeColumn(
|
||||
verbose_name=_('Last Used'),
|
||||
)
|
||||
allowed_ips = columns.TemplateColumn(
|
||||
verbose_name=_('Allowed IPs'),
|
||||
template_code=ALLOWED_IPS
|
||||
)
|
||||
actions = columns.ActionsColumn(
|
||||
actions=('edit', 'delete'),
|
||||
extra_buttons=COPY_BUTTON
|
||||
)
|
||||
|
||||
class Meta(NetBoxTable.Meta):
|
||||
model = UserToken
|
||||
fields = (
|
||||
'pk', 'id', 'key', 'description', 'write_enabled', 'created', 'expires', 'last_used', 'allowed_ips',
|
||||
)
|
||||
@@ -26,8 +26,9 @@ from extras.tables import BookmarkTable, NotificationTable, SubscriptionTable
|
||||
from netbox.authentication import get_auth_backend_display, get_saml_idps
|
||||
from netbox.config import get_config
|
||||
from netbox.views import generic
|
||||
from users import forms, tables
|
||||
from users import forms
|
||||
from users.models import UserConfig
|
||||
from users.tables import TokenTable
|
||||
from utilities.request import safe_for_redirect
|
||||
from utilities.string import remove_linebreaks
|
||||
from utilities.views import register_model_view
|
||||
@@ -328,7 +329,8 @@ class UserTokenListView(LoginRequiredMixin, View):
|
||||
|
||||
def get(self, request):
|
||||
tokens = UserToken.objects.filter(user=request.user)
|
||||
table = tables.UserTokenTable(tokens)
|
||||
table = TokenTable(tokens)
|
||||
table.columns.hide('user')
|
||||
table.configure(request)
|
||||
|
||||
return render(request, 'account/token_list.html', {
|
||||
@@ -343,11 +345,9 @@ class UserTokenView(LoginRequiredMixin, View):
|
||||
|
||||
def get(self, request, pk):
|
||||
token = get_object_or_404(UserToken.objects.filter(user=request.user), pk=pk)
|
||||
key = token.key if settings.ALLOW_TOKEN_RETRIEVAL else None
|
||||
|
||||
return render(request, 'account/token.html', {
|
||||
'object': token,
|
||||
'key': key,
|
||||
})
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
from django.apps import AppConfig
|
||||
|
||||
from netbox import denormalized
|
||||
|
||||
|
||||
class CircuitsConfig(AppConfig):
|
||||
name = "circuits"
|
||||
@@ -8,6 +10,16 @@ class CircuitsConfig(AppConfig):
|
||||
def ready(self):
|
||||
from netbox.models.features import register_models
|
||||
from . import signals, search # noqa: F401
|
||||
from .models import CircuitTermination
|
||||
|
||||
# Register models
|
||||
register_models(*self.get_models())
|
||||
|
||||
denormalized.register(CircuitTermination, '_site', {
|
||||
'_region': 'region',
|
||||
'_site_group': 'group',
|
||||
})
|
||||
|
||||
denormalized.register(CircuitTermination, '_location', {
|
||||
'_site': 'site',
|
||||
})
|
||||
|
||||
@@ -282,18 +282,18 @@ class FixSerializedPKRelatedField(OpenApiSerializerFieldExtension):
|
||||
|
||||
class FixIntegerRangeSerializerSchema(OpenApiSerializerExtension):
|
||||
target_class = 'netbox.api.fields.IntegerRangeSerializer'
|
||||
match_subclasses = True
|
||||
|
||||
def map_serializer(self, auto_schema: 'AutoSchema', direction: Direction) -> _SchemaType:
|
||||
# One range = two integers; many=True will wrap this in an outer array
|
||||
return {
|
||||
'type': 'array',
|
||||
'items': {
|
||||
'type': 'array',
|
||||
'items': {
|
||||
'type': 'integer',
|
||||
},
|
||||
'minItems': 2,
|
||||
'maxItems': 2,
|
||||
'type': 'integer',
|
||||
},
|
||||
'minItems': 2,
|
||||
'maxItems': 2,
|
||||
'example': [10, 20],
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ class BackgroundTaskSerializer(serializers.Serializer):
|
||||
url = serializers.HyperlinkedIdentityField(
|
||||
view_name='core-api:rqtask-detail',
|
||||
lookup_field='id',
|
||||
lookup_url_kwarg='pk'
|
||||
lookup_url_kwarg='id'
|
||||
)
|
||||
description = serializers.CharField()
|
||||
origin = serializers.CharField()
|
||||
|
||||
@@ -5,11 +5,10 @@ from django_rq.queues import get_redis_connection
|
||||
from django_rq.settings import QUEUES_LIST
|
||||
from django_rq.utils import get_statistics
|
||||
from drf_spectacular.types import OpenApiTypes
|
||||
from drf_spectacular.utils import extend_schema
|
||||
from drf_spectacular.utils import OpenApiParameter, extend_schema
|
||||
from rest_framework import viewsets
|
||||
from rest_framework.decorators import action
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from rest_framework.permissions import IsAdminUser
|
||||
from rest_framework.response import Response
|
||||
from rest_framework.routers import APIRootView
|
||||
from rest_framework.viewsets import ReadOnlyModelViewSet
|
||||
@@ -24,6 +23,7 @@ from netbox.api.authentication import IsAuthenticatedOrLoginNotRequired
|
||||
from netbox.api.metadata import ContentTypeMetadata
|
||||
from netbox.api.pagination import LimitOffsetListPagination
|
||||
from netbox.api.viewsets import NetBoxModelViewSet, NetBoxReadOnlyModelViewSet
|
||||
from utilities.api import IsSuperuser
|
||||
from . import serializers
|
||||
|
||||
|
||||
@@ -99,7 +99,7 @@ class BaseRQViewSet(viewsets.ViewSet):
|
||||
"""
|
||||
Base class for RQ view sets. Provides a list() method. Subclasses must implement get_data().
|
||||
"""
|
||||
permission_classes = [IsAdminUser]
|
||||
permission_classes = [IsSuperuser]
|
||||
serializer_class = None
|
||||
|
||||
def get_data(self):
|
||||
@@ -117,29 +117,49 @@ class BaseRQViewSet(viewsets.ViewSet):
|
||||
def get_serializer(self, *args, **kwargs):
|
||||
"""
|
||||
Return the serializer instance that should be used for validating and
|
||||
deserializing input, and for serializing output.
|
||||
deserializing input and for serializing output.
|
||||
"""
|
||||
serializer_class = self.get_serializer_class()
|
||||
kwargs['context'] = self.get_serializer_context()
|
||||
return serializer_class(*args, **kwargs)
|
||||
|
||||
def get_serializer_class(self):
|
||||
"""
|
||||
Return the class to use for the serializer.
|
||||
"""
|
||||
return self.serializer_class
|
||||
|
||||
def get_serializer_context(self):
|
||||
"""
|
||||
Extra context provided to the serializer class.
|
||||
"""
|
||||
return {
|
||||
'request': self.request,
|
||||
'format': self.format_kwarg,
|
||||
'view': self,
|
||||
}
|
||||
|
||||
|
||||
class BackgroundQueueViewSet(BaseRQViewSet):
|
||||
"""
|
||||
Retrieve a list of RQ Queues.
|
||||
Note: Queue names are not URL safe so not returning a detail view.
|
||||
Note: Queue names are not URL safe, so not returning a detail view.
|
||||
"""
|
||||
serializer_class = serializers.BackgroundQueueSerializer
|
||||
lookup_field = 'name'
|
||||
lookup_value_regex = r'[\w.@+-]+'
|
||||
|
||||
def get_view_name(self):
|
||||
return "Background Queues"
|
||||
return 'Background Queues'
|
||||
|
||||
def get_data(self):
|
||||
return get_statistics(run_maintenance_tasks=True)["queues"]
|
||||
return get_statistics(run_maintenance_tasks=True)['queues']
|
||||
|
||||
@extend_schema(responses={200: OpenApiTypes.OBJECT})
|
||||
@extend_schema(
|
||||
operation_id='core_background_queues_retrieve_by_name',
|
||||
parameters=[OpenApiParameter(name='name', type=OpenApiTypes.STR, location=OpenApiParameter.PATH)],
|
||||
responses={200: OpenApiTypes.OBJECT},
|
||||
)
|
||||
def retrieve(self, request, name):
|
||||
data = self.get_data()
|
||||
if not data:
|
||||
@@ -161,12 +181,17 @@ class BackgroundWorkerViewSet(BaseRQViewSet):
|
||||
lookup_field = 'name'
|
||||
|
||||
def get_view_name(self):
|
||||
return "Background Workers"
|
||||
return 'Background Workers'
|
||||
|
||||
def get_data(self):
|
||||
config = QUEUES_LIST[0]
|
||||
return Worker.all(get_redis_connection(config['connection_config']))
|
||||
|
||||
@extend_schema(
|
||||
operation_id='core_background_workers_retrieve_by_name',
|
||||
parameters=[OpenApiParameter(name='name', type=OpenApiTypes.STR, location=OpenApiParameter.PATH)],
|
||||
responses={200: OpenApiTypes.OBJECT},
|
||||
)
|
||||
def retrieve(self, request, name):
|
||||
# all the RQ queues should use the same connection
|
||||
config = QUEUES_LIST[0]
|
||||
@@ -184,9 +209,10 @@ class BackgroundTaskViewSet(BaseRQViewSet):
|
||||
Retrieve a list of RQ Tasks.
|
||||
"""
|
||||
serializer_class = serializers.BackgroundTaskSerializer
|
||||
lookup_field = 'id'
|
||||
|
||||
def get_view_name(self):
|
||||
return "Background Tasks"
|
||||
return 'Background Tasks'
|
||||
|
||||
def get_data(self):
|
||||
return get_rq_jobs()
|
||||
@@ -199,45 +225,53 @@ class BackgroundTaskViewSet(BaseRQViewSet):
|
||||
|
||||
return task
|
||||
|
||||
@extend_schema(responses={200: OpenApiTypes.OBJECT})
|
||||
def retrieve(self, request, pk):
|
||||
@extend_schema(
|
||||
operation_id='core_background_tasks_retrieve_by_id',
|
||||
parameters=[OpenApiParameter(name='id', type=OpenApiTypes.STR, location=OpenApiParameter.PATH)],
|
||||
responses={200: OpenApiTypes.OBJECT},
|
||||
)
|
||||
def retrieve(self, request, id):
|
||||
"""
|
||||
Retrieve the details of the specified RQ Task.
|
||||
"""
|
||||
task = self.get_task_from_id(pk)
|
||||
task = self.get_task_from_id(id)
|
||||
serializer = self.serializer_class(task, context={'request': request})
|
||||
return Response(serializer.data)
|
||||
|
||||
@action(methods=["POST"], detail=True)
|
||||
def delete(self, request, pk):
|
||||
@extend_schema(parameters=[OpenApiParameter(name='id', type=OpenApiTypes.STR, location=OpenApiParameter.PATH)])
|
||||
@action(methods=['POST'], detail=True)
|
||||
def delete(self, request, id):
|
||||
"""
|
||||
Delete the specified RQ Task.
|
||||
"""
|
||||
delete_rq_job(pk)
|
||||
delete_rq_job(id)
|
||||
return HttpResponse(status=200)
|
||||
|
||||
@action(methods=["POST"], detail=True)
|
||||
def requeue(self, request, pk):
|
||||
@extend_schema(parameters=[OpenApiParameter(name='id', type=OpenApiTypes.STR, location=OpenApiParameter.PATH)])
|
||||
@action(methods=['POST'], detail=True)
|
||||
def requeue(self, request, id):
|
||||
"""
|
||||
Requeues the specified RQ Task.
|
||||
"""
|
||||
requeue_rq_job(pk)
|
||||
requeue_rq_job(id)
|
||||
return HttpResponse(status=200)
|
||||
|
||||
@action(methods=["POST"], detail=True)
|
||||
def enqueue(self, request, pk):
|
||||
@extend_schema(parameters=[OpenApiParameter(name='id', type=OpenApiTypes.STR, location=OpenApiParameter.PATH)])
|
||||
@action(methods=['POST'], detail=True)
|
||||
def enqueue(self, request, id):
|
||||
"""
|
||||
Enqueues the specified RQ Task.
|
||||
"""
|
||||
enqueue_rq_job(pk)
|
||||
enqueue_rq_job(id)
|
||||
return HttpResponse(status=200)
|
||||
|
||||
@action(methods=["POST"], detail=True)
|
||||
def stop(self, request, pk):
|
||||
@extend_schema(parameters=[OpenApiParameter(name='id', type=OpenApiTypes.STR, location=OpenApiParameter.PATH)])
|
||||
@action(methods=['POST'], detail=True)
|
||||
def stop(self, request, id):
|
||||
"""
|
||||
Stops the specified RQ Task.
|
||||
"""
|
||||
stopped_jobs = stop_rq_job(pk)
|
||||
stopped_jobs = stop_rq_job(id)
|
||||
if len(stopped_jobs) == 1:
|
||||
return HttpResponse(status=200)
|
||||
else:
|
||||
|
||||
@@ -3,12 +3,12 @@ from typing import Annotated, List, TYPE_CHECKING
|
||||
import strawberry
|
||||
import strawberry_django
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from strawberry.types import Info
|
||||
|
||||
from core.models import ObjectChange
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.graphql.types import DataFileType, DataSourceType
|
||||
from netbox.core.graphql.types import ObjectChangeType
|
||||
from core.graphql.types import DataFileType, DataSourceType, ObjectChangeType
|
||||
|
||||
__all__ = (
|
||||
'ChangelogMixin',
|
||||
@@ -20,7 +20,7 @@ __all__ = (
|
||||
class ChangelogMixin:
|
||||
|
||||
@strawberry_django.field
|
||||
def changelog(self, info) -> List[Annotated["ObjectChangeType", strawberry.lazy('.types')]]: # noqa: F821
|
||||
def changelog(self, info: Info) -> List[Annotated['ObjectChangeType', strawberry.lazy('.types')]]: # noqa: F821
|
||||
content_type = ContentType.objects.get_for_model(self)
|
||||
object_changes = ObjectChange.objects.filter(
|
||||
changed_object_type=content_type,
|
||||
@@ -31,5 +31,5 @@ class ChangelogMixin:
|
||||
|
||||
@strawberry.type
|
||||
class SyncedDataMixin:
|
||||
data_source: Annotated["DataSourceType", strawberry.lazy('core.graphql.types')] | None
|
||||
data_file: Annotated["DataFileType", strawberry.lazy('core.graphql.types')] | None
|
||||
data_source: Annotated['DataSourceType', strawberry.lazy('core.graphql.types')] | None
|
||||
data_file: Annotated['DataFileType', strawberry.lazy('core.graphql.types')] | None
|
||||
|
||||
netbox/core/migrations/0019_configrevision_active.py (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
# Generated by Django 5.2.5 on 2025-09-09 16:48
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def get_active(apps, schema_editor):
|
||||
from django.core.cache import cache
|
||||
ConfigRevision = apps.get_model('core', 'ConfigRevision')
|
||||
version = None
|
||||
revision = None
|
||||
|
||||
# Try and get the latest version from cache
|
||||
try:
|
||||
version = cache.get('config_version')
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# If there is a version in cache, attempt to set revision to the current version from cache
|
||||
# If the version in cache does not exist or there is no version, try the latest revision in the database
|
||||
if not version or (version and not (revision := ConfigRevision.objects.filter(pk=version).first())):
|
||||
revision = ConfigRevision.objects.order_by('-created').first()
|
||||
|
||||
# If there is a revision set, set the active revision
|
||||
if revision:
|
||||
revision.active = True
|
||||
revision.save()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('core', '0018_concrete_objecttype'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='configrevision',
|
||||
name='active',
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
migrations.RunPython(code=get_active, reverse_code=migrations.RunPython.noop),
|
||||
migrations.AddConstraint(
|
||||
model_name='configrevision',
|
||||
constraint=models.UniqueConstraint(
|
||||
condition=models.Q(('active', True)), fields=('active',), name='unique_active_config_revision'
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -14,6 +14,9 @@ class ConfigRevision(models.Model):
|
||||
"""
|
||||
An atomic revision of NetBox's configuration.
|
||||
"""
|
||||
active = models.BooleanField(
|
||||
default=False
|
||||
)
|
||||
created = models.DateTimeField(
|
||||
verbose_name=_('created'),
|
||||
auto_now_add=True
|
||||
@@ -35,6 +38,13 @@ class ConfigRevision(models.Model):
|
||||
ordering = ['-created']
|
||||
verbose_name = _('config revision')
|
||||
verbose_name_plural = _('config revisions')
|
||||
constraints = [
|
||||
models.UniqueConstraint(
|
||||
fields=('active',),
|
||||
condition=models.Q(active=True),
|
||||
name='unique_active_config_revision',
|
||||
)
|
||||
]
|
||||
|
||||
def __str__(self):
|
||||
if not self.pk:
|
||||
@@ -59,8 +69,13 @@ class ConfigRevision(models.Model):
|
||||
"""
|
||||
cache.set('config', self.data, None)
|
||||
cache.set('config_version', self.pk, None)
|
||||
|
||||
# Set all instances of ConfigRevision to false and set this instance to true
|
||||
ConfigRevision.objects.all().update(active=False)
|
||||
ConfigRevision.objects.filter(pk=self.pk).update(active=True)
|
||||
|
||||
activate.alters_data = True
|
||||
|
||||
@property
|
||||
def is_active(self):
|
||||
return cache.get('config_version') == self.pk
|
||||
return self.active
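Taken together, the `active` flag, the conditional unique constraint, and `activate()` guarantee that at most one revision is active at a time. An illustrative usage sketch based on the code above:

```python
from core.models import ConfigRevision

# Activating a revision repopulates the config cache and flips the active flag
# on exactly one row (all other revisions are reset to active=False).
revision = ConfigRevision.objects.order_by('-created').first()
if revision is not None:
    revision.activate()
    assert revision.is_active  # now backed by the database flag rather than the cache
```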
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
# TODO: Remove this module in NetBox v4.5
|
||||
# Provided for backward compatibility
|
||||
from .object_types import *
|
||||
@@ -5,7 +5,7 @@ from django.contrib.contenttypes.models import ContentType
|
||||
from django.contrib.postgres.fields import ArrayField
|
||||
from django.contrib.postgres.indexes import GinIndex
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
from django.db import models
|
||||
from django.db import connection, models
|
||||
from django.db.models import Q
|
||||
from django.utils.translation import gettext as _
|
||||
|
||||
@@ -66,6 +66,14 @@ class ObjectTypeManager(models.Manager):
|
||||
"""
|
||||
from netbox.models.features import get_model_features, model_is_public
|
||||
|
||||
# TODO: Remove this in NetBox v5.0
|
||||
# If the ObjectType table has not yet been provisioned (e.g. because we're in a pre-v4.4 migration),
|
||||
# fall back to ContentType.
|
||||
if 'core_objecttype' not in connection.introspection.table_names():
|
||||
ct = ContentType.objects.get_for_model(model, for_concrete_model=for_concrete_model)
|
||||
ct.features = get_model_features(ct.model_class())
|
||||
return ct
|
||||
|
||||
if not inspect.isclass(model):
|
||||
model = model.__class__
|
||||
opts = self._get_opts(model, for_concrete_model)
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import datetime
|
||||
import importlib
|
||||
import importlib.util
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ from rq.job import Job as RQ_Job, JobStatus
|
||||
from rq.registry import FailedJobRegistry, StartedJobRegistry
|
||||
|
||||
from rest_framework import status
|
||||
from users.constants import TOKEN_PREFIX
|
||||
from users.models import Token, User
|
||||
from utilities.testing import APITestCase, APIViewTestCases, TestCase
|
||||
from utilities.testing.utils import disable_logging
|
||||
@@ -107,14 +108,14 @@ class ObjectTypeTest(APITestCase):
|
||||
def test_list_objects(self):
|
||||
object_type_count = ObjectType.objects.count()
|
||||
|
||||
response = self.client.get(reverse('extras-api:objecttype-list'), **self.header)
|
||||
response = self.client.get(reverse('core-api:objecttype-list'), **self.header)
|
||||
self.assertHttpStatus(response, status.HTTP_200_OK)
|
||||
self.assertEqual(response.data['count'], object_type_count)
|
||||
|
||||
def test_get_object(self):
|
||||
object_type = ObjectType.objects.first()
|
||||
|
||||
url = reverse('extras-api:objecttype-detail', kwargs={'pk': object_type.pk})
|
||||
url = reverse('core-api:objecttype-detail', kwargs={'pk': object_type.pk})
|
||||
self.assertHttpStatus(self.client.get(url, **self.header), status.HTTP_200_OK)
|
||||
|
||||
|
||||
@@ -134,12 +135,9 @@ class BackgroundTaskTestCase(TestCase):
|
||||
Create a user and token for API calls.
|
||||
"""
|
||||
# Create the test user and assign permissions
|
||||
self.user = User.objects.create_user(username='testuser')
|
||||
self.user.is_staff = True
|
||||
self.user.is_active = True
|
||||
self.user.save()
|
||||
self.user = User.objects.create_user(username='testuser', is_active=True)
|
||||
self.token = Token.objects.create(user=self.user)
|
||||
self.header = {'HTTP_AUTHORIZATION': f'Token {self.token.key}'}
|
||||
self.header = {'HTTP_AUTHORIZATION': f'Bearer {TOKEN_PREFIX}{self.token.key}.{self.token.token}'}
|
||||
|
||||
# Clear all queues prior to running each test
|
||||
get_queue('default').connection.flushall()
|
||||
@@ -150,13 +148,11 @@ class BackgroundTaskTestCase(TestCase):
|
||||
url = reverse('core-api:rqqueue-list')
|
||||
|
||||
# Attempt to load view without permission
|
||||
self.user.is_staff = False
|
||||
self.user.save()
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Load view with permission
|
||||
self.user.is_staff = True
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
@@ -165,7 +161,16 @@ class BackgroundTaskTestCase(TestCase):
|
||||
self.assertIn('low', str(response.content))
|
||||
|
||||
def test_background_queue(self):
|
||||
response = self.client.get(reverse('core-api:rqqueue-detail', args=['default']), **self.header)
|
||||
url = reverse('core-api:rqqueue-detail', args=['default'])
|
||||
|
||||
# Attempt to load view without permission
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Load view with permission
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
self.assertIn('default', str(response.content))
|
||||
self.assertIn('oldest_job_timestamp', str(response.content))
|
||||
@@ -174,8 +179,16 @@ class BackgroundTaskTestCase(TestCase):
|
||||
def test_background_task_list(self):
|
||||
queue = get_queue('default')
|
||||
queue.enqueue(self.dummy_job_default)
|
||||
url = reverse('core-api:rqtask-list')
|
||||
|
||||
response = self.client.get(reverse('core-api:rqtask-list'), **self.header)
|
||||
# Attempt to load view without permission
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Load view with permission
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
self.assertIn('origin', str(response.content))
|
||||
self.assertIn('core.tests.test_api.BackgroundTaskTestCase.dummy_job_default()', str(response.content))
|
||||
@@ -183,8 +196,16 @@ class BackgroundTaskTestCase(TestCase):
|
||||
def test_background_task(self):
|
||||
queue = get_queue('default')
|
||||
job = queue.enqueue(self.dummy_job_default)
|
||||
url = reverse('core-api:rqtask-detail', args=[job.id])
|
||||
|
||||
response = self.client.get(reverse('core-api:rqtask-detail', args=[job.id]), **self.header)
|
||||
# Attempt to load view without permission
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Load view with permission
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
self.assertIn(str(job.id), str(response.content))
|
||||
self.assertIn('origin', str(response.content))
|
||||
@@ -194,45 +215,65 @@ class BackgroundTaskTestCase(TestCase):
|
||||
def test_background_task_delete(self):
|
||||
queue = get_queue('default')
|
||||
job = queue.enqueue(self.dummy_job_default)
|
||||
url = reverse('core-api:rqtask-delete', args=[job.id])
|
||||
|
||||
response = self.client.post(reverse('core-api:rqtask-delete', args=[job.id]), **self.header)
|
||||
# Attempt to load view without permission
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Load view with permission
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.post(url, **self.header)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
self.assertFalse(RQ_Job.exists(job.id, connection=queue.connection))
|
||||
queue = get_queue('default')
|
||||
self.assertNotIn(job.id, queue.job_ids)
|
||||
|
||||
def test_background_task_requeue(self):
|
||||
queue = get_queue('default')
|
||||
|
||||
# Enqueue & run a job that will fail
|
||||
queue = get_queue('default')
|
||||
job = queue.enqueue(self.dummy_job_failing)
|
||||
worker = get_worker('default')
|
||||
with disable_logging():
|
||||
worker.work(burst=True)
|
||||
self.assertTrue(job.is_failed)
|
||||
url = reverse('core-api:rqtask-requeue', args=[job.id])
|
||||
|
||||
# Attempt to requeue the job without permission
|
||||
response = self.client.post(url, **self.header)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Re-enqueue the failed job and check that its status has been reset
|
||||
response = self.client.post(reverse('core-api:rqtask-requeue', args=[job.id]), **self.header)
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.post(url, **self.header)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
job = RQ_Job.fetch(job.id, queue.connection)
|
||||
self.assertFalse(job.is_failed)
|
||||
|
||||
def test_background_task_enqueue(self):
|
||||
queue = get_queue('default')
|
||||
|
||||
# Enqueue some jobs that each depends on its predecessor
|
||||
queue = get_queue('default')
|
||||
job = previous_job = None
|
||||
for _ in range(0, 3):
|
||||
job = queue.enqueue(self.dummy_job_default, depends_on=previous_job)
|
||||
previous_job = job
|
||||
url = reverse('core-api:rqtask-enqueue', args=[job.id])
|
||||
|
||||
# Check that the last job to be enqueued has a status of deferred
|
||||
self.assertIsNotNone(job)
|
||||
self.assertEqual(job.get_status(), JobStatus.DEFERRED)
|
||||
self.assertIsNone(job.enqueued_at)
|
||||
|
||||
# Attempt to force-enqueue the job without permission
|
||||
response = self.client.post(url, **self.header)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Force-enqueue the deferred job
|
||||
response = self.client.post(reverse('core-api:rqtask-enqueue', args=[job.id]), **self.header)
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.post(url, **self.header)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
|
||||
# Check that job's status is updated correctly
|
||||
@@ -242,19 +283,27 @@ class BackgroundTaskTestCase(TestCase):
|
||||
|
||||
def test_background_task_stop(self):
|
||||
queue = get_queue('default')
|
||||
|
||||
worker = get_worker('default')
|
||||
job = queue.enqueue(self.dummy_job_default)
|
||||
worker.prepare_job_execution(job)
|
||||
|
||||
url = reverse('core-api:rqtask-stop', args=[job.id])
|
||||
self.assertEqual(job.get_status(), JobStatus.STARTED)
|
||||
response = self.client.post(reverse('core-api:rqtask-stop', args=[job.id]), **self.header)
|
||||
|
||||
# Attempt to stop the task without permission
|
||||
response = self.client.post(url, **self.header)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Stop the task
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.post(url, **self.header)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
with disable_logging():
|
||||
worker.monitor_work_horse(job, queue) # Sets the job as Failed and removes from Started
|
||||
started_job_registry = StartedJobRegistry(queue.name, connection=queue.connection)
|
||||
self.assertEqual(len(started_job_registry), 0)
|
||||
|
||||
# Verify that the task was cancelled
|
||||
canceled_job_registry = FailedJobRegistry(queue.name, connection=queue.connection)
|
||||
self.assertEqual(len(canceled_job_registry), 1)
|
||||
self.assertIn(job.id, canceled_job_registry)
|
||||
@@ -262,19 +311,34 @@ class BackgroundTaskTestCase(TestCase):
|
||||
def test_worker_list(self):
|
||||
worker1 = get_worker('default', name=uuid.uuid4().hex)
|
||||
worker1.register_birth()
|
||||
|
||||
worker2 = get_worker('high')
|
||||
worker2.register_birth()
|
||||
url = reverse('core-api:rqworker-list')
|
||||
|
||||
response = self.client.get(reverse('core-api:rqworker-list'), **self.header)
|
||||
# Attempt to fetch the worker list without permission
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Fetch the worker list
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
self.assertIn(str(worker1.name), str(response.content))
|
||||
|
||||
def test_worker(self):
|
||||
worker1 = get_worker('default', name=uuid.uuid4().hex)
|
||||
worker1.register_birth()
|
||||
url = reverse('core-api:rqworker-detail', args=[worker1.name])
|
||||
|
||||
response = self.client.get(reverse('core-api:rqworker-detail', args=[worker1.name]), **self.header)
|
||||
# Attempt to fetch a worker without permission
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Fetch the worker
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.get(url, **self.header)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
self.assertIn(str(worker1.name), str(response.content))
|
||||
self.assertIn('birth_date', str(response.content))
|
||||
|
||||
@@ -158,7 +158,7 @@ class BackgroundTaskTestCase(TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
self.user.is_staff = True
|
||||
self.user.is_superuser = True
|
||||
self.user.is_active = True
|
||||
self.user.save()
|
||||
|
||||
@@ -171,13 +171,13 @@ class BackgroundTaskTestCase(TestCase):
|
||||
url = reverse('core:background_queue_list')
|
||||
|
||||
# Attempt to load view without permission
|
||||
self.user.is_staff = False
|
||||
self.user.is_superuser = False
|
||||
self.user.save()
|
||||
response = self.client.get(url)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
# Load view with permission
|
||||
self.user.is_staff = True
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
response = self.client.get(url)
|
||||
self.assertEqual(response.status_code, 200)
|
||||
@@ -356,7 +356,7 @@ class SystemTestCase(TestCase):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
|
||||
self.user.is_staff = True
|
||||
self.user.is_superuser = True
|
||||
self.user.save()
|
||||
|
||||
def test_system_view_default(self):
|
||||
|
||||
@@ -372,7 +372,7 @@ class ConfigRevisionRestoreView(ContentTypePermissionRequiredMixin, View):
|
||||
class BaseRQView(UserPassesTestMixin, View):
|
||||
|
||||
def test_func(self):
|
||||
return self.request.user.is_staff
|
||||
return self.request.user.is_superuser
|
||||
|
||||
|
||||
class BackgroundQueueListView(TableMixin, BaseRQView):
|
||||
@@ -555,7 +555,7 @@ class WorkerView(BaseRQView):
|
||||
class SystemView(UserPassesTestMixin, View):
|
||||
|
||||
def test_func(self):
|
||||
return self.request.user.is_staff
|
||||
return self.request.user.is_superuser
|
||||
|
||||
def get(self, request):
|
||||
|
||||
@@ -638,7 +638,7 @@ class BasePluginView(UserPassesTestMixin, View):
|
||||
CACHE_KEY_CATALOG_ERROR = 'plugins-catalog-error'
|
||||
|
||||
def test_func(self):
|
||||
return self.request.user.is_staff
|
||||
return self.request.user.is_superuser
|
||||
|
||||
def get_cached_plugins(self, request):
|
||||
catalog_plugins = {}
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from drf_spectacular.types import OpenApiTypes
|
||||
from drf_spectacular.utils import extend_schema_field
|
||||
from rest_framework import serializers
|
||||
|
||||
from dcim.choices import *
|
||||
from dcim.constants import *
|
||||
from dcim.models import Cable, CablePath, CableTermination
|
||||
from netbox.api.fields import ChoiceField, ContentTypeField
|
||||
from netbox.api.serializers import BaseModelSerializer, GenericObjectSerializer, NetBoxModelSerializer
|
||||
@@ -51,9 +49,11 @@ class TracedCableSerializer(BaseModelSerializer):
|
||||
|
||||
class CableTerminationSerializer(NetBoxModelSerializer):
|
||||
termination_type = ContentTypeField(
|
||||
queryset=ContentType.objects.filter(CABLE_TERMINATION_MODELS)
|
||||
read_only=True,
|
||||
)
|
||||
termination = serializers.SerializerMethodField(
|
||||
read_only=True,
|
||||
)
|
||||
termination = serializers.SerializerMethodField(read_only=True)
|
||||
|
||||
class Meta:
|
||||
model = CableTermination
|
||||
@@ -61,6 +61,8 @@ class CableTerminationSerializer(NetBoxModelSerializer):
|
||||
'id', 'url', 'display', 'cable', 'cable_end', 'termination_type', 'termination_id',
|
||||
'termination', 'created', 'last_updated',
|
||||
]
|
||||
read_only_fields = fields
|
||||
brief_fields = ('id', 'url', 'display', 'cable', 'cable_end', 'termination_type', 'termination_id')
|
||||
|
||||
@extend_schema_field(serializers.JSONField(allow_null=True))
|
||||
def get_termination(self, obj):
|
||||
|
||||
@@ -16,7 +16,7 @@ from extras.api.mixins import ConfigContextQuerySetMixin, RenderConfigMixin
|
||||
from netbox.api.authentication import IsAuthenticatedOrLoginNotRequired
|
||||
from netbox.api.metadata import ContentTypeMetadata
|
||||
from netbox.api.pagination import StripCountAnnotationsPaginator
|
||||
from netbox.api.viewsets import NetBoxModelViewSet, MPTTLockedMixin
|
||||
from netbox.api.viewsets import NetBoxModelViewSet, MPTTLockedMixin, NetBoxReadOnlyModelViewSet
|
||||
from netbox.api.viewsets.mixins import SequentialBulkCreatesMixin
|
||||
from utilities.api import get_serializer_for_model
|
||||
from utilities.query_functions import CollateAsChar
|
||||
@@ -563,7 +563,7 @@ class CableViewSet(NetBoxModelViewSet):
|
||||
filterset_class = filtersets.CableFilterSet
|
||||
|
||||
|
||||
class CableTerminationViewSet(NetBoxModelViewSet):
|
||||
class CableTerminationViewSet(NetBoxReadOnlyModelViewSet):
|
||||
metadata_class = ContentTypeMetadata
|
||||
queryset = CableTermination.objects.all()
|
||||
serializer_class = serializers.CableTerminationSerializer
|
||||
|
||||
@@ -1163,14 +1163,14 @@ class InterfaceTypeChoices(ChoiceSet):
|
||||
(
|
||||
(TYPE_1GE_BX10_D, '1000BASE-BX10-D (1GE BiDi Down)'),
|
||||
(TYPE_1GE_BX10_U, '1000BASE-BX10-U (1GE BiDi Up)'),
|
||||
(TYPE_1GE_CX, '1000BASE-CX (1GE DAC)'),
|
||||
(TYPE_1GE_CWDM, '1000BASE-CWDM (1GE)'),
|
||||
(TYPE_1GE_CX, '1000BASE-CX (1GE DAC)'),
|
||||
(TYPE_1GE_DWDM, '1000BASE-DWDM (1GE)'),
|
||||
(TYPE_1GE_EX, '1000BASE-EX (1GE)'),
|
||||
(TYPE_1GE_SX_FIXED, '1000BASE-SX (1GE)'),
|
||||
(TYPE_1GE_LSX, '1000BASE-LSX (1GE)'),
|
||||
(TYPE_1GE_LX_FIXED, '1000BASE-LX (1GE)'),
|
||||
(TYPE_1GE_LX10, '1000BASE-LX10/LH (1GE)'),
|
||||
(TYPE_1GE_SX_FIXED, '1000BASE-SX (1GE)'),
|
||||
(TYPE_1GE_FIXED, '1000BASE-T (1GE)'),
|
||||
(TYPE_1GE_TX_FIXED, '1000BASE-TX (1GE)'),
|
||||
(TYPE_1GE_ZX, '1000BASE-ZX (1GE)'),
|
||||
@@ -1186,8 +1186,8 @@ class InterfaceTypeChoices(ChoiceSet):
|
||||
(
|
||||
_('10 Gbps Ethernet'),
|
||||
(
|
||||
(TYPE_10GE_BR_D, '10GBASE-DR-D (10GE BiDi Down)'),
|
||||
(TYPE_10GE_BR_U, '10GBASE-DR-U (10GE BiDi Up)'),
|
||||
(TYPE_10GE_BR_D, '10GBASE-BR-D (10GE BiDi Down)'),
|
||||
(TYPE_10GE_BR_U, '10GBASE-BR-U (10GE BiDi Up)'),
|
||||
(TYPE_10GE_CX4, '10GBASE-CX4 (10GE DAC)'),
|
||||
(TYPE_10GE_ER, '10GBASE-ER (10GE)'),
|
||||
(TYPE_10GE_LR, '10GBASE-LR (10GE)'),
|
||||
@@ -1235,6 +1235,7 @@ class InterfaceTypeChoices(ChoiceSet):
|
||||
(TYPE_100GE_CR2, '100GBASE-CR2 (100GE DAC)'),
|
||||
(TYPE_100GE_CR4, '100GBASE-CR4 (100GE DAC)'),
|
||||
(TYPE_100GE_CR10, '100GBASE-CR10 (100GE DAC)'),
|
||||
(TYPE_100GE_CWDM4, '100GBASE-CWDM4 (100GE)'),
|
||||
(TYPE_100GE_DR, '100GBASE-DR (100GE)'),
|
||||
(TYPE_100GE_ER4, '100GBASE-ER4 (100GE)'),
|
||||
(TYPE_100GE_FR1, '100GBASE-FR1 (100GE)'),
|
||||
@@ -1253,12 +1254,12 @@ class InterfaceTypeChoices(ChoiceSet):
|
||||
(
|
||||
(TYPE_200GE_CR2, '200GBASE-CR2 (200GE)'),
|
||||
(TYPE_200GE_CR4, '200GBASE-CR4 (200GE)'),
|
||||
(TYPE_200GE_SR2, '200GBASE-SR2 (200GE)'),
|
||||
(TYPE_200GE_SR4, '200GBASE-SR4 (200GE)'),
|
||||
(TYPE_200GE_DR4, '200GBASE-DR4 (200GE)'),
|
||||
(TYPE_200GE_ER4, '200GBASE-ER4 (200GE)'),
|
||||
(TYPE_200GE_FR4, '200GBASE-FR4 (200GE)'),
|
||||
(TYPE_200GE_LR4, '200GBASE-LR4 (200GE)'),
|
||||
(TYPE_200GE_SR2, '200GBASE-SR2 (200GE)'),
|
||||
(TYPE_200GE_SR4, '200GBASE-SR4 (200GE)'),
|
||||
(TYPE_200GE_VR2, '200GBASE-VR2 (200GE)'),
|
||||
)
|
||||
),
|
||||
@@ -1296,34 +1297,34 @@ class InterfaceTypeChoices(ChoiceSet):
|
||||
(TYPE_1GE_GBIC, 'GBIC (1GE)'),
|
||||
(TYPE_1GE_SFP, 'SFP (1GE)'),
|
||||
(TYPE_10GE_SFP_PLUS, 'SFP+ (10GE)'),
|
||||
(TYPE_10GE_XFP, 'XFP (10GE)'),
|
||||
(TYPE_10GE_XENPAK, 'XENPAK (10GE)'),
|
||||
(TYPE_10GE_XFP, 'XFP (10GE)'),
|
||||
(TYPE_10GE_X2, 'X2 (10GE)'),
|
||||
(TYPE_25GE_SFP28, 'SFP28 (25GE)'),
|
||||
(TYPE_50GE_SFP56, 'SFP56 (50GE)'),
|
||||
(TYPE_40GE_QSFP_PLUS, 'QSFP+ (40GE)'),
|
||||
(TYPE_50GE_QSFP28, 'QSFP28 (50GE)'),
|
||||
(TYPE_50GE_SFP56, 'SFP56 (50GE)'),
|
||||
(TYPE_100GE_CFP, 'CFP (100GE)'),
|
||||
(TYPE_100GE_CFP2, 'CFP2 (100GE)'),
|
||||
(TYPE_200GE_CFP2, 'CFP2 (200GE)'),
|
||||
(TYPE_400GE_CFP2, 'CFP2 (400GE)'),
|
||||
(TYPE_100GE_CFP4, 'CFP4 (100GE)'),
|
||||
(TYPE_100GE_CXP, 'CXP (100GE)'),
|
||||
(TYPE_100GE_CPAK, 'Cisco CPAK (100GE)'),
|
||||
(TYPE_100GE_DSFP, 'DSFP (100GE)'),
|
||||
(TYPE_100GE_SFP_DD, 'SFP-DD (100GE)'),
|
||||
(TYPE_100GE_QSFP28, 'QSFP28 (100GE)'),
|
||||
(TYPE_100GE_QSFP_DD, 'QSFP-DD (100GE)'),
|
||||
(TYPE_100GE_SFP_DD, 'SFP-DD (100GE)'),
|
||||
(TYPE_200GE_CFP2, 'CFP2 (200GE)'),
|
||||
(TYPE_200GE_QSFP56, 'QSFP56 (200GE)'),
|
||||
(TYPE_200GE_QSFP_DD, 'QSFP-DD (200GE)'),
|
||||
(TYPE_400GE_QSFP112, 'QSFP112 (400GE)'),
|
||||
(TYPE_400GE_QSFP_DD, 'QSFP-DD (400GE)'),
|
||||
(TYPE_400GE_CDFP, 'CDFP (400GE)'),
|
||||
(TYPE_400GE_CFP2, 'CFP2 (400GE)'),
|
||||
(TYPE_400GE_CFP8, 'CPF8 (400GE)'),
|
||||
(TYPE_400GE_OSFP, 'OSFP (400GE)'),
|
||||
(TYPE_400GE_OSFP_RHS, 'OSFP-RHS (400GE)'),
|
||||
(TYPE_400GE_CDFP, 'CDFP (400GE)'),
|
||||
(TYPE_400GE_CFP8, 'CPF8 (400GE)'),
|
||||
(TYPE_800GE_QSFP_DD, 'QSFP-DD (800GE)'),
|
||||
(TYPE_800GE_OSFP, 'OSFP (800GE)'),
|
||||
(TYPE_800GE_QSFP_DD, 'QSFP-DD (800GE)'),
|
||||
)
|
||||
),
|
||||
(
|
||||
|
||||
@@ -26,7 +26,7 @@ class eui64_unix_expanded_uppercase(eui64_unix_expanded):
|
||||
#
|
||||
|
||||
class MACAddressField(models.Field):
|
||||
description = "PostgreSQL MAC Address field"
|
||||
description = 'PostgreSQL MAC Address field'
|
||||
|
||||
def python_type(self):
|
||||
return EUI
|
||||
@@ -34,6 +34,9 @@ class MACAddressField(models.Field):
|
||||
def from_db_value(self, value, expression, connection):
|
||||
return self.to_python(value)
|
||||
|
||||
def get_internal_type(self):
|
||||
return 'CharField'
|
||||
|
||||
def to_python(self, value):
|
||||
if value is None:
|
||||
return value
|
||||
@@ -54,7 +57,7 @@ class MACAddressField(models.Field):
|
||||
|
||||
|
||||
class WWNField(models.Field):
|
||||
description = "World Wide Name field"
|
||||
description = 'World Wide Name field'
|
||||
|
||||
def python_type(self):
|
||||
return EUI
|
||||
@@ -62,6 +65,9 @@ class WWNField(models.Field):
|
||||
def from_db_value(self, value, expression, connection):
|
||||
return self.to_python(value)
|
||||
|
||||
def get_internal_type(self):
|
||||
return 'CharField'
|
||||
|
||||
def to_python(self, value):
|
||||
if value is None:
|
||||
return value
|
||||
|
||||
@@ -133,6 +133,11 @@ class SiteBulkEditForm(NetBoxModelBulkEditForm):
|
||||
queryset=Tenant.objects.all(),
|
||||
required=False
|
||||
)
|
||||
facility = forms.CharField(
|
||||
label=_('Facility'),
|
||||
max_length=50,
|
||||
required=False
|
||||
)
|
||||
asns = DynamicModelMultipleChoiceField(
|
||||
queryset=ASN.objects.all(),
|
||||
label=_('ASNs'),
|
||||
@@ -166,10 +171,10 @@ class SiteBulkEditForm(NetBoxModelBulkEditForm):
|
||||
|
||||
model = Site
|
||||
fieldsets = (
|
||||
FieldSet('status', 'region', 'group', 'tenant', 'asns', 'time_zone', 'description'),
|
||||
FieldSet('status', 'region', 'group', 'tenant', 'facility', 'asns', 'time_zone', 'description'),
|
||||
)
|
||||
nullable_fields = (
|
||||
'region', 'group', 'tenant', 'asns', 'time_zone', 'description', 'comments',
|
||||
'region', 'group', 'tenant', 'facility', 'asns', 'time_zone', 'description', 'comments',
|
||||
)
|
||||
|
||||
|
||||
@@ -198,6 +203,11 @@ class LocationBulkEditForm(NetBoxModelBulkEditForm):
|
||||
queryset=Tenant.objects.all(),
|
||||
required=False
|
||||
)
|
||||
facility = forms.CharField(
|
||||
label=_('Facility'),
|
||||
max_length=50,
|
||||
required=False
|
||||
)
|
||||
description = forms.CharField(
|
||||
label=_('Description'),
|
||||
max_length=200,
|
||||
@@ -207,9 +217,9 @@ class LocationBulkEditForm(NetBoxModelBulkEditForm):
|
||||
|
||||
model = Location
|
||||
fieldsets = (
|
||||
FieldSet('site', 'parent', 'status', 'tenant', 'description'),
|
||||
FieldSet('site', 'parent', 'status', 'tenant', 'facility', 'description'),
|
||||
)
|
||||
nullable_fields = ('parent', 'tenant', 'description', 'comments')
|
||||
nullable_fields = ('parent', 'tenant', 'facility', 'description', 'comments')
|
||||
|
||||
|
||||
class RackRoleBulkEditForm(NetBoxModelBulkEditForm):
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
from strawberry.types import Info
|
||||
|
||||
from circuits.graphql.types import CircuitTerminationType, ProviderNetworkType
|
||||
from circuits.models import CircuitTermination, ProviderNetwork
|
||||
from dcim.graphql.types import (
|
||||
@@ -49,7 +51,7 @@ class InventoryItemTemplateComponentType:
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def resolve_type(cls, instance, info):
|
||||
def resolve_type(cls, instance, info: Info):
|
||||
if type(instance) is ConsolePortTemplate:
|
||||
return ConsolePortTemplateType
|
||||
if type(instance) is ConsoleServerPortTemplate:
|
||||
@@ -79,7 +81,7 @@ class InventoryItemComponentType:
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def resolve_type(cls, instance, info):
|
||||
def resolve_type(cls, instance, info: Info):
|
||||
if type(instance) is ConsolePort:
|
||||
return ConsolePortType
|
||||
if type(instance) is ConsoleServerPort:
|
||||
@@ -112,7 +114,7 @@ class ConnectedEndpointType:
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def resolve_type(cls, instance, info):
|
||||
def resolve_type(cls, instance, info: Info):
|
||||
if type(instance) is CircuitTermination:
|
||||
return CircuitTerminationType
|
||||
if type(instance) is ConsolePortType:
|
||||
|
||||
@@ -3,9 +3,7 @@ import django.db.models.deletion
|
||||
import taggit.managers
|
||||
from django.db import migrations, models
|
||||
|
||||
import utilities.fields
|
||||
import utilities.json
|
||||
import utilities.ordering
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
@@ -632,10 +632,17 @@ class BaseInterface(models.Model):
|
||||
})
|
||||
|
||||
# Check that the primary MAC address (if any) is assigned to this interface
|
||||
if self.primary_mac_address and self.primary_mac_address.assigned_object != self:
|
||||
if (
|
||||
self.primary_mac_address and
|
||||
self.primary_mac_address.assigned_object is not None and
|
||||
self.primary_mac_address.assigned_object != self
|
||||
):
|
||||
raise ValidationError({
|
||||
'primary_mac_address': _("MAC address {mac_address} is not assigned to this interface.").format(
|
||||
mac_address=self.primary_mac_address
|
||||
'primary_mac_address': _(
|
||||
"MAC address {mac_address} is assigned to a different interface ({interface})."
|
||||
).format(
|
||||
mac_address=self.primary_mac_address,
|
||||
interface=self.primary_mac_address.assigned_object,
|
||||
)
|
||||
})
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ from django.db.models.signals import post_save, post_delete, pre_delete
|
||||
from django.dispatch import receiver
|
||||
|
||||
from dcim.choices import CableEndChoices, LinkStatusChoices
|
||||
from virtualization.models import VMInterface
|
||||
from .models import (
|
||||
Cable, CablePath, CableTermination, ConsolePort, ConsoleServerPort, Device, DeviceBay, FrontPort, Interface,
|
||||
InventoryItem, ModuleBay, PathEndpoint, PowerOutlet, PowerPanel, PowerPort, Rack, RearPort, Location,
|
||||
@@ -170,3 +171,15 @@ def extend_rearport_cable_paths(instance, created, raw, **kwargs):
|
||||
rearport = instance.rear_port
|
||||
for cablepath in CablePath.objects.filter(_nodes__contains=rearport):
|
||||
cablepath.retrace()
|
||||
|
||||
|
||||
@receiver(post_save, sender=Interface)
|
||||
@receiver(post_save, sender=VMInterface)
|
||||
def update_mac_address_interface(instance, created, raw, **kwargs):
|
||||
"""
|
||||
When creating a new Interface or VMInterface, check whether a MACAddress has been designated as its primary. If so,
|
||||
assign the MACAddress to the interface.
|
||||
"""
|
||||
if created and not raw and instance.primary_mac_address:
|
||||
instance.primary_mac_address.assigned_object = instance
|
||||
instance.primary_mac_address.save()
|
||||
|
||||
@@ -196,7 +196,7 @@ class DeviceTable(TenancyColumnsMixin, ContactsColumnMixin, NetBoxTable):
|
||||
verbose_name=_('Type')
|
||||
)
|
||||
u_height = columns.TemplateColumn(
|
||||
accessor=tables.A('device_type.u_height'),
|
||||
accessor=tables.A('device_type__u_height'),
|
||||
verbose_name=_('U Height'),
|
||||
template_code='{{ value|floatformat }}'
|
||||
)
|
||||
@@ -312,6 +312,16 @@ class DeviceComponentTable(NetBoxTable):
|
||||
verbose_name=_('Name'),
|
||||
linkify=True,
|
||||
)
|
||||
device_location = tables.Column(
|
||||
accessor=tables.A('device__location'),
|
||||
verbose_name=_('Device Location'),
|
||||
linkify=True,
|
||||
)
|
||||
device_site = tables.Column(
|
||||
accessor=tables.A('device__site'),
|
||||
verbose_name=_('Device Site'),
|
||||
linkify=True,
|
||||
)
|
||||
device_status = columns.ChoiceFieldColumn(
|
||||
accessor=tables.A('device__status'),
|
||||
verbose_name=_('Device Status'),
|
||||
|
||||
@@ -2376,6 +2376,33 @@ class CableTest(APIViewTestCases.APIViewTestCase):
|
||||
]
|
||||
|
||||
|
||||
class CableTerminationTest(
|
||||
APIViewTestCases.GetObjectViewTestCase,
|
||||
APIViewTestCases.ListObjectsViewTestCase,
|
||||
):
|
||||
model = CableTermination
|
||||
brief_fields = ['cable', 'cable_end', 'display', 'id', 'termination_id', 'termination_type', 'url']
|
||||
|
||||
@classmethod
|
||||
def setUpTestData(cls):
|
||||
device1 = create_test_device('Device 1')
|
||||
device2 = create_test_device('Device 2')
|
||||
|
||||
interfaces = []
|
||||
for device in (device1, device2):
|
||||
for i in range(0, 10):
|
||||
interfaces.append(Interface(device=device, type=InterfaceTypeChoices.TYPE_1GE_FIXED, name=f'eth{i}'))
|
||||
Interface.objects.bulk_create(interfaces)
|
||||
|
||||
cables = (
|
||||
Cable(a_terminations=[interfaces[0]], b_terminations=[interfaces[10]], label='Cable 1'),
|
||||
Cable(a_terminations=[interfaces[1]], b_terminations=[interfaces[11]], label='Cable 2'),
|
||||
Cable(a_terminations=[interfaces[2]], b_terminations=[interfaces[12]], label='Cable 3'),
|
||||
)
|
||||
for cable in cables:
|
||||
cable.save()
|
||||
|
||||
|
||||
class ConnectedDeviceTest(APITestCase):
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -7,13 +7,14 @@ from django.test import override_settings, tag
|
||||
from django.urls import reverse
|
||||
from netaddr import EUI
|
||||
|
||||
from core.models import ObjectType
|
||||
from dcim.choices import *
|
||||
from dcim.constants import *
|
||||
from dcim.models import *
|
||||
from ipam.models import ASN, RIR, VLAN, VRF
|
||||
from netbox.choices import CSVDelimiterChoices, ImportFormatChoices, WeightUnitChoices
|
||||
from tenancy.models import Tenant
|
||||
from users.models import User
|
||||
from users.models import ObjectPermission, User
|
||||
from utilities.testing import ViewTestCases, create_tags, create_test_device, post_data
|
||||
from wireless.models import WirelessLAN
|
||||
|
||||
@@ -3728,3 +3729,29 @@ class MACAddressTestCase(ViewTestCases.PrimaryObjectViewTestCase):
|
||||
cls.bulk_edit_data = {
|
||||
'description': 'New description',
|
||||
}
|
||||
|
||||
@tag('regression') # Issue #20542
|
||||
@override_settings(EXEMPT_VIEW_PERMISSIONS=['*'], EXEMPT_EXCLUDE_MODELS=[])
|
||||
def test_create_macaddress_via_quickadd(self):
|
||||
"""
|
||||
Test creating a MAC address via quick-add modal (e.g., from Interface form).
|
||||
Regression test for issue #20542 where form prefix was missing in POST handler.
|
||||
"""
|
||||
obj_perm = ObjectPermission(name='Test permission', actions=['add'])
|
||||
obj_perm.save()
|
||||
obj_perm.users.add(self.user)
|
||||
obj_perm.object_types.add(ObjectType.objects.get_for_model(self.model))
|
||||
|
||||
# Simulate quick-add form submission with 'quickadd-' prefix
|
||||
formatted_data = post_data(self.form_data)
|
||||
quickadd_data = {f'quickadd-{k}': v for k, v in formatted_data.items()}
|
||||
quickadd_data['_quickadd'] = 'True'
|
||||
|
||||
initial_count = self._get_queryset().count()
|
||||
url = f"{self._get_url('add')}?_quickadd=True&target=id_primary_mac_address"
|
||||
response = self.client.post(url, data=quickadd_data)
|
||||
|
||||
# Should successfully create the MAC address and return the quick_add_created template
|
||||
self.assertHttpStatus(response, 200)
|
||||
self.assertIn(b'quick-add-object', response.content)
|
||||
self.assertEqual(initial_count + 1, self._get_queryset().count())
|
||||
|
||||
@@ -26,6 +26,7 @@ class CustomFieldChoiceSetSerializer(ChangeLogMessageSerializer, ValidatedModelS
max_length=2
)
)
choices_count = serializers.IntegerField(read_only=True)
class Meta:
model = CustomFieldChoiceSet
@@ -1,10 +1,8 @@
from django.urls import include, path
from core.api.views import ObjectTypeViewSet
from netbox.api.routers import NetBoxRouter
from . import views
router = NetBoxRouter()
router.APIRootView = views.ExtrasRootView
@@ -29,9 +27,6 @@ router.register('config-context-profiles', views.ConfigContextProfileViewSet)
router.register('config-templates', views.ConfigTemplateViewSet)
router.register('scripts', views.ScriptViewSet, basename='script')
# TODO: Remove in NetBox v4.5
router.register('object-types', ObjectTypeViewSet)
app_name = 'extras-api'
urlpatterns = [
path('dashboard/', views.DashboardView.as_view(), name='dashboard'),
@@ -95,7 +95,11 @@ def process_event_rules(event_rules, object_type, event_type, data, username=Non
continue
# Compile event data
event_data = event_rule.action_data or {}
if event_rule.action_type == EventRuleActionChoices.SCRIPT:
event_data = {}
else:
event_data = event_rule.action_data or {}
event_data.update(data)
# Webhooks
@@ -131,6 +135,20 @@ def process_event_rules(event_rules, object_type, event_type, data, username=Non
elif event_rule.action_type == EventRuleActionChoices.SCRIPT:
# Resolve the script from action parameters
script = event_rule.action_object.python_class()
if event_rule.action_data:
form = script.as_form(event_rule.action_data)
if form.is_valid():
form.cleaned_data.pop('_schedule_at')
form.cleaned_data.pop('_interval')
form.cleaned_data.pop('_commit')
event_data.update(form.cleaned_data)
else:
logger.error(
_("Processing event rule {event_rule} failed - Cannot validate script form: {errors}").format(
event_rule=event_rule, errors=form.errors
)
)
continue
# Enqueue a Job to record the script's execution
from extras.jobs import ScriptJob
@@ -2,6 +2,7 @@ from typing import TYPE_CHECKING, Annotated, List
|
||||
|
||||
import strawberry
|
||||
import strawberry_django
|
||||
from strawberry.types import Info
|
||||
|
||||
__all__ = (
|
||||
'ConfigContextMixin',
|
||||
@@ -37,7 +38,7 @@ class CustomFieldsMixin:
|
||||
class ImageAttachmentsMixin:
|
||||
|
||||
@strawberry_django.field
|
||||
def image_attachments(self, info) -> List[Annotated["ImageAttachmentType", strawberry.lazy('.types')]]:
|
||||
def image_attachments(self, info: Info) -> List[Annotated['ImageAttachmentType', strawberry.lazy('.types')]]:
|
||||
return self.images.restrict(info.context.request.user, 'view')
|
||||
|
||||
|
||||
@@ -45,17 +46,17 @@ class ImageAttachmentsMixin:
|
||||
class JournalEntriesMixin:
|
||||
|
||||
@strawberry_django.field
|
||||
def journal_entries(self, info) -> List[Annotated["JournalEntryType", strawberry.lazy('.types')]]:
|
||||
def journal_entries(self, info: Info) -> List[Annotated['JournalEntryType', strawberry.lazy('.types')]]:
|
||||
return self.journal_entries.all()
|
||||
|
||||
|
||||
@strawberry.type
|
||||
class TagsMixin:
|
||||
|
||||
tags: List[Annotated["TagType", strawberry.lazy('.types')]]
|
||||
tags: List[Annotated['TagType', strawberry.lazy('.types')]]
|
||||
|
||||
|
||||
@strawberry.type
|
||||
class ContactsMixin:
|
||||
|
||||
contacts: List[Annotated["ContactAssignmentType", strawberry.lazy('tenancy.graphql.types')]]
|
||||
contacts: List[Annotated['ContactAssignmentType', strawberry.lazy('tenancy.graphql.types')]]
|
||||
|
||||
@@ -1,9 +1,39 @@
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.fields.ranges import RangeField
from django.db.models import CharField, JSONField, Lookup
from django.db.models.fields.json import KeyTextTransform
from .fields import CachedValueField
class RangeContains(Lookup):
"""
Filter ArrayField(RangeField) columns where ANY element-range contains the scalar RHS.
Usage (ORM):
Model.objects.filter(<range_array_field>__range_contains=<scalar>)
Works with int4range[], int8range[], daterange[], tstzrange[], etc.
"""
lookup_name = 'range_contains'
def as_sql(self, compiler, connection):
# Compile LHS (the array-of-ranges column/expression) and RHS (scalar)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
# Guard: only allow ArrayField whose base_field is a PostgreSQL RangeField
field = getattr(self.lhs, 'output_field', None)
if not (isinstance(field, ArrayField) and isinstance(field.base_field, RangeField)):
raise TypeError('range_contains is only valid for ArrayField(RangeField) columns')
# Range-contains-element using EXISTS + UNNEST keeps the range on the LHS: r @> value
sql = f"EXISTS (SELECT 1 FROM unnest({lhs}) AS r WHERE r @> {rhs})"
params = lhs_params + rhs_params
return sql, params
class Empty(Lookup):
"""
Filter on whether a string is empty.
@@ -25,7 +55,7 @@ class JSONEmpty(Lookup):
A key is considered empty if it is "", null, or does not exist.
"""
lookup_name = "empty"
lookup_name = 'empty'
def as_sql(self, compiler, connection):
# self.lhs.lhs is the parent expression (could be a JSONField or another KeyTransform)
@@ -69,6 +99,7 @@ class NetContainsOrEquals(Lookup):
return 'CAST(%s AS INET) >>= %s' % (lhs, rhs), params
ArrayField.register_lookup(RangeContains)
CharField.register_lookup(Empty)
JSONField.register_lookup(JSONEmpty)
CachedValueField.register_lookup(NetHost)
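Illustrative note (not part of the diff): once registered on ArrayField as above, the lookup becomes available on any ArrayField(RangeField) column. A minimal sketch of filtering with it, using a hypothetical model defined only for this example:

from django.contrib.postgres.fields import ArrayField, IntegerRangeField
from django.db import models

class MaintenanceWindow(models.Model):  # hypothetical model for illustration only
    slots = ArrayField(IntegerRangeField())  # e.g. [[1,11), [20,31)]

# Rows where at least one stored range contains the value 25
MaintenanceWindow.objects.filter(slots__range_contains=25)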
@@ -90,7 +90,7 @@ class ConfigContextModelQuerySet(RestrictedQuerySet):
ConfigContext.objects.filter(
self._get_config_context_filters()
).annotate(
_data=EmptyGroupByJSONBAgg('data', ordering=['weight', 'name'])
_data=EmptyGroupByJSONBAgg('data', order_by=['weight', 'name'])
).values("_data").order_by()
)
)
@@ -1,12 +1,9 @@
|
||||
import inspect
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
|
||||
import yaml
|
||||
from django import forms
|
||||
from django.conf import settings
|
||||
from django.core.files.storage import storages
|
||||
from django.core.validators import RegexValidator
|
||||
from django.utils import timezone
|
||||
@@ -488,7 +485,7 @@ class BaseScript:
|
||||
if self.fieldsets:
|
||||
fieldsets.extend(self.fieldsets)
|
||||
else:
|
||||
fields = list(name for name, _ in self._get_vars().items())
|
||||
fields = list(name for name, __ in self._get_vars().items())
|
||||
fieldsets.append((_('Script Data'), fields))
|
||||
|
||||
# Append the default fieldset if defined in the Meta class
|
||||
@@ -580,40 +577,6 @@ class BaseScript:
|
||||
self._log(message, obj, level=LogLevelChoices.LOG_FAILURE)
|
||||
self.failed = True
|
||||
|
||||
#
|
||||
# Convenience functions
|
||||
#
|
||||
|
||||
def load_yaml(self, filename):
|
||||
"""
|
||||
Return data from a YAML file
|
||||
"""
|
||||
# TODO: DEPRECATED: Remove this method in v4.5
|
||||
self._log(
|
||||
_("load_yaml is deprecated and will be removed in v4.5"),
|
||||
level=LogLevelChoices.LOG_WARNING
|
||||
)
|
||||
file_path = os.path.join(settings.SCRIPTS_ROOT, filename)
|
||||
with open(file_path, 'r') as datafile:
|
||||
data = yaml.load(datafile, Loader=yaml.SafeLoader)
|
||||
|
||||
return data
|
||||
|
||||
def load_json(self, filename):
|
||||
"""
|
||||
Return data from a JSON file
|
||||
"""
|
||||
# TODO: DEPRECATED: Remove this method in v4.5
|
||||
self._log(
|
||||
_("load_json is deprecated and will be removed in v4.5"),
|
||||
level=LogLevelChoices.LOG_WARNING
|
||||
)
|
||||
file_path = os.path.join(settings.SCRIPTS_ROOT, filename)
|
||||
with open(file_path, 'r') as datafile:
|
||||
data = json.load(datafile)
|
||||
|
||||
return data
|
||||
|
||||
#
|
||||
# Legacy Report functionality
|
||||
#
|
||||
|
||||
@@ -363,7 +363,7 @@ class EventRuleTest(APITestCase):
|
||||
body = json.loads(request.body)
|
||||
self.assertEqual(body['event'], 'created')
|
||||
self.assertEqual(body['timestamp'], job.kwargs['timestamp'])
|
||||
self.assertEqual(body['model'], 'site')
|
||||
self.assertEqual(body['object_type'], 'dcim.site')
|
||||
self.assertEqual(body['username'], 'testuser')
|
||||
self.assertEqual(body['request_id'], str(request_id))
|
||||
self.assertEqual(body['data']['name'], 'Site 1')
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import logging
|
||||
import tempfile
|
||||
from datetime import date, datetime, timezone
|
||||
from decimal import Decimal
|
||||
|
||||
@@ -9,7 +7,6 @@ from netaddr import IPAddress, IPNetwork
|
||||
|
||||
from dcim.models import DeviceRole
|
||||
from extras.scripts import *
|
||||
from utilities.testing import disable_logging
|
||||
|
||||
CHOICES = (
|
||||
('ff0000', 'Red'),
|
||||
@@ -35,35 +32,6 @@ JSON_DATA = """
|
||||
"""
|
||||
|
||||
|
||||
class ScriptTest(TestCase):
|
||||
|
||||
def test_load_yaml(self):
|
||||
datafile = tempfile.NamedTemporaryFile()
|
||||
datafile.write(bytes(YAML_DATA, 'UTF-8'))
|
||||
datafile.seek(0)
|
||||
|
||||
with disable_logging(level=logging.WARNING):
|
||||
data = Script().load_yaml(datafile.name)
|
||||
self.assertEqual(data, {
|
||||
'Foo': 123,
|
||||
'Bar': 456,
|
||||
'Baz': ['A', 'B', 'C'],
|
||||
})
|
||||
|
||||
def test_load_json(self):
|
||||
datafile = tempfile.NamedTemporaryFile()
|
||||
datafile.write(bytes(JSON_DATA, 'UTF-8'))
|
||||
datafile.seek(0)
|
||||
|
||||
with disable_logging(level=logging.WARNING):
|
||||
data = Script().load_json(datafile.name)
|
||||
self.assertEqual(data, {
|
||||
'Foo': 123,
|
||||
'Bar': 456,
|
||||
'Baz': ['A', 'B', 'C'],
|
||||
})
|
||||
|
||||
|
||||
class ScriptVariablesTest(TestCase):
|
||||
|
||||
def test_stringvar(self):
|
||||
|
||||
@@ -52,7 +52,6 @@ def send_webhook(event_rule, object_type, event_type, data, timestamp, username,
'event': WEBHOOK_EVENT_TYPES.get(event_type, event_type),
'timestamp': timestamp,
'object_type': '.'.join(object_type.natural_key()),
'model': object_type.model,
'username': username,
'request_id': request.id if request else None,
'data': data,
@@ -100,7 +99,7 @@ def send_webhook(event_rule, object_type, event_type, data, timestamp, username,
'data': body.encode('utf8'),
}
logger.info(
f"Sending {params['method']} request to {params['url']} ({context['model']} {context['event']})"
f"Sending {params['method']} request to {params['url']} ({context['object_type']} {context['event']})"
)
logger.debug(params)
try:
@@ -26,6 +26,9 @@ class BaseIPField(models.Field):
|
||||
def from_db_value(self, value, expression, connection):
|
||||
return self.to_python(value)
|
||||
|
||||
def get_internal_type(self):
|
||||
return 'CharField'
|
||||
|
||||
def to_python(self, value):
|
||||
if not value:
|
||||
return value
|
||||
@@ -57,7 +60,7 @@ class IPNetworkField(BaseIPField):
|
||||
"""
|
||||
IP prefix (network and mask)
|
||||
"""
|
||||
description = "PostgreSQL CIDR field"
|
||||
description = 'PostgreSQL CIDR field'
|
||||
default_validators = [validators.prefix_validator]
|
||||
|
||||
def db_type(self, connection):
|
||||
@@ -83,7 +86,7 @@ class IPAddressField(BaseIPField):
|
||||
"""
|
||||
IP address (host address and mask)
|
||||
"""
|
||||
description = "PostgreSQL INET field"
|
||||
description = 'PostgreSQL INET field'
|
||||
|
||||
def db_type(self, connection):
|
||||
return 'inet'
|
||||
@@ -110,7 +113,7 @@ IPAddressField.register_lookup(lookups.Inet)
|
||||
|
||||
|
||||
class ASNField(models.BigIntegerField):
|
||||
description = "32-bit ASN field"
|
||||
description = '32-bit ASN field'
|
||||
default_validators = [
|
||||
MinValueValidator(BGP_ASN_MIN),
|
||||
MaxValueValidator(BGP_ASN_MAX),
|
||||
|
||||
@@ -354,13 +354,13 @@ class PrefixFilterSet(NetBoxModelFilterSet, ScopedFilterSet, TenancyFilterSet, C
|
||||
vlan_group_id = django_filters.ModelMultipleChoiceFilter(
|
||||
field_name='vlan__group',
|
||||
queryset=VLANGroup.objects.all(),
|
||||
to_field_name="id",
|
||||
to_field_name='id',
|
||||
label=_('VLAN Group (ID)'),
|
||||
)
|
||||
vlan_group = django_filters.ModelMultipleChoiceFilter(
|
||||
field_name='vlan__group__slug',
|
||||
queryset=VLANGroup.objects.all(),
|
||||
to_field_name="slug",
|
||||
to_field_name='slug',
|
||||
label=_('VLAN Group (slug)'),
|
||||
)
|
||||
vlan_id = django_filters.ModelMultipleChoiceFilter(
|
||||
@@ -695,12 +695,12 @@ class IPAddressFilterSet(NetBoxModelFilterSet, TenancyFilterSet, ContactModelFil
|
||||
return queryset.filter(q)
|
||||
|
||||
def parse_inet_addresses(self, value):
|
||||
'''
|
||||
"""
|
||||
Parse networks or IP addresses and cast to a format
|
||||
acceptable by the Postgres inet type.
|
||||
|
||||
Skips invalid values.
|
||||
'''
|
||||
"""
|
||||
parsed = []
|
||||
for addr in value:
|
||||
if netaddr.valid_ipv4(addr) or netaddr.valid_ipv6(addr):
|
||||
@@ -718,7 +718,7 @@ class IPAddressFilterSet(NetBoxModelFilterSet, TenancyFilterSet, ContactModelFil
|
||||
# as argument. If they are all invalid,
|
||||
# we return an empty queryset
|
||||
value = self.parse_inet_addresses(value)
|
||||
if (len(value) == 0):
|
||||
if len(value) == 0:
|
||||
return queryset.none()
|
||||
|
||||
try:
|
||||
@@ -908,7 +908,8 @@ class VLANGroupFilterSet(OrganizationalModelFilterSet, TenancyFilterSet):
|
||||
method='filter_scope'
|
||||
)
|
||||
contains_vid = django_filters.NumberFilter(
|
||||
method='filter_contains_vid'
|
||||
field_name='vid_ranges',
|
||||
lookup_expr='range_contains',
|
||||
)
|
||||
|
||||
class Meta:
|
||||
@@ -931,21 +932,6 @@ class VLANGroupFilterSet(OrganizationalModelFilterSet, TenancyFilterSet):
|
||||
scope_id=value
|
||||
)
|
||||
|
||||
def filter_contains_vid(self, queryset, name, value):
|
||||
"""
|
||||
Return all VLANGroups which contain the given VLAN ID.
|
||||
"""
|
||||
table_name = VLANGroup._meta.db_table
|
||||
# TODO: See if this can be optimized without compromising queryset integrity
|
||||
# Expand VLAN ID ranges to query by integer
|
||||
groups = VLANGroup.objects.raw(
|
||||
f'SELECT id FROM {table_name}, unnest(vid_ranges) vid_range WHERE %s <@ vid_range',
|
||||
params=(value,)
|
||||
)
|
||||
return queryset.filter(
|
||||
pk__in=[g.id for g in groups]
|
||||
)
|
||||
|
||||
|
||||
class VLANFilterSet(NetBoxModelFilterSet, TenancyFilterSet):
|
||||
region_id = TreeNodeMultipleChoiceFilter(
|
||||
@@ -1079,6 +1065,7 @@ class VLANFilterSet(NetBoxModelFilterSet, TenancyFilterSet):
|
||||
def get_for_virtualmachine(self, queryset, name, value):
|
||||
return queryset.get_for_virtualmachine(value)
|
||||
|
||||
@extend_schema_field(OpenApiTypes.INT)
|
||||
def filter_interface_id(self, queryset, name, value):
|
||||
if value is None:
|
||||
return queryset.none()
|
||||
@@ -1087,6 +1074,7 @@ class VLANFilterSet(NetBoxModelFilterSet, TenancyFilterSet):
|
||||
Q(interfaces_as_untagged=value)
|
||||
).distinct()
|
||||
|
||||
@extend_schema_field(OpenApiTypes.INT)
|
||||
def filter_vminterface_id(self, queryset, name, value):
|
||||
if value is None:
|
||||
return queryset.none()
|
||||
|
||||
@@ -19,7 +19,7 @@ from tenancy.graphql.filter_mixins import ContactFilterMixin, TenancyFilterMixin
|
||||
from virtualization.models import VMInterface
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from netbox.graphql.filter_lookups import IntegerArrayLookup, IntegerLookup
|
||||
from netbox.graphql.filter_lookups import IntegerLookup, IntegerRangeArrayLookup
|
||||
from circuits.graphql.filters import ProviderFilter
|
||||
from core.graphql.filters import ContentTypeFilter
|
||||
from dcim.graphql.filters import SiteFilter
|
||||
@@ -340,7 +340,7 @@ class VLANFilter(TenancyFilterMixin, PrimaryModelFilterMixin):
|
||||
|
||||
@strawberry_django.filter_type(models.VLANGroup, lookups=True)
|
||||
class VLANGroupFilter(ScopedFilterMixin, OrganizationalModelFilterMixin):
|
||||
vid_ranges: Annotated['IntegerArrayLookup', strawberry.lazy('netbox.graphql.filter_lookups')] | None = (
|
||||
vid_ranges: Annotated['IntegerRangeArrayLookup', strawberry.lazy('netbox.graphql.filter_lookups')] | None = (
|
||||
strawberry_django.filter_field()
|
||||
)
|
||||
|
||||
|
||||
@@ -74,7 +74,7 @@ class BaseIPAddressFamilyType:
|
||||
filters=ASNFilter,
|
||||
pagination=True
|
||||
)
|
||||
class ASNType(NetBoxObjectType):
|
||||
class ASNType(NetBoxObjectType, ContactsMixin):
|
||||
asn: BigInt
|
||||
rir: Annotated["RIRType", strawberry.lazy('ipam.graphql.types')] | None
|
||||
tenant: Annotated["TenantType", strawberry.lazy('tenancy.graphql.types')] | None
|
||||
|
||||
@@ -548,7 +548,7 @@ class IPRange(ContactsMixin, PrimaryModel):
mark_utilized = models.BooleanField(
verbose_name=_('mark utilized'),
default=False,
help_text=_("Report space as 100% utilized")
help_text=_("Report space as fully utilized")
)
clone_fields = (
@@ -10,9 +10,9 @@ from django.utils.translation import gettext_lazy as _
from dcim.models import Interface, Site, SiteGroup
from ipam.choices import *
from ipam.constants import *
from ipam.querysets import VLANQuerySet, VLANGroupQuerySet
from ipam.querysets import VLANGroupQuerySet, VLANQuerySet
from netbox.models import OrganizationalModel, PrimaryModel, NetBoxModel
from utilities.data import check_ranges_overlap, ranges_to_string
from utilities.data import check_ranges_overlap, ranges_to_string, ranges_to_string_list
from virtualization.models import VMInterface
__all__ = (
@@ -164,8 +164,18 @@ class VLANGroup(OrganizationalModel):
"""
return VLAN.objects.filter(group=self).order_by('vid')
@property
def vid_ranges_items(self):
"""
Property that converts VID ranges to a list of string representations.
"""
return ranges_to_string_list(self.vid_ranges)
@property
def vid_ranges_list(self):
"""
Property that converts VID ranges into a string representation.
"""
return ranges_to_string(self.vid_ranges)
@@ -41,7 +41,8 @@ class VLANGroupTable(TenancyColumnsMixin, NetBoxTable):
linkify=True,
orderable=False
)
vid_ranges_list = tables.Column(
vid_ranges_list = columns.ArrayColumn(
accessor='vid_ranges_items',
verbose_name=_('VID Ranges'),
orderable=False
)
@@ -1723,6 +1723,10 @@ class VLANGroupTestCase(TestCase, ChangeLoggedFilterSetTests):
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'contains_vid': 1}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 8)
params = {'contains_vid': 12} # 11 is NOT in [1,11)
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'contains_vid': 4095}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 0)
def test_region(self):
params = {'region': Region.objects.first().pk}
netbox/ipam/tests/test_lookups.py (new file)
@@ -0,0 +1,66 @@
from django.test import TestCase
from django.db.backends.postgresql.psycopg_any import NumericRange
from ipam.models import VLANGroup
class VLANGroupRangeContainsLookupTests(TestCase):
@classmethod
def setUpTestData(cls):
# Two ranges: [1,11) and [20,31)
cls.g1 = VLANGroup.objects.create(
name='VlanGroup-A',
slug='VlanGroup-A',
vid_ranges=[NumericRange(1, 11), NumericRange(20, 31)],
)
# One range: [100,201)
cls.g2 = VLANGroup.objects.create(
name='VlanGroup-B',
slug='VlanGroup-B',
vid_ranges=[NumericRange(100, 201)],
)
cls.g_empty = VLANGroup.objects.create(
name='VlanGroup-empty',
slug='VlanGroup-empty',
vid_ranges=[],
)
def test_contains_value_in_first_range(self):
"""
Tests whether a specific value is contained within the first range in a queried
set of VLANGroup objects.
"""
names = list(
VLANGroup.objects.filter(vid_ranges__range_contains=10).values_list('name', flat=True).order_by('name')
)
self.assertEqual(names, ['VlanGroup-A'])
def test_contains_value_in_second_range(self):
"""
Tests if a value exists in the second range of VLANGroup objects and
validates the result against the expected list of names.
"""
names = list(
VLANGroup.objects.filter(vid_ranges__range_contains=25).values_list('name', flat=True).order_by('name')
)
self.assertEqual(names, ['VlanGroup-A'])
def test_upper_bound_is_exclusive(self):
"""
Tests if the upper bound of the range is exclusive in the filter method.
"""
# 11 is NOT in [1,11)
self.assertFalse(VLANGroup.objects.filter(vid_ranges__range_contains=11).exists())
def test_no_match_far_outside(self):
"""
Tests that no VLANGroup contains a VID within a specified range far outside
common VID bounds and returns `False`.
"""
self.assertFalse(VLANGroup.objects.filter(vid_ranges__range_contains=4095).exists())
def test_empty_array_never_matches(self):
"""
Tests the behavior of VLANGroup objects when an empty array is used to match a
specific condition.
"""
self.assertFalse(VLANGroup.objects.filter(pk=self.g_empty.pk, vid_ranges__range_contains=1).exists())
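Illustrative note (not part of the diff): the same lookup backs the contains_vid filter-set parameter exercised in the tests above, so it can also be reached over the REST API. A sketch, assuming a reachable NetBox instance; the URL and token below are placeholders:

# Hypothetical request: list VLAN groups whose vid_ranges contain VID 25
import requests
resp = requests.get(
    'https://netbox.example.com/api/ipam/vlan-groups/',
    params={'contains_vid': 25},
    headers={'Authorization': 'Token 0123456789abcdef0123456789abcdef01234567'},
)
print(resp.json()['count'])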
@@ -2,47 +2,90 @@ import logging
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils import timezone
|
||||
from rest_framework import authentication, exceptions
|
||||
from drf_spectacular.extensions import OpenApiAuthenticationExtension
|
||||
from rest_framework import exceptions
|
||||
from rest_framework.authentication import BaseAuthentication, get_authorization_header
|
||||
from rest_framework.permissions import BasePermission, DjangoObjectPermissions, SAFE_METHODS
|
||||
|
||||
from netbox.config import get_config
|
||||
from users.constants import TOKEN_PREFIX
|
||||
from users.models import Token
|
||||
from utilities.request import get_client_ip
|
||||
|
||||
V1_KEYWORD = 'Token'
|
||||
V2_KEYWORD = 'Bearer'
|
||||
|
||||
class TokenAuthentication(authentication.TokenAuthentication):
|
||||
|
||||
class TokenAuthentication(BaseAuthentication):
|
||||
"""
|
||||
A custom authentication scheme which enforces Token expiration times and source IP restrictions.
|
||||
"""
|
||||
model = Token
|
||||
|
||||
def authenticate(self, request):
|
||||
result = super().authenticate(request)
|
||||
|
||||
if result:
|
||||
token = result[1]
|
||||
|
||||
# Enforce source IP restrictions (if any) set on the token
|
||||
if token.allowed_ips:
|
||||
client_ip = get_client_ip(request)
|
||||
if client_ip is None:
|
||||
raise exceptions.AuthenticationFailed(
|
||||
"Client IP address could not be determined for validation. Check that the HTTP server is "
|
||||
"correctly configured to pass the required header(s)."
|
||||
)
|
||||
if not token.validate_client_ip(client_ip):
|
||||
raise exceptions.AuthenticationFailed(
|
||||
f"Source IP {client_ip} is not permitted to authenticate using this token."
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
def authenticate_credentials(self, key):
|
||||
model = self.get_model()
|
||||
# Authorization header is not present; ignore
|
||||
if not (auth := get_authorization_header(request).split()):
|
||||
return
|
||||
# Unrecognized header; ignore
|
||||
if auth[0].lower() not in (V1_KEYWORD.lower().encode(), V2_KEYWORD.lower().encode()):
|
||||
return
|
||||
# Check for extraneous token content
|
||||
if len(auth) != 2:
|
||||
raise exceptions.AuthenticationFailed(
|
||||
'Invalid authorization header: Must be in the form "Bearer <key>.<token>" or "Token <token>"'
|
||||
)
|
||||
# Extract the key (if v2) & token plaintext from the auth header
|
||||
try:
|
||||
token = model.objects.prefetch_related('user').get(key=key)
|
||||
except model.DoesNotExist:
|
||||
raise exceptions.AuthenticationFailed("Invalid token")
|
||||
auth_value = auth[1].decode()
|
||||
except UnicodeError:
|
||||
raise exceptions.AuthenticationFailed("Invalid authorization header: Token contains invalid characters")
|
||||
|
||||
# Infer token version from presence or absence of prefix
|
||||
version = 2 if auth_value.startswith(TOKEN_PREFIX) else 1
|
||||
|
||||
if version == 1:
|
||||
key, plaintext = None, auth_value
|
||||
else:
|
||||
auth_value = auth_value.removeprefix(TOKEN_PREFIX)
|
||||
try:
|
||||
key, plaintext = auth_value.split('.', 1)
|
||||
except ValueError:
|
||||
raise exceptions.AuthenticationFailed(
|
||||
"Invalid authorization header: Could not parse key from v2 token. Did you mean to use 'Token' "
|
||||
"instead of 'Bearer'?"
|
||||
)
|
||||
|
||||
# Look for a matching token in the database
|
||||
try:
|
||||
qs = Token.objects.prefetch_related('user')
|
||||
if version == 1:
|
||||
# Fetch v1 token by querying plaintext value directly
|
||||
token = qs.get(version=version, plaintext=plaintext)
|
||||
else:
|
||||
# Fetch v2 token by key, then validate the plaintext
|
||||
token = qs.get(version=version, key=key)
|
||||
if not token.validate(plaintext):
|
||||
# Key is valid but plaintext is not. Raise DoesNotExist to guard against key enumeration.
|
||||
raise Token.DoesNotExist()
|
||||
except Token.DoesNotExist:
|
||||
raise exceptions.AuthenticationFailed(f"Invalid v{version} token")
|
||||
|
||||
# Enforce source IP restrictions (if any) set on the token
|
||||
if token.allowed_ips:
|
||||
client_ip = get_client_ip(request)
|
||||
if client_ip is None:
|
||||
raise exceptions.AuthenticationFailed(
|
||||
"Client IP address could not be determined for validation. Check that the HTTP server is "
|
||||
"correctly configured to pass the required header(s)."
|
||||
)
|
||||
if not token.validate_client_ip(client_ip):
|
||||
raise exceptions.AuthenticationFailed(
|
||||
f"Source IP {client_ip} is not permitted to authenticate using this token."
|
||||
)
|
||||
|
||||
# Enforce the Token's expiration time, if one has been set.
|
||||
if token.is_expired:
|
||||
raise exceptions.AuthenticationFailed("Token expired")
|
||||
|
||||
# Update last used, but only once per minute at most. This reduces write load on the database
|
||||
if not token.last_used or (timezone.now() - token.last_used).total_seconds() > 60:
|
||||
@@ -54,11 +97,8 @@ class TokenAuthentication(authentication.TokenAuthentication):
|
||||
else:
|
||||
Token.objects.filter(pk=token.pk).update(last_used=timezone.now())
|
||||
|
||||
# Enforce the Token's expiration time, if one has been set.
|
||||
if token.is_expired:
|
||||
raise exceptions.AuthenticationFailed("Token expired")
|
||||
|
||||
user = token.user
|
||||
|
||||
# When LDAP authentication is active try to load user data from LDAP directory
|
||||
if 'netbox.authentication.LDAPBackend' in settings.REMOTE_AUTH_BACKEND:
|
||||
from netbox.authentication import LDAPBackend
|
||||
@@ -132,3 +172,17 @@ class IsAuthenticatedOrLoginNotRequired(BasePermission):
|
||||
if not settings.LOGIN_REQUIRED:
|
||||
return True
|
||||
return request.user.is_authenticated
|
||||
|
||||
|
||||
class TokenScheme(OpenApiAuthenticationExtension):
|
||||
target_class = 'netbox.api.authentication.TokenAuthentication'
|
||||
name = 'tokenAuth'
|
||||
match_subclasses = True
|
||||
|
||||
def get_security_definition(self, auto_schema):
|
||||
return {
|
||||
'type': 'apiKey',
|
||||
'in': 'header',
|
||||
'name': 'Authorization',
|
||||
'description': '`Token <token>` (v1) or `Bearer <key>.<token>` (v2)',
|
||||
}
|
||||
|
||||
@@ -169,7 +169,7 @@ class IntegerRangeSerializer(serializers.Serializer):
if type(data[0]) is not int or type(data[1]) is not int:
raise ValidationError(_("Range boundaries must be defined as integers."))
return NumericRange(data[0], data[1], bounds='[]')
return NumericRange(data[0], data[1] + 1, bounds='[)')
def to_representation(self, instance):
return instance.lower, instance.upper - 1
@@ -44,22 +44,28 @@ class OptionalLimitOffsetPagination(LimitOffsetPagination):
return list(queryset[self.offset:])
def get_limit(self, request):
max_limit = self.default_limit
MAX_PAGE_SIZE = get_config().MAX_PAGE_SIZE
if MAX_PAGE_SIZE:
max_limit = min(max_limit, MAX_PAGE_SIZE)
if self.limit_query_param:
MAX_PAGE_SIZE = get_config().MAX_PAGE_SIZE
if MAX_PAGE_SIZE:
MAX_PAGE_SIZE = max(MAX_PAGE_SIZE, self.default_limit)
try:
limit = int(request.query_params[self.limit_query_param])
if limit < 0:
raise ValueError()
# Enforce maximum page size, if defined
if MAX_PAGE_SIZE:
return MAX_PAGE_SIZE if limit == 0 else min(limit, MAX_PAGE_SIZE)
return limit
if limit == 0:
max_limit = MAX_PAGE_SIZE
else:
max_limit = min(MAX_PAGE_SIZE, limit)
else:
max_limit = limit
except (KeyError, ValueError):
pass
return self.default_limit
return max_limit
def get_queryset_count(self, queryset):
return queryset.count()
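Illustrative note (not part of the diff): as rewritten above, get_limit() caps the requested page size at MAX_PAGE_SIZE (raised to at least the default page size) and treats ?limit=0 as a request for the maximum. A standalone sketch of that decision logic, under those assumptions:

def effective_limit(requested, default_limit, max_page_size):
    # requested is None when no ?limit= parameter was supplied
    if requested is None or requested < 0:
        return default_limit
    if not max_page_size:
        return requested  # 0 is treated as "no limit" downstream
    cap = max(max_page_size, default_limit)  # the cap never undercuts the default
    return cap if requested == 0 else min(requested, cap)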
@@ -184,14 +184,13 @@ class RemoteUserBackend(_RemoteUserBackend):
|
||||
else:
|
||||
user.groups.clear()
|
||||
logger.debug(f"Stripping user {user} from Groups")
|
||||
|
||||
# Evaluate superuser status
|
||||
user.is_superuser = self._is_superuser(user)
|
||||
logger.debug(f"User {user} is Superuser: {user.is_superuser}")
|
||||
logger.debug(
|
||||
f"User {user} should be Superuser: {self._is_superuser(user)}")
|
||||
|
||||
user.is_staff = self._is_staff(user)
|
||||
logger.debug(f"User {user} is Staff: {user.is_staff}")
|
||||
logger.debug(f"User {user} should be Staff: {self._is_staff(user)}")
|
||||
user.save()
|
||||
return user
|
||||
|
||||
@@ -251,19 +250,8 @@ class RemoteUserBackend(_RemoteUserBackend):
|
||||
return bool(result)
|
||||
|
||||
def _is_staff(self, user):
|
||||
logger = logging.getLogger('netbox.auth.RemoteUserBackend')
|
||||
staff_groups = settings.REMOTE_AUTH_STAFF_GROUPS
|
||||
logger.debug(f"Superuser Groups: {staff_groups}")
|
||||
staff_users = settings.REMOTE_AUTH_STAFF_USERS
|
||||
logger.debug(f"Staff Users :{staff_users}")
|
||||
user_groups = set()
|
||||
for g in user.groups.all():
|
||||
user_groups.add(g.name)
|
||||
logger.debug(f"User {user.username} is in Groups:{user_groups}")
|
||||
result = user.username in staff_users or (
|
||||
set(user_groups) & set(staff_groups))
|
||||
logger.debug(f"User {user.username} in Staff Users :{result}")
|
||||
return bool(result)
|
||||
# Retain for pre-v4.5 compatibility
|
||||
return user.is_superuser
|
||||
|
||||
def configure_user(self, request, user):
|
||||
logger = logging.getLogger('netbox.auth.RemoteUserBackend')
|
||||
|
||||
@@ -78,11 +78,16 @@ class Config:
from core.models import ConfigRevision
try:
revision = ConfigRevision.objects.last()
# Enforce the creation date as the ordering parameter
revision = ConfigRevision.objects.get(active=True)
logger.debug(f"Loaded active configuration revision #{revision.pk}")
except (ConfigRevision.DoesNotExist, ConfigRevision.MultipleObjectsReturned):
logger.warning("No active configuration revision found - falling back to most recent")
revision = ConfigRevision.objects.order_by('-created').first()
if revision is None:
logger.debug("No previous configuration found in database; proceeding with default values")
return
logger.debug("Loaded configuration data from database")
logger.debug(f"Using fallback configuration revision #{revision.pk}")
except DatabaseError:
# The database may not be available yet (e.g. when running a management command)
logger.warning("Skipping config initialization (database unavailable)")
@@ -68,6 +68,16 @@ REDIS = {
# https://docs.djangoproject.com/en/stable/ref/settings/#std:setting-SECRET_KEY
SECRET_KEY = ''
# Define a mapping of cryptographic peppers to use when hashing API tokens. A minimum of one pepper is required to
# enable v2 API tokens (NetBox v4.5+). Define peppers as a mapping of numeric ID to pepper value, as shown below. Each
# pepper must be at least 50 characters in length.
#
# API_TOKEN_PEPPERS = {
# 1: "<random string>",
# 2: "<random string>",
# }
API_TOKEN_PEPPERS = {}
#########################
# #
@@ -81,9 +91,6 @@ ADMINS = [
# ('John Doe', 'jdoe@example.com'),
]
# Permit the retrieval of API tokens after their creation.
ALLOW_TOKEN_RETRIEVAL = False
# Enable any desired validators for local account passwords below. For a list of included validators, please see the
# Django documentation at https://docs.djangoproject.com/en/stable/topics/auth/passwords/#password-validation.
AUTH_PASSWORD_VALIDATORS = [
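Illustrative note (not part of the diff): one way to generate pepper values of the required length is Python's standard-library secrets module; the exact generation method is left to the operator:

# Prints a URL-safe random string comfortably longer than 50 characters
import secrets
print(secrets.token_urlsafe(64))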
@@ -43,7 +43,9 @@ SECRET_KEY = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
|
||||
|
||||
DEFAULT_PERMISSIONS = {}
|
||||
|
||||
ALLOW_TOKEN_RETRIEVAL = True
|
||||
API_TOKEN_PEPPERS = {
|
||||
1: 'TEST-VALUE-DO-NOT-USE-TEST-VALUE-DO-NOT-USE-TEST-VALUE-DO-NOT-USE',
|
||||
}
|
||||
|
||||
LOGGING = {
|
||||
'version': 1,
|
||||
|
||||
@@ -28,7 +28,6 @@ def preferences(request):
user_preferences = request.user.config if request.user.is_authenticated else {}
return {
'preferences': user_preferences,
'htmx_navigation': user_preferences.get('ui.htmx_navigation', False) == 'true'
}
@@ -7,6 +7,7 @@ from django.core.exceptions import FieldDoesNotExist
|
||||
from django.db.models import Q, QuerySet
|
||||
from django.db.models.fields.related import ForeignKey, ManyToManyField, ManyToManyRel, ManyToOneRel
|
||||
from strawberry import ID
|
||||
from strawberry.directive import DirectiveValue
|
||||
from strawberry.types import Info
|
||||
from strawberry_django import (
|
||||
ComparisonFilterLookup,
|
||||
@@ -24,6 +25,7 @@ __all__ = (
|
||||
'FloatLookup',
|
||||
'IntegerArrayLookup',
|
||||
'IntegerLookup',
|
||||
'IntegerRangeArrayLookup',
|
||||
'JSONFilter',
|
||||
'StringArrayLookup',
|
||||
'TreeNodeFilter',
|
||||
@@ -67,7 +69,7 @@ class IntegerLookup:
|
||||
return None
|
||||
|
||||
@strawberry_django.filter_field
|
||||
def filter(self, info: Info, queryset: QuerySet, prefix: str = '') -> Tuple[QuerySet, Q]:
|
||||
def filter(self, info: Info, queryset: QuerySet, prefix: DirectiveValue[str] = '') -> Tuple[QuerySet, Q]:
|
||||
filters = self.get_filter()
|
||||
|
||||
if not filters:
|
||||
@@ -90,7 +92,7 @@ class FloatLookup:
|
||||
return None
|
||||
|
||||
@strawberry_django.filter_field
|
||||
def filter(self, info: Info, queryset: QuerySet, prefix: str = '') -> Tuple[QuerySet, Q]:
|
||||
def filter(self, info: Info, queryset: QuerySet, prefix: DirectiveValue[str] = '') -> Tuple[QuerySet, Q]:
|
||||
filters = self.get_filter()
|
||||
|
||||
if not filters:
|
||||
@@ -109,7 +111,7 @@ class JSONFilter:
|
||||
lookup: JSONLookup
|
||||
|
||||
@strawberry_django.filter_field
|
||||
def filter(self, info: Info, queryset: QuerySet, prefix: str = '') -> Tuple[QuerySet, Q]:
|
||||
def filter(self, info: Info, queryset: QuerySet, prefix: DirectiveValue[str] = '') -> Tuple[QuerySet, Q]:
|
||||
filters = self.lookup.get_filter()
|
||||
|
||||
if not filters:
|
||||
@@ -136,7 +138,7 @@ class TreeNodeFilter:
|
||||
match_type: TreeNodeMatch
|
||||
|
||||
@strawberry_django.filter_field
|
||||
def filter(self, info: Info, queryset: QuerySet, prefix: str = '') -> Tuple[QuerySet, Q]:
|
||||
def filter(self, info: Info, queryset: QuerySet, prefix: DirectiveValue[str] = '') -> Tuple[QuerySet, Q]:
|
||||
model_field_name = prefix.removesuffix('__').removesuffix('_id')
|
||||
model_field = None
|
||||
try:
|
||||
@@ -217,3 +219,30 @@ class FloatArrayLookup(ArrayLookup[float]):
|
||||
@strawberry.input(one_of=True, description='Lookup for Array fields. Only one of the lookup fields can be set.')
|
||||
class StringArrayLookup(ArrayLookup[str]):
|
||||
pass
|
||||
|
||||
|
||||
@strawberry.input(one_of=True, description='Lookups for an ArrayField(RangeField). Only one may be set.')
|
||||
class RangeArrayValueLookup(Generic[T]):
|
||||
"""
|
||||
class for Array field of Range fields lookups
|
||||
"""
|
||||
|
||||
contains: T | None = strawberry.field(
|
||||
default=strawberry.UNSET, description='Return rows where any stored range contains this value.'
|
||||
)
|
||||
|
||||
@strawberry_django.filter_field
|
||||
def filter(self, info: Info, queryset: QuerySet, prefix: str = '') -> Tuple[QuerySet, Q]:
|
||||
"""
|
||||
Map GraphQL: { <field>: { contains: <T> } } To Django ORM: <field>__range_contains=<T>
|
||||
"""
|
||||
if self.contains is strawberry.UNSET or self.contains is None:
|
||||
return queryset, Q()
|
||||
|
||||
# Build '<prefix>range_contains' so it works for nested paths too
|
||||
return queryset, Q(**{f'{prefix}range_contains': self.contains})
|
||||
|
||||
|
||||
@strawberry.input(one_of=True, description='Lookups for an ArrayField(IntegerRangeField). Only one may be set.')
|
||||
class IntegerRangeArrayLookup(RangeArrayValueLookup[int]):
|
||||
pass
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import strawberry
|
||||
import strawberry_django
|
||||
from strawberry.types import Info
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
|
||||
from core.graphql.mixins import ChangelogMixin
|
||||
@@ -26,7 +27,7 @@ class BaseObjectType:
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def get_queryset(cls, queryset, info, **kwargs):
|
||||
def get_queryset(cls, queryset, info: Info, **kwargs):
|
||||
# Enforce object permissions on the queryset
|
||||
if hasattr(queryset, 'restrict'):
|
||||
return queryset.restrict(info.context.request.user, 'view')
|
||||
|
||||
@@ -4,6 +4,7 @@ from datetime import timedelta
|
||||
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.utils.functional import classproperty
|
||||
from django.utils import timezone
|
||||
from django_pglocks import advisory_lock
|
||||
from rq.timeouts import JobTimeoutException
|
||||
|
||||
@@ -113,7 +114,11 @@ class JobRunner(ABC):
|
||||
# If the executed job is a periodic job, schedule its next execution at the specified interval.
|
||||
finally:
|
||||
if job.interval:
|
||||
new_scheduled_time = (job.scheduled or job.started) + timedelta(minutes=job.interval)
|
||||
# Determine the new scheduled time. Cannot be earlier than one minute in the future.
|
||||
new_scheduled_time = max(
|
||||
(job.scheduled or job.started) + timedelta(minutes=job.interval),
|
||||
timezone.now() + timedelta(minutes=1)
|
||||
)
|
||||
if job.object and getattr(job.object, "python_class", None):
|
||||
kwargs["job_timeout"] = job.object.python_class.job_timeout
|
||||
cls.enqueue(
|
||||
|
||||
@@ -673,10 +673,15 @@ def has_feature(model_or_ct, feature):
|
||||
# If an ObjectType was passed, we can use it directly
|
||||
if type(model_or_ct) is ObjectType:
|
||||
ot = model_or_ct
|
||||
# If a ContentType was passed, resolve its model class
|
||||
# If a ContentType was passed, resolve its model class and run the associated feature test
|
||||
elif type(model_or_ct) is ContentType:
|
||||
model_class = model_or_ct.model_class()
|
||||
ot = ObjectType.objects.get_for_model(model_class) if model_class else None
|
||||
model = model_or_ct.model_class()
|
||||
try:
|
||||
test_func = registry['model_features'][feature]
|
||||
except KeyError:
|
||||
# Unknown feature
|
||||
return False
|
||||
return test_func(model)
|
||||
# For anything else, look up the ObjectType
|
||||
else:
|
||||
ot = ObjectType.objects.get_for_model(model_or_ct)
|
||||
|
||||
netbox/netbox/monkey.py (new file)
@@ -0,0 +1,39 @@
from django.db.models import UniqueConstraint
from rest_framework.utils.field_mapping import get_unique_error_message
from rest_framework.validators import UniqueValidator
__all__ = (
'get_unique_validators',
)
def get_unique_validators(field_name, model_field):
"""
Extend Django REST Framework's get_unique_validators() function to attach a UniqueValidator to a field *only* if the
associated UniqueConstraint does NOT have a condition which references another field. See bug #19302.
"""
field_set = {field_name}
conditions = {
c.condition
for c in model_field.model._meta.constraints
if isinstance(c, UniqueConstraint) and set(c.fields) == field_set
}
# START custom logic
conditions = {
cond for cond in conditions
if cond.referenced_base_fields == field_set
}
# END custom logic
if getattr(model_field, 'unique', False):
conditions.add(None)
if not conditions:
return
unique_error_message = get_unique_error_message(model_field)
queryset = model_field.model._default_manager
for condition in conditions:
yield UniqueValidator(
queryset=queryset if condition is None else queryset.filter(condition),
message=unique_error_message
)
@@ -412,7 +412,7 @@ ADMIN_MENU = Menu(
|
||||
MenuItem(
|
||||
link='users:user_list',
|
||||
link_text=_('Users'),
|
||||
auth_required=True,
|
||||
staff_only=True,
|
||||
permissions=['users.view_user'],
|
||||
buttons=(
|
||||
MenuItemButton(
|
||||
@@ -432,7 +432,7 @@ ADMIN_MENU = Menu(
|
||||
MenuItem(
|
||||
link='users:group_list',
|
||||
link_text=_('Groups'),
|
||||
auth_required=True,
|
||||
staff_only=True,
|
||||
permissions=['users.view_group'],
|
||||
buttons=(
|
||||
MenuItemButton(
|
||||
@@ -452,14 +452,14 @@ ADMIN_MENU = Menu(
|
||||
MenuItem(
|
||||
link='users:token_list',
|
||||
link_text=_('API Tokens'),
|
||||
auth_required=True,
|
||||
staff_only=True,
|
||||
permissions=['users.view_token'],
|
||||
buttons=get_model_buttons('users', 'token')
|
||||
),
|
||||
MenuItem(
|
||||
link='users:objectpermission_list',
|
||||
link_text=_('Permissions'),
|
||||
auth_required=True,
|
||||
staff_only=True,
|
||||
permissions=['users.view_objectpermission'],
|
||||
buttons=get_model_buttons('users', 'objectpermission', actions=['add'])
|
||||
),
|
||||
@@ -471,23 +471,23 @@ ADMIN_MENU = Menu(
|
||||
MenuItem(
|
||||
link='core:system',
|
||||
link_text=_('System'),
|
||||
auth_required=True
|
||||
staff_only=True,
|
||||
),
|
||||
MenuItem(
|
||||
link='core:plugin_list',
|
||||
link_text=_('Plugins'),
|
||||
auth_required=True
|
||||
staff_only=True,
|
||||
),
|
||||
MenuItem(
|
||||
link='core:configrevision_list',
|
||||
link_text=_('Configuration History'),
|
||||
auth_required=True,
|
||||
permissions=['core.view_configrevision']
|
||||
staff_only=True,
|
||||
permissions=['core.view_configrevision'],
|
||||
),
|
||||
MenuItem(
|
||||
link='core:background_queue_list',
|
||||
link_text=_('Background Tasks'),
|
||||
auth_required=True
|
||||
staff_only=True,
|
||||
),
|
||||
),
|
||||
),
|
||||
|
||||
@@ -50,6 +50,14 @@ class ObjectAction:
|
||||
except NoReverseMatch:
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def get_url_params(cls, context):
|
||||
request = context['request']
|
||||
params = request.GET.copy()
|
||||
if 'return_url' in context:
|
||||
params['return_url'] = context['return_url']
|
||||
return params
|
||||
|
||||
@classmethod
|
||||
def get_context(cls, context, obj):
|
||||
"""
|
||||
@@ -63,6 +71,7 @@ class ObjectAction:
|
||||
'perms': context['perms'],
|
||||
'request': context['request'],
|
||||
'url': cls.get_url(obj),
|
||||
'url_params': cls.get_url_params(context),
|
||||
'label': cls.label,
|
||||
**cls.get_context(context, obj),
|
||||
**kwargs,
|
||||
|
||||
@@ -3,12 +3,12 @@ from collections import OrderedDict
|
||||
from django.apps import apps
|
||||
from django.urls.exceptions import NoReverseMatch
|
||||
from drf_spectacular.utils import extend_schema
|
||||
from rest_framework import permissions
|
||||
from rest_framework.response import Response
|
||||
from rest_framework.reverse import reverse
|
||||
from rest_framework.views import APIView
|
||||
|
||||
from netbox.registry import registry
|
||||
from utilities.api import IsSuperuser
|
||||
|
||||
|
||||
@extend_schema(exclude=True)
|
||||
@@ -16,7 +16,7 @@ class InstalledPluginsAPIView(APIView):
|
||||
"""
|
||||
API view for listing all installed plugins
|
||||
"""
|
||||
permission_classes = [permissions.IsAdminUser]
|
||||
permission_classes = [IsSuperuser]
|
||||
_ignore_model_permissions = True
|
||||
schema = None
|
||||
|
||||
|
||||
@@ -26,16 +26,6 @@ def get_csv_delimiters():
|
||||
PREFERENCES = {
|
||||
|
||||
# User interface
|
||||
'ui.htmx_navigation': UserPreference(
|
||||
label=_('HTMX Navigation'),
|
||||
choices=(
|
||||
('', _('Disabled')),
|
||||
('true', _('Enabled')),
|
||||
),
|
||||
description=_('Enable dynamic UI navigation'),
|
||||
default=False,
|
||||
warning=_('Experimental feature')
|
||||
),
|
||||
'locale.language': UserPreference(
|
||||
label=_('Language'),
|
||||
choices=(
|
||||
|
||||
@@ -11,6 +11,7 @@ from django.core.exceptions import ImproperlyConfigured, ValidationError
|
||||
from django.core.validators import URLValidator
|
||||
from django.utils.module_loading import import_string
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework.utils import field_mapping
|
||||
|
||||
from core.exceptions import IncompatiblePluginError
|
||||
from netbox.config import PARAMS as CONFIG_PARAMS
|
||||
@@ -19,7 +20,19 @@ from netbox.plugins import PluginConfig
|
||||
from netbox.registry import registry
|
||||
import storages.utils # type: ignore
|
||||
from utilities.release import load_release_data
|
||||
from utilities.security import validate_peppers
|
||||
from utilities.string import trailing_slash
|
||||
from .monkey import get_unique_validators
|
||||
|
||||
|
||||
#
|
||||
# Monkey-patching
|
||||
#
|
||||
|
||||
# TODO: Remove this once #20547 has been implemented
|
||||
# Override DRF's get_unique_validators() function with our own (see bug #19302)
|
||||
field_mapping.get_unique_validators = get_unique_validators
|
||||
|
||||
|
||||
#
|
||||
# Environment setup
|
||||
@@ -31,9 +44,9 @@ VERSION = RELEASE.full_version # Retained for backward compatibility
|
||||
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
# Validate Python version
|
||||
if sys.version_info < (3, 10):
|
||||
if sys.version_info < (3, 12):
|
||||
raise RuntimeError(
|
||||
f"NetBox requires Python 3.10 or later. (Currently installed: Python {platform.python_version()})"
|
||||
f"NetBox requires Python 3.12 or later. (Currently installed: Python {platform.python_version()})"
|
||||
)
|
||||
|
||||
#
|
||||
@@ -63,8 +76,8 @@ elif hasattr(configuration, 'DATABASE') and hasattr(configuration, 'DATABASES'):
|
||||
|
||||
# Set static config parameters
|
||||
ADMINS = getattr(configuration, 'ADMINS', [])
|
||||
ALLOW_TOKEN_RETRIEVAL = getattr(configuration, 'ALLOW_TOKEN_RETRIEVAL', False)
|
||||
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS') # Required
|
||||
API_TOKEN_PEPPERS = getattr(configuration, 'API_TOKEN_PEPPERS', {})
|
||||
AUTH_PASSWORD_VALIDATORS = getattr(configuration, 'AUTH_PASSWORD_VALIDATORS', [
|
||||
{
|
||||
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
|
||||
@@ -162,8 +175,6 @@ REMOTE_AUTH_SUPERUSERS = getattr(configuration, 'REMOTE_AUTH_SUPERUSERS', [])
|
||||
REMOTE_AUTH_USER_EMAIL = getattr(configuration, 'REMOTE_AUTH_USER_EMAIL', 'HTTP_REMOTE_USER_EMAIL')
|
||||
REMOTE_AUTH_USER_FIRST_NAME = getattr(configuration, 'REMOTE_AUTH_USER_FIRST_NAME', 'HTTP_REMOTE_USER_FIRST_NAME')
|
||||
REMOTE_AUTH_USER_LAST_NAME = getattr(configuration, 'REMOTE_AUTH_USER_LAST_NAME', 'HTTP_REMOTE_USER_LAST_NAME')
|
||||
REMOTE_AUTH_STAFF_GROUPS = getattr(configuration, 'REMOTE_AUTH_STAFF_GROUPS', [])
|
||||
REMOTE_AUTH_STAFF_USERS = getattr(configuration, 'REMOTE_AUTH_STAFF_USERS', [])
|
||||
# Required by extras/migrations/0109_script_models.py
|
||||
REPORTS_ROOT = getattr(configuration, 'REPORTS_ROOT', os.path.join(BASE_DIR, 'reports')).rstrip('/')
|
||||
RQ_DEFAULT_TIMEOUT = getattr(configuration, 'RQ_DEFAULT_TIMEOUT', 300)
|
||||
@@ -176,11 +187,16 @@ SECURE_HSTS_INCLUDE_SUBDOMAINS = getattr(configuration, 'SECURE_HSTS_INCLUDE_SUB
|
||||
SECURE_HSTS_PRELOAD = getattr(configuration, 'SECURE_HSTS_PRELOAD', False)
|
||||
SECURE_HSTS_SECONDS = getattr(configuration, 'SECURE_HSTS_SECONDS', 0)
|
||||
SECURE_SSL_REDIRECT = getattr(configuration, 'SECURE_SSL_REDIRECT', False)
|
||||
SENTRY_CONFIG = getattr(configuration, 'SENTRY_CONFIG', {})
|
||||
# TODO: Remove in NetBox v4.5
|
||||
SENTRY_DSN = getattr(configuration, 'SENTRY_DSN', None)
|
||||
SENTRY_ENABLED = getattr(configuration, 'SENTRY_ENABLED', False)
|
||||
# TODO: Remove in NetBox v4.5
|
||||
SENTRY_SAMPLE_RATE = getattr(configuration, 'SENTRY_SAMPLE_RATE', 1.0)
|
||||
# TODO: Remove in NetBox v4.5
|
||||
SENTRY_SEND_DEFAULT_PII = getattr(configuration, 'SENTRY_SEND_DEFAULT_PII', False)
|
||||
SENTRY_TAGS = getattr(configuration, 'SENTRY_TAGS', {})
|
||||
# TODO: Remove in NetBox v4.5
|
||||
SENTRY_TRACES_SAMPLE_RATE = getattr(configuration, 'SENTRY_TRACES_SAMPLE_RATE', 0)
|
||||
SESSION_COOKIE_NAME = getattr(configuration, 'SESSION_COOKIE_NAME', 'sessionid')
|
||||
SESSION_COOKIE_PATH = CSRF_COOKIE_PATH
|
||||
@@ -212,6 +228,12 @@ if len(SECRET_KEY) < 50:
|
||||
f" python {BASE_DIR}/generate_secret_key.py"
|
||||
)
|
||||
|
||||
# Validate API token peppers
|
||||
if API_TOKEN_PEPPERS:
|
||||
validate_peppers(API_TOKEN_PEPPERS)
|
||||
else:
|
||||
warnings.warn("API_TOKEN_PEPPERS is not defined. v2 API tokens cannot be used.")
|
||||
|
||||
# Validate update repo URL and timeout
|
||||
if RELEASE_CHECK_URL:
|
||||
try:
|
||||
@@ -598,18 +620,29 @@ if SENTRY_ENABLED:
|
||||
import sentry_sdk
|
||||
except ModuleNotFoundError:
|
||||
raise ImproperlyConfigured("SENTRY_ENABLED is True but the sentry-sdk package is not installed.")
|
||||
if not SENTRY_DSN:
|
||||
raise ImproperlyConfigured("SENTRY_ENABLED is True but SENTRY_DSN has not been defined.")
|
||||
|
||||
# Construct default Sentry initialization parameters from legacy SENTRY_* config parameters
|
||||
sentry_config = {
|
||||
'dsn': SENTRY_DSN,
|
||||
'sample_rate': SENTRY_SAMPLE_RATE,
|
||||
'send_default_pii': SENTRY_SEND_DEFAULT_PII,
|
||||
'traces_sample_rate': SENTRY_TRACES_SAMPLE_RATE,
|
||||
# TODO: Support proxy routing
|
||||
'http_proxy': HTTP_PROXIES.get('http') if HTTP_PROXIES else None,
|
||||
'https_proxy': HTTP_PROXIES.get('https') if HTTP_PROXIES else None,
|
||||
}
|
||||
# Override/extend the default parameters with any provided via SENTRY_CONFIG
|
||||
sentry_config.update(SENTRY_CONFIG)
|
||||
# Check for a DSN
|
||||
if not sentry_config.get('dsn'):
|
||||
raise ImproperlyConfigured(
|
||||
"Sentry is enabled but a DSN has not been specified. Set one under the SENTRY_CONFIG parameter."
|
||||
)
|
||||
|
||||
# Initialize the SDK
|
||||
sentry_sdk.init(
|
||||
dsn=SENTRY_DSN,
|
||||
release=RELEASE.full_version,
|
||||
sample_rate=SENTRY_SAMPLE_RATE,
|
||||
traces_sample_rate=SENTRY_TRACES_SAMPLE_RATE,
|
||||
send_default_pii=SENTRY_SEND_DEFAULT_PII,
|
||||
# TODO: Support proxy routing
|
||||
http_proxy=HTTP_PROXIES.get('http') if HTTP_PROXIES else None,
|
||||
https_proxy=HTTP_PROXIES.get('https') if HTTP_PROXIES else None
|
||||
**sentry_config
|
||||
)
|
||||
# Assign any configured tags
|
||||
for k, v in SENTRY_TAGS.items():
|
||||
|
||||
@@ -270,7 +270,7 @@ class ActionsColumn(tables.Column):
if not (self.actions or self.extra_buttons):
return ''
# Skip dummy records (e.g. available VLANs or IP ranges replacing individual IPs)
if type(record) is not model or not getattr(record, 'pk', None):
if not isinstance(record, model) or not getattr(record, 'pk', None):
return ''
if request := getattr(table, 'context', {}).get('request'):
@@ -8,6 +8,7 @@ from rest_framework.test import APIClient
|
||||
|
||||
from core.models import ObjectType
|
||||
from dcim.models import Rack, Site
|
||||
from users.constants import TOKEN_PREFIX
|
||||
from users.models import Group, ObjectPermission, Token, User
|
||||
from utilities.testing import TestCase
|
||||
from utilities.testing.api import APITestCase
|
||||
@@ -16,67 +17,159 @@ from utilities.testing.api import APITestCase
class TokenAuthenticationTestCase(APITestCase):

    @override_settings(LOGIN_REQUIRED=True, EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_token_authentication(self):
        url = reverse('dcim-api:site-list')

    def test_no_token(self):
        # Request without a token should return a 403
        response = self.client.get(url)
        response = self.client.get(reverse('dcim-api:site-list'))
        self.assertEqual(response.status_code, 403)

    @override_settings(LOGIN_REQUIRED=True, EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_v1_token_valid(self):
        # Create a v1 token
        token = Token.objects.create(version=1, user=self.user)

        # Valid token should return a 200
        token = Token.objects.create(user=self.user)
        response = self.client.get(url, HTTP_AUTHORIZATION=f'Token {token.key}')
        self.assertEqual(response.status_code, 200)
        header = f'Token {token.token}'
        response = self.client.get(reverse('dcim-api:site-list'), HTTP_AUTHORIZATION=header)
        self.assertEqual(response.status_code, 200, response.data)

        # Check that the token's last_used time has been updated
        token.refresh_from_db()
        self.assertIsNotNone(token.last_used)

    @override_settings(LOGIN_REQUIRED=True, EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_v1_token_invalid(self):
        # Invalid token should return a 403
        header = 'Token XXXXXXXXXX'
        response = self.client.get(reverse('dcim-api:site-list'), HTTP_AUTHORIZATION=header)
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.data['detail'], "Invalid v1 token")

    @override_settings(LOGIN_REQUIRED=True, EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_v2_token_valid(self):
        # Create a v2 token
        token = Token.objects.create(version=2, user=self.user)

        # Valid token should return a 200
        header = f'Bearer {TOKEN_PREFIX}{token.key}.{token.token}'
        response = self.client.get(reverse('dcim-api:site-list'), HTTP_AUTHORIZATION=header)
        self.assertEqual(response.status_code, 200, response.data)

        # Check that the token's last_used time has been updated
        token.refresh_from_db()
        self.assertIsNotNone(token.last_used)

    @override_settings(LOGIN_REQUIRED=True, EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_v2_token_invalid(self):
        # Invalid token should return a 403
        header = f'Bearer {TOKEN_PREFIX}XXXXXX.XXXXXXXXXX'
        response = self.client.get(reverse('dcim-api:site-list'), HTTP_AUTHORIZATION=header)
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.data['detail'], "Invalid v2 token")

    @override_settings(LOGIN_REQUIRED=True, EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_token_expiration(self):
        url = reverse('dcim-api:site-list')

        # Request without a non-expired token should succeed
        token = Token.objects.create(user=self.user)
        response = self.client.get(url, HTTP_AUTHORIZATION=f'Token {token.key}')
        # Create v1 & v2 tokens
        future = datetime.datetime(2100, 1, 1, tzinfo=datetime.timezone.utc)
        token1 = Token.objects.create(version=1, user=self.user, expires=future)
        token2 = Token.objects.create(version=2, user=self.user, expires=future)

        # Request with a non-expired token should succeed
        response = self.client.get(url, HTTP_AUTHORIZATION=f'Token {token1.token}')
        self.assertEqual(response.status_code, 200)
        response = self.client.get(url, HTTP_AUTHORIZATION=f'Bearer {TOKEN_PREFIX}{token2.key}.{token2.token}')
        self.assertEqual(response.status_code, 200)

        # Request with an expired token should fail
        token.expires = datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc)
        token.save()
        response = self.client.get(url, HTTP_AUTHORIZATION=f'Token {token.key}')
        past = datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc)
        token1.expires = past
        token1.save()
        token2.expires = past
        token2.save()
        response = self.client.get(url, HTTP_AUTHORIZATION=f'Token {token1.key}')
        self.assertEqual(response.status_code, 403)
        response = self.client.get(url, HTTP_AUTHORIZATION=f'Bearer {TOKEN_PREFIX}{token2.key}')
        self.assertEqual(response.status_code, 403)

    @override_settings(LOGIN_REQUIRED=True, EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_token_write_enabled(self):
        url = reverse('dcim-api:site-list')
        data = {
            'name': 'Site 1',
            'slug': 'site-1',
        }
        data = [
            {
                'name': 'Site 1',
                'slug': 'site-1',
            },
            {
                'name': 'Site 2',
                'slug': 'site-2',
            },
        ]
        self.add_permissions('dcim.view_site', 'dcim.add_site')

        # Request with a write-disabled token should fail
        token = Token.objects.create(user=self.user, write_enabled=False)
        response = self.client.post(url, data, format='json', HTTP_AUTHORIZATION=f'Token {token.key}')
        # Create v1 & v2 tokens
        token1 = Token.objects.create(version=1, user=self.user, write_enabled=False)
        token2 = Token.objects.create(version=2, user=self.user, write_enabled=False)

        token1_header = f'Token {token1.token}'
        token2_header = f'Bearer {TOKEN_PREFIX}{token2.key}.{token2.token}'

        # GET request with a write-disabled token should succeed
        response = self.client.get(url, HTTP_AUTHORIZATION=token1_header)
        self.assertEqual(response.status_code, 200)
        response = self.client.get(url, HTTP_AUTHORIZATION=token2_header)
        self.assertEqual(response.status_code, 200)

        # POST request with a write-disabled token should fail
        response = self.client.post(url, data[0], format='json', HTTP_AUTHORIZATION=token1_header)
        self.assertEqual(response.status_code, 403)
        response = self.client.post(url, data[1], format='json', HTTP_AUTHORIZATION=token2_header)
        self.assertEqual(response.status_code, 403)

        # Request with a write-enabled token should succeed
        token.write_enabled = True
        token.save()
        response = self.client.post(url, data, format='json', HTTP_AUTHORIZATION=f'Token {token.key}')
        self.assertEqual(response.status_code, 403)
        # POST request with a write-enabled token should succeed
        token1.write_enabled = True
        token1.save()
        token2.write_enabled = True
        token2.save()
        response = self.client.post(url, data[0], format='json', HTTP_AUTHORIZATION=token1_header)
        self.assertEqual(response.status_code, 201)
        response = self.client.post(url, data[1], format='json', HTTP_AUTHORIZATION=token2_header)
        self.assertEqual(response.status_code, 201)

    @override_settings(LOGIN_REQUIRED=True, EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_token_allowed_ips(self):
        url = reverse('dcim-api:site-list')

        # Create v1 & v2 tokens
        token1 = Token.objects.create(version=1, user=self.user, allowed_ips=['192.0.2.0/24'])
        token2 = Token.objects.create(version=2, user=self.user, allowed_ips=['192.0.2.0/24'])

        # Request from a non-allowed client IP should fail
        token = Token.objects.create(user=self.user, allowed_ips=['192.0.2.0/24'])
        response = self.client.get(url, HTTP_AUTHORIZATION=f'Token {token.key}', REMOTE_ADDR='127.0.0.1')
        response = self.client.get(
            url,
            HTTP_AUTHORIZATION=f'Token {token1.token}',
            REMOTE_ADDR='127.0.0.1'
        )
        self.assertEqual(response.status_code, 403)
        response = self.client.get(
            url,
            HTTP_AUTHORIZATION=f'Bearer {TOKEN_PREFIX}{token2.key}.{token2.token}',
            REMOTE_ADDR='127.0.0.1'
        )
        self.assertEqual(response.status_code, 403)

        # Request with an expired token should fail
        response = self.client.get(url, HTTP_AUTHORIZATION=f'Token {token.key}', REMOTE_ADDR='192.0.2.1')
        # Request from an allowed client IP should succeed
        response = self.client.get(
            url,
            HTTP_AUTHORIZATION=f'Token {token1.token}',
            REMOTE_ADDR='192.0.2.1'
        )
        self.assertEqual(response.status_code, 200)
        response = self.client.get(
            url,
            HTTP_AUTHORIZATION=f'Bearer {TOKEN_PREFIX}{token2.key}.{token2.token}',
            REMOTE_ADDR='192.0.2.1'
        )
        self.assertEqual(response.status_code, 200)

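The allowed_ips checks exercised at the end of the hunk above boil down to a client-IP-in-prefix test. A minimal illustrative equivalent using the standard-library ipaddress module (NetBox's own implementation may use a different library internally):

```python
# Illustrative equivalent of the allowed_ips behaviour exercised by the tests above.
from ipaddress import ip_address, ip_network

allowed_ips = ['192.0.2.0/24']

def client_allowed(remote_addr: str) -> bool:
    # Accept the request only if the client address falls inside an allowed prefix.
    addr = ip_address(remote_addr)
    return any(addr in ip_network(prefix) for prefix in allowed_ips)

print(client_allowed('127.0.0.1'))   # False: request rejected (403)
print(client_allowed('192.0.2.1'))   # True:  request accepted (200)
```
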
@@ -427,7 +520,7 @@ class ObjectPermissionAPIViewTestCase(TestCase):
        """
        self.user = User.objects.create(username='testuser')
        self.token = Token.objects.create(user=self.user)
        self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(self.token.key)}
        self.header = {'HTTP_AUTHORIZATION': f'Bearer {TOKEN_PREFIX}{self.token.key}.{self.token.token}'}

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_get_object(self):

Some files were not shown because too many files have changed in this diff.