diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml
index 44c797241..5cfc8684d 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yaml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yaml
@@ -1,7 +1,7 @@
---
name: 🐛 Bug Report
description: Report a reproducible bug in the current release of NetBox
-labels: ["type: bug", "needs triage"]
+labels: ["type: bug", "status: needs triage"]
body:
- type: markdown
attributes:
@@ -26,7 +26,7 @@ body:
attributes:
label: NetBox Version
description: What version of NetBox are you currently running?
- placeholder: v3.7.4
+ placeholder: v4.0.5
validations:
required: true
- type: dropdown
@@ -34,10 +34,9 @@ body:
label: Python Version
description: What version of Python are you currently running?
options:
- - "3.8"
- - "3.9"
- "3.10"
- "3.11"
+ - "3.12"
validations:
required: true
- type: textarea
diff --git a/.github/ISSUE_TEMPLATE/documentation_change.yaml b/.github/ISSUE_TEMPLATE/documentation_change.yaml
index 0f80f1716..b5a970782 100644
--- a/.github/ISSUE_TEMPLATE/documentation_change.yaml
+++ b/.github/ISSUE_TEMPLATE/documentation_change.yaml
@@ -1,7 +1,7 @@
---
name: 📖 Documentation Change
description: Suggest an addition or modification to the NetBox documentation
-labels: ["type: documentation", "needs triage"]
+labels: ["type: documentation", "status: needs triage"]
body:
- type: dropdown
attributes:
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml
index a550bff57..c351ec599 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yaml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yaml
@@ -1,7 +1,7 @@
---
name: ✨ Feature Request
description: Propose a new NetBox feature or enhancement
-labels: ["type: feature", "needs triage"]
+labels: ["type: feature", "status: needs triage"]
body:
- type: markdown
attributes:
@@ -14,7 +14,7 @@ body:
attributes:
label: NetBox version
description: What version of NetBox are you currently running?
- placeholder: v3.7.4
+ placeholder: v4.0.5
validations:
required: true
- type: dropdown
diff --git a/.github/workflows/auto-assign-issue.yml b/.github/workflows/auto-assign-issue.yml
index 20e054806..309f79800 100644
--- a/.github/workflows/auto-assign-issue.yml
+++ b/.github/workflows/auto-assign-issue.yml
@@ -12,9 +12,10 @@ jobs:
auto-assign:
runs-on: ubuntu-latest
steps:
- - uses: pozil/auto-assign-issue@v1
- if: "contains(github.event.issue.labels.*.name, 'type: bug') || contains(github.event.issue.labels.*.name, 'type: feature')"
+ - uses: pozil/auto-assign-issue@v2
+ if: "contains(github.event.issue.labels.*.name, 'status: needs triage')"
with:
- assignees: abhi1693,arthanson,DanSheps,jeffgdotorg,jeremystretch
+ # Weighted assignments
+ assignees: arthanson:3, jeffgdotorg:3, jeremystretch:3, DanSheps
numOfAssignee: 1
abortIfPreviousAssignees: true
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d794786f1..a84359bf9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,7 +1,18 @@
name: CI
-on: [push, pull_request]
+
+on:
+ push:
+ paths-ignore:
+ - 'contrib/**'
+ - 'docs/**'
+ pull_request:
+ paths-ignore:
+ - 'contrib/**'
+ - 'docs/**'
+
permissions:
contents: read
+
jobs:
build:
runs-on: ubuntu-latest
@@ -34,12 +45,12 @@ jobs:
uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Use Node.js ${{ matrix.node-version }}
- uses: actions/setup-node@v3
+ uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node-version }}
@@ -47,7 +58,7 @@ jobs:
run: npm install -g yarn
- name: Setup Node.js with Yarn Caching
- uses: actions/setup-node@v3
+ uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node-version }}
cache: yarn
diff --git a/.github/workflows/close-incomplete-issues.yml b/.github/workflows/close-incomplete-issues.yml
new file mode 100644
index 000000000..4d31d735e
--- /dev/null
+++ b/.github/workflows/close-incomplete-issues.yml
@@ -0,0 +1,32 @@
+# close-stale-issues (https://github.com/marketplace/actions/close-stale-issues)
+name: Close incomplete issues
+
+on:
+ schedule:
+ - cron: '15 4 * * *'
+ workflow_dispatch:
+
+permissions:
+ actions: write
+ issues: write
+
+jobs:
+ stale:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@v9
+ with:
+ close-issue-message: >
+ This issue is being closed as no further information has been provided. If
+ you would like to revisit this topic, please first modify your original post
+ to include all the requested detail, and then ask that the issue be reopened.
+ days-before-stale: 7
+ days-before-close: 7
+ only-issue-labels: 'status: revisions needed'
+ operations-per-run: 100
+ remove-stale-when-updated: false
+ stale-issue-label: 'pending closure'
+ stale-issue-message: >
+ This is a reminder that additional information is needed in order to further
+ triage this issue. If the requested details are not provided, the issue will
+ soon be closed automatically.
diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml
index a1bbb0b7e..b02ffdacd 100644
--- a/.github/workflows/close-stale-issues.yml
+++ b/.github/workflows/close-stale-issues.yml
@@ -7,6 +7,7 @@ on:
workflow_dispatch:
permissions:
+ actions: write
issues: write
pull-requests: write
@@ -16,18 +17,19 @@ jobs:
steps:
- uses: actions/stale@v9
with:
+ # General parameters
+ operations-per-run: 100
+ remove-stale-when-updated: false
+
+ # Issue parameters
close-issue-message: >
This issue has been automatically closed due to lack of activity. In an
effort to reduce noise, please do not comment any further. Note that the
core maintainers may elect to reopen this issue at a later date if deemed
necessary.
- close-pr-message: >
- This PR has been automatically closed due to lack of activity.
- days-before-stale: 90
- days-before-close: 30
- exempt-issue-labels: 'status: accepted,status: blocked,status: needs milestone'
- operations-per-run: 100
- remove-stale-when-updated: false
+ days-before-issue-stale: 90
+ days-before-issue-close: 30
+ exempt-issue-labels: 'status: accepted,status: backlog,status: blocked'
stale-issue-label: 'pending closure'
stale-issue-message: >
This issue has been automatically marked as stale because it has not had
@@ -37,6 +39,12 @@ jobs:
process by "bumping" the issue; doing so will result in its immediate closure
and you may be barred from participating in any future discussions. Please see
our [contributing guide](https://github.com/netbox-community/netbox/blob/develop/CONTRIBUTING.md).
+
+ # Pull request parameters
+ close-pr-message: >
+ This PR has been automatically closed due to lack of activity.
+ days-before-pr-stale: 15
+ days-before-pr-close: 15
stale-pr-label: 'pending closure'
stale-pr-message: >
This PR has been automatically marked as stale because it has not had
diff --git a/.github/workflows/update-translation-strings.yml b/.github/workflows/update-translation-strings.yml
new file mode 100644
index 000000000..bcd68c887
--- /dev/null
+++ b/.github/workflows/update-translation-strings.yml
@@ -0,0 +1,45 @@
+name: Update translation strings
+
+on:
+ schedule:
+ - cron: '0 5 * * *'
+ workflow_dispatch:
+
+permissions:
+ contents: write
+
+env:
+ LOCALE: "en"
+
+jobs:
+ makemessages:
+ runs-on: ubuntu-latest
+ env:
+ NETBOX_CONFIGURATION: netbox.configuration_testing
+
+ steps:
+ - name: Check out repo
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: 3.11
+
+ - name: Install system dependencies
+ run: sudo apt install -y gettext
+
+ - name: Install Python dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+
+ - name: Run makemessages
+ run: python netbox/manage.py makemessages -l ${{ env.LOCALE }}
+
+ - name: Commit changes
+ uses: EndBug/add-and-commit@v9
+ with:
+ add: 'netbox/translations/'
+ default_author: github_actions
+ message: 'Update source translation strings'
diff --git a/.gitignore b/.gitignore
index 93954fd41..88faab27c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,9 +17,11 @@ yarn-error.log*
/venv/
/*.sh
local_requirements.txt
+local_settings.py
!upgrade.sh
fabfile.py
gunicorn.py
+uwsgi.ini
netbox.log
netbox.pid
.DS_Store
diff --git a/README.md b/README.md
index 8d2efed23..4d21003b5 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
 Stats via Repography
diff --git a/base_requirements.txt b/base_requirements.txt
index 7c12e8871..9912f1d6b 100644
--- a/base_requirements.txt
+++ b/base_requirements.txt
@@ -131,9 +131,8 @@ social-auth-app-django
strawberry-graphql
# Strawberry GraphQL Django extension
-# https://github.com/strawberry-graphql/strawberry-django/blob/main/CHANGELOG.md
-# Pinned per #15574
-strawberry-graphql-django==0.34.0
+# https://github.com/strawberry-graphql/strawberry-django/releases
+strawberry-graphql-django
# SVG image rendering (used for rack elevations)
# https://github.com/mozman/svgwrite/blob/master/NEWS.rst
diff --git a/contrib/generated_schema.json b/contrib/generated_schema.json
index b6632dd4c..deda2b821 100644
--- a/contrib/generated_schema.json
+++ b/contrib/generated_schema.json
@@ -179,6 +179,9 @@
"usb-micro-ab",
"usb-3-b",
"usb-3-micro-b",
+ "molex-micro-fit-1x2",
+ "molex-micro-fit-2x2",
+ "molex-micro-fit-2x4",
"dc-terminal",
"saf-d-grid",
"neutrik-powercon-20",
@@ -281,6 +284,9 @@
"usb-a",
"usb-micro-b",
"usb-c",
+ "molex-micro-fit-1x2",
+ "molex-micro-fit-2x2",
+ "molex-micro-fit-2x4",
"dc-terminal",
"hdot-cx",
"saf-d-grid",
@@ -317,6 +323,7 @@
"100base-tx",
"100base-t1",
"1000base-t",
+ "1000base-tx",
"2.5gbase-t",
"5gbase-t",
"10gbase-t",
@@ -353,6 +360,8 @@
"800gbase-x-qsfpdd",
"800gbase-x-osfp",
"1000base-kx",
+ "2.5gbase-kx",
+ "5gbase-kr",
"10gbase-kr",
"10gbase-kx4",
"25gbase-kr",
@@ -373,6 +382,8 @@
"gsm",
"cdma",
"lte",
+ "4g",
+ "5g",
"sonet-oc3",
"sonet-oc12",
"sonet-oc48",
@@ -406,12 +417,15 @@
"e3",
"xdsl",
"docsis",
+ "bpon",
+ "epon",
+ "10g-epon",
"gpon",
"xg-pon",
"xgs-pon",
"ng-pon2",
- "epon",
- "10g-epon",
+ "25g-pon",
+ "50g-pon",
"cisco-stackwise",
"cisco-stackwise-plus",
"cisco-flexstack",
diff --git a/contrib/gunicorn.py b/contrib/gunicorn.py
index 89d6943b4..4b2b7c6b0 100644
--- a/contrib/gunicorn.py
+++ b/contrib/gunicorn.py
@@ -14,3 +14,7 @@ timeout = 120
# The maximum number of requests a worker can handle before being respawned
max_requests = 5000
max_requests_jitter = 500
+
+# Uncomment this line to accept HTTP headers containing underscores, e.g. for remote
+# authentication support. See https://docs.gunicorn.org/en/stable/settings.html#header-map
+# header_map = 'dangerous'
diff --git a/contrib/uwsgi.ini b/contrib/uwsgi.ini
index d64803158..a8bedc1d7 100644
--- a/contrib/uwsgi.ini
+++ b/contrib/uwsgi.ini
@@ -11,8 +11,24 @@ master = true
; clear environment on exit
vacuum = true
+; make SIGTERM stop the app (instead of reload)
+die-on-term = true
+
; exit if no app can be loaded
need-app = true
; do not use multiple interpreters
single-interpreter = true
+
+; change to the project directory
+chdir = netbox
+
+; specify the WSGI module to load
+module = netbox.wsgi
+
+; workaround to make uWSGI reloads work with pyuwsgi (not to be used if using uwsgi package instead)
+binary-path = venv/bin/python
+
+; only log internal messages and errors (reverse proxy already logs the requests)
+disable-logging = true
+log-5xx = true
diff --git a/docs/_theme/main.html b/docs/_theme/main.html
index 3ff44b9cb..99907bf42 100644
--- a/docs/_theme/main.html
+++ b/docs/_theme/main.html
@@ -2,8 +2,8 @@
{% block site_meta %}
{{ super() }}
- {# Disable search indexing unless we're building for ReadTheDocs (see #10496) #}
- {% if page.canonical_url != 'https://docs.netbox.dev/' %}
+ {# Disable search indexing unless we're building for public consumption #}
+ {% if not config.extra.build_public %}
{% endif %}
{% endblock %}
diff --git a/docs/administration/authentication/overview.md b/docs/administration/authentication/overview.md
index 3a3b9efc2..a6c3a3159 100644
--- a/docs/administration/authentication/overview.md
+++ b/docs/administration/authentication/overview.md
@@ -26,7 +26,10 @@ REMOTE_AUTH_BACKEND = 'netbox.authentication.RemoteUserBackend'
Another option for remote authentication in NetBox is to enable HTTP header-based user assignment. The front end HTTP server (e.g. nginx or Apache) performs client authentication as a process external to NetBox, and passes information about the authenticated user via HTTP headers. By default, the user is assigned via the `REMOTE_USER` header, but this can be customized via the `REMOTE_AUTH_HEADER` configuration parameter.
-Optionally, user profile information can be supplied by `REMOTE_USER_FIRST_NAME`, `REMOTE_USER_LAST_NAME` and `REMOTE_USER_EMAIL` headers. These are saved to the users profile during the authentication process. These headers can be customized like the `REMOTE_USER` header.
+Optionally, user profile information can be supplied by `REMOTE_USER_FIRST_NAME`, `REMOTE_USER_LAST_NAME` and `REMOTE_USER_EMAIL` headers. These are saved to the user's profile during the authentication process. These headers can be customized like the `REMOTE_USER` header.
+
+!!! warning "Verify Header Compatibility"
+ Some WSGI servers may drop headers which contain unsupported characters. For instance, gunicorn v22.0 and later silently drops HTTP headers containing underscores. This behavior can be disabled by changing gunicorn's [`header_map`](https://docs.gunicorn.org/en/stable/settings.html#header-map) setting to `dangerous`.
### Single Sign-On (SSO)
diff --git a/docs/configuration/date-time.md b/docs/configuration/date-time.md
deleted file mode 100644
index a23053e08..000000000
--- a/docs/configuration/date-time.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Date & Time Parameters
-
-## TIME_ZONE
-
-Default: UTC
-
-The time zone NetBox will use when dealing with dates and times. It is recommended to use UTC time unless you have a specific need to use a local time zone. Please see the [list of available time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
-
-## Date and Time Formatting
-
-You may define custom formatting for date and times. For detailed instructions on writing format strings, please see [the Django documentation](https://docs.djangoproject.com/en/stable/ref/templates/builtins/#date). Default formats are listed below.
-
-!!! note
- These system defaults will be overridden by a user's selected language/locale when [localization](./system.md#enable_localization) is enabled.
-
-```python
-DATE_FORMAT = 'N j, Y' # June 26, 2016
-SHORT_DATE_FORMAT = 'Y-m-d' # 2016-06-26
-TIME_FORMAT = 'g:i a' # 1:23 p.m.
-SHORT_TIME_FORMAT = 'H:i:s' # 13:23:00
-DATETIME_FORMAT = 'N j, Y g:i a' # June 26, 2016 1:23 p.m.
-SHORT_DATETIME_FORMAT = 'Y-m-d H:i' # 2016-06-26 13:23
-```
diff --git a/docs/configuration/miscellaneous.md b/docs/configuration/miscellaneous.md
index 7e68bcee7..1f0a2781b 100644
--- a/docs/configuration/miscellaneous.md
+++ b/docs/configuration/miscellaneous.md
@@ -33,9 +33,6 @@ This defines custom content to be displayed on the login page above the login fo
!!! tip "Dynamic Configuration Parameter"
-!!! note
- This parameter was added in NetBox v3.5.
-
This adds a banner to the top of every page when maintenance mode is enabled. HTML is allowed.
---
@@ -115,9 +112,6 @@ Default: True
By default, NetBox will prevent the creation of duplicate prefixes and IP addresses in the global table (that is, those which are not assigned to any VRF). This validation can be disabled by setting `ENFORCE_GLOBAL_UNIQUE` to False.
-!!! info "Changed in v3.7"
- The default value for this parameter was changed from False to True in NetBox v3.7.
-
---
## FILE_UPLOAD_MAX_MEMORY_SIZE
@@ -142,9 +136,6 @@ Setting this to False will disable the GraphQL API.
!!! tip "Dynamic Configuration Parameter"
-!!! note
- This parameter was renamed from `JOBRESULT_RETENTION` in NetBox v3.5.
-
Default: 90
The number of days to retain job results (scripts and reports). Set this to `0` to retain job results in the database indefinitely.
@@ -239,9 +230,6 @@ The maximum execution time of a background task (such as running a custom script
## RQ_RETRY_INTERVAL
-!!! note
- This parameter was added in NetBox v3.5.
-
Default: `60`
This parameter controls how frequently a failed job is retried, up to the maximum number of times specified by `RQ_RETRY_MAX`. This must be either an integer specifying the number of seconds to wait between successive attempts, or a list of such values. For example, `[60, 300, 3600]` will retry the task after 1 minute, 5 minutes, and 1 hour.
@@ -250,9 +238,6 @@ This parameter controls how frequently a failed job is retried, up to the maximu
## RQ_RETRY_MAX
-!!! note
- This parameter was added in NetBox v3.5.
-
Default: `0` (retries disabled)
The maximum number of times a background task will be retried before being marked as failed.
diff --git a/docs/configuration/remote-authentication.md b/docs/configuration/remote-authentication.md
index e7fe56a09..5f28d987f 100644
--- a/docs/configuration/remote-authentication.md
+++ b/docs/configuration/remote-authentication.md
@@ -85,6 +85,9 @@ Default: `'HTTP_REMOTE_USER'`
When remote user authentication is in use, this is the name of the HTTP header which informs NetBox of the currently authenticated user. For example, to use the request header `X-Remote-User` it needs to be set to `HTTP_X_REMOTE_USER`. (Requires `REMOTE_AUTH_ENABLED`.)
+!!! warning "Verify Header Compatibility"
+ Some WSGI servers may drop headers which contain unsupported characters. For instance, gunicorn v22.0 and later silently drops HTTP headers containing underscores. This behavior can be disabled by changing gunicorn's [`header_map`](https://docs.gunicorn.org/en/stable/settings.html#header-map) setting to `dangerous`.
+
---
## REMOTE_AUTH_USER_EMAIL
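For context, the warning above concerns the header-based remote authentication parameters documented in this file. A minimal sketch of how they might be combined in `configuration.py` (the header names are illustrative, not defaults):

```python
# configuration.py -- illustrative sketch of header-based remote authentication.
# A front-end proxy sending "X-Remote-User" is seen by Django/NetBox as
# "HTTP_X_REMOTE_USER"; adjust the names to match the headers your proxy sends.
REMOTE_AUTH_ENABLED = True
REMOTE_AUTH_BACKEND = 'netbox.authentication.RemoteUserBackend'
REMOTE_AUTH_HEADER = 'HTTP_X_REMOTE_USER'
REMOTE_AUTH_USER_EMAIL = 'HTTP_X_REMOTE_USER_EMAIL'
```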
diff --git a/docs/configuration/required-parameters.md b/docs/configuration/required-parameters.md
index bda365995..90eb8c0cf 100644
--- a/docs/configuration/required-parameters.md
+++ b/docs/configuration/required-parameters.md
@@ -94,15 +94,25 @@ REDIS = {
}
```
-!!! note
- If you are upgrading from a NetBox release older than v2.7.0, please note that the Redis connection configuration
- settings have changed. Manual modification to bring the `REDIS` section inline with the above specification is
- necessary
-
!!! warning
It is highly recommended to keep the task and cache databases separate. Using the same database number on the
same Redis instance for both may result in queued background tasks being lost during cache flushing events.
+### UNIX Socket Support
+
+Redis may alternatively be configured by specifying a complete URL instead of individual components. This approach supports the use of a UNIX socket connection. For example:
+
+```python
+REDIS = {
+ 'tasks': {
+ 'URL': 'unix:///run/redis-netbox/redis.sock?db=0'
+ },
+ 'caching': {
+ 'URL': 'unix:///run/redis-netbox/redis.sock?db=1'
+ },
+}
+```
+
### Using Redis Sentinel
If you are using [Redis Sentinel](https://redis.io/topics/sentinel) for high-availability purposes, there is minimal
diff --git a/docs/configuration/security.md b/docs/configuration/security.md
index a21b82a04..15702f649 100644
--- a/docs/configuration/security.md
+++ b/docs/configuration/security.md
@@ -159,9 +159,12 @@ Note that enabling this setting causes NetBox to update a user's session in the
## LOGIN_REQUIRED
-Default: False
+Default: True
-Setting this to True will permit only authenticated users to access any part of NetBox. By default, anonymous users are permitted to access most data in NetBox but not make any changes.
+When enabled, only authenticated users are permitted to access any part of NetBox. Disabling this will allow unauthenticated users to access most areas of NetBox (but not make any changes).
+
+!!! info "Changed in NetBox v4.0.2"
+ Prior to NetBox v4.0.2, this setting was disabled by default.
---
@@ -181,6 +184,30 @@ The view name or URL to which a user is redirected after logging out.
---
+## SECURE_HSTS_INCLUDE_SUBDOMAINS
+
+Default: False
+
+If true, the `includeSubDomains` directive will be included in the HTTP Strict Transport Security (HSTS) header. This directive instructs the browser to apply the HSTS policy to all subdomains of the current domain.
+
+---
+
+## SECURE_HSTS_PRELOAD
+
+Default: False
+
+If true, the `preload` directive will be included in the HTTP Strict Transport Security (HSTS) header. This directive instructs the browser to preload the site in HTTPS. Browsers that use the HSTS preload list will force the site to be accessed via HTTPS even if the user types HTTP in the address bar.
+
+---
+
+## SECURE_HSTS_SECONDS
+
+Default: 0
+
+If set to a non-zero integer value, the SecurityMiddleware sets the HTTP Strict Transport Security (HSTS) header on all responses that do not already have it. This will instruct the browser that the website must be accessed via HTTPS, blocking any HTTP request.
+
+---
+
## SECURE_SSL_REDIRECT
Default: False
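Taken together, the new HSTS parameters and the revised `LOGIN_REQUIRED` default could be applied in `configuration.py` roughly as follows; the values are illustrative, not prescriptive:

```python
# configuration.py -- illustrative hardening values
LOGIN_REQUIRED = True                  # the default since v4.0.2
SECURE_HSTS_SECONDS = 31536000         # one year; 0 (the default) omits the HSTS header
SECURE_HSTS_INCLUDE_SUBDOMAINS = True  # add the includeSubDomains directive
SECURE_HSTS_PRELOAD = True             # add the preload directive
```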
diff --git a/docs/configuration/system.md b/docs/configuration/system.md
index 806839778..a1e0ebb17 100644
--- a/docs/configuration/system.md
+++ b/docs/configuration/system.md
@@ -16,10 +16,7 @@ BASE_PATH = 'netbox/'
Default: `en-us` (US English)
-Defines the default preferred language/locale for requests that do not specify one. This is used to alter e.g. the display of dates and numbers to fit the user's locale. See [this list](http://www.i18nguy.com/unicode/language-identifiers.html) of standard language codes. (This parameter maps to Django's [`LANGUAGE_CODE`](https://docs.djangoproject.com/en/stable/ref/settings/#language-code) internal setting.)
-
-!!! note
- Altering this parameter will *not* change the language used in NetBox. We hope to provide translation support in a future NetBox release.
+Defines the default preferred language/locale for requests that do not specify one. (This parameter maps to Django's [`LANGUAGE_CODE`](https://docs.djangoproject.com/en/stable/ref/settings/#language-code) internal setting.)
---
@@ -65,14 +62,6 @@ Email is sent from NetBox only for critical events or if configured for [logging
---
-## ENABLE_LOCALIZATION
-
-Default: False
-
-Determines if localization features are enabled or not. This should only be enabled for development or testing purposes as netbox is not yet fully localized. Turning this on will localize numeric and date formats (overriding any configured [system defaults](./date-time.md#date-and-time-formatting)) based on the browser locale as well as translate certain strings from third party modules.
-
----
-
## HTTP_PROXIES
Default: None
@@ -203,3 +192,17 @@ A dictionary of configuration parameters for the storage backend configured as `
If `STORAGE_BACKEND` is not defined, this setting will be ignored.
---
+
+## TIME_ZONE
+
+Default: UTC
+
+The time zone NetBox will use when dealing with dates and times. It is recommended to use UTC time unless you have a specific need to use a local time zone. Please see the [list of available time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
+
+---
+
+## TRANSLATION_ENABLED
+
+Default: True
+
+Enables language translation for the user interface. (This parameter maps to Django's [USE_I18N](https://docs.djangoproject.com/en/stable/ref/settings/#std-setting-USE_I18N) setting.)
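Both relocated parameters can also be set explicitly in `configuration.py`; the sketch below simply restates the documented defaults:

```python
# configuration.py -- defaults shown explicitly
TIME_ZONE = 'UTC'
TRANSLATION_ENABLED = True
```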
diff --git a/docs/customization/custom-fields.md b/docs/customization/custom-fields.md
index e9ff7bd9f..1f9a4a8bf 100644
--- a/docs/customization/custom-fields.md
+++ b/docs/customization/custom-fields.md
@@ -42,8 +42,6 @@ This parameter has no effect on the API representation of custom field data.
### Visibility & Editing
-!!! info "This feature was improved in NetBox v3.7."
-
When creating a custom field, users can control the conditions under which it may be displayed and edited within the NetBox user interface. The following choices are available for controlling the display of a custom field on an object:
* **Always** (default): The custom field is included when viewing an object.
diff --git a/docs/customization/custom-scripts.md b/docs/customization/custom-scripts.md
index bdc3f9104..2a8f252aa 100644
--- a/docs/customization/custom-scripts.md
+++ b/docs/customization/custom-scripts.md
@@ -65,12 +65,6 @@ class AnotherCustomScript(Script):
script_order = (MyCustomScript, AnotherCustomScript)
```
-## Module Attributes
-
-### `name`
-
-You can define `name` within a script module (the Python file which contains one or more scripts) to set the module name. If `name` is not defined, the module's file name will be used.
-
## Script Attributes
Script attributes are defined under a class named `Meta` within the script. These are optional, but encouraged.
@@ -371,6 +365,14 @@ An IPv4 or IPv6 network with a mask. Returns a `netaddr.IPNetwork` object. Two a
* `min_prefix_length` - Minimum length of the mask
* `max_prefix_length` - Maximum length of the mask
+### DateVar
+
+A calendar date. Returns a `datetime.date` object.
+
+### DateTimeVar
+
+A complete date & time. Returns a `datetime.datetime` object.
+
## Running Custom Scripts
!!! note
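The hunk above documents the new `DateVar` and `DateTimeVar` script fields. A minimal sketch of a custom script using them (the class and field names are hypothetical):

```python
from extras.scripts import DateTimeVar, DateVar, Script


class MaintenanceWindowScript(Script):
    """Hypothetical script illustrating the new date/time variable types."""

    class Meta:
        name = "Schedule Maintenance Window"

    window_date = DateVar(description="Date of the maintenance window")
    window_start = DateTimeVar(description="Exact start of the change")

    def run(self, data, commit):
        # data['window_date'] is a datetime.date; data['window_start'] is a datetime.datetime
        self.log_info(f"Window on {data['window_date']} starting at {data['window_start']}")
        return "Maintenance window recorded."
```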
diff --git a/docs/development/adding-models.md b/docs/development/adding-models.md
index 7de897a97..823789641 100644
--- a/docs/development/adding-models.md
+++ b/docs/development/adding-models.md
@@ -77,7 +77,7 @@ Create the following for each model:
## 13. GraphQL API components
-Create a Graphene object type for the model in `graphql/types.py` by subclassing the appropriate class from `netbox.graphql.types`.
+Create a GraphQL object type for the model in `graphql/types.py` by subclassing the appropriate class from `netbox.graphql.types`.
Also extend the schema class defined in `graphql/schema.py` with the individual object and object list fields per the established convention.
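As a rough sketch of the strawberry-based pattern now used for GraphQL object types (the `Widget` model and module layout are hypothetical):

```python
# <app>/graphql/types.py -- hypothetical model and paths
import strawberry_django

from netbox.graphql.types import NetBoxObjectType

from .. import models


@strawberry_django.type(models.Widget, fields='__all__')
class WidgetType(NetBoxObjectType):
    pass
```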
diff --git a/docs/development/release-checklist.md b/docs/development/release-checklist.md
index 2af640546..4f6e2f25f 100644
--- a/docs/development/release-checklist.md
+++ b/docs/development/release-checklist.md
@@ -59,7 +59,7 @@ Notify the [`netbox-docker`](https://github.com/netbox-community/netbox-docker)
* Increases in minimum versions for service dependencies (PostgreSQL, Redis, etc.)
* Any changes to the reference installation
-### Update Requirements
+### Update Python Dependencies
Before each release, update each of NetBox's Python dependencies to its most recent stable version. These are defined in `requirements.txt`, which is updated from `base_requirements.txt` using `pip`. To do this:
@@ -70,6 +70,10 @@ Before each release, update each of NetBox's Python dependencies to its most rec
In cases where upgrading a dependency to its most recent release is breaking, it should be constrained to its current minor version in `base_requirements.txt` with an explanatory comment and revisited for the next major NetBox release (see the [Address Constrained Dependencies](#address-constrained-dependencies) section above).
+### Update UI Dependencies
+
+Check whether any UI dependencies (JavaScript packages, fonts, etc.) need to be updated by running `yarn outdated` from within the `project-static/` directory. [Upgrade these dependencies](./web-ui.md#updating-dependencies) as necessary, then run `yarn bundle` to generate the necessary files for distribution.
+
### Rebuild the Device Type Definition Schema
Run the following command to update the device type definition validation schema:
@@ -82,15 +86,7 @@ This will automatically update the schema file at `contrib/generated_schema.json
### Update & Compile Translations
-Log into [Transifex](https://app.transifex.com/netbox-community/netbox/dashboard/) to download the updated string maps. Download the resource (portable object, or `.po`) file for each language and save them to `netbox/translations/$lang/LC_MESSAGES/django.po`, overwriting the current files. (Be sure to click the **Download for use** link.)
-
-
-
-Once the resource files for all languages have been updated, compile the machine object (`.mo`) files using the `compilemessages` management command:
-
-```nohighlight
-./manage.py compilemessages
-```
+Updated language translations should be pulled from [Transifex](https://app.transifex.com/netbox-community/netbox/dashboard/) and re-compiled for each new release. Follow the documented process for [updating translated strings](./translations.md#updating-translated-strings) to do this.
### Update Version and Changelog
diff --git a/docs/development/translations.md b/docs/development/translations.md
index e40f996c5..b23e89d71 100644
--- a/docs/development/translations.md
+++ b/docs/development/translations.md
@@ -6,17 +6,38 @@ All language translations in NetBox are generated from the source file found at
Reviewers log into Transifex and navigate to their designated language(s) to translate strings. The initial translation for most strings will be machine-generated via the AWS Translate service. Human reviewers are responsible for reviewing these translations and making corrections where necessary.
-Immediately prior to each NetBox release, the translation maps for all completed languages will be downloaded from Transifex, compiled, and checked into the NetBox code base by a maintainer.
-
## Updating Translation Sources
-To update the English `.po` file from which all translations are derived, use the `makemessages` management command:
+To update the English `.po` file from which all translations are derived, use the `makemessages` management command (ignoring the `project-static/` directory):
```nohighlight
-./manage.py makemessages -l en
+./manage.py makemessages -l en -i "project-static/*"
```
-Then, commit the change and push to the `develop` branch on GitHub. After some time, any new strings will appear for translation on Transifex automatically.
+Then, commit the change and push to the `develop` branch on GitHub. Any new strings will appear for translation on Transifex automatically.
+
+## Updating Translated Strings
+
+Typically, translated strings need to be updated only as part of the NetBox [release process](./release-checklist.md).
+
+To update translated strings, start by initiating a sync from Transifex. From the Transifex dashboard, navigate to Settings > Integrations > GitHub > Manage, and click the **Manual Sync** button at top right.
+
+
+
+Enter a threshold percentage of 1 (to ensure all translations are captured) and select the `develop` branch, then click **Sync**. This will initiate a pull request to GitHub to update any newly modified translation (`.po`) files.
+
+!!! tip
+ The new PR should appear within a few minutes. If it does not, check that there are in fact new translations to be added.
+
+
+
+Once the PR has been merged, the updated strings need to be compiled into new `.mo` files so they can be used by the application. Update the `develop` branch locally to pull in the changes from the Transifex PR, then run Django's [`compilemessages`](https://docs.djangoproject.com/en/stable/ref/django-admin/#django-admin-compilemessages) management command:
+
+```nohighlight
+./manage.py compilemessages
+```
+
+Once any new `.mo` files have been generated, they need to be committed and pushed back up to GitHub. (Again, this is typically done as part of publishing a new NetBox release.)
## Proposing New Languages
diff --git a/docs/development/user-preferences.md b/docs/development/user-preferences.md
index ceb5321a9..deb469bfb 100644
--- a/docs/development/user-preferences.md
+++ b/docs/development/user-preferences.md
@@ -11,4 +11,3 @@ The `users.UserConfig` model holds individual preferences for each user in the f
| pagination.placement | Where to display the paginator controls relative to the table |
| tables.${table}.columns | The ordered list of columns to display when viewing the table |
| tables.${table}.ordering | A list of column names by which the table should be ordered |
-| ui.colormode | Light or dark mode in the user interface |
diff --git a/docs/development/web-ui.md b/docs/development/web-ui.md
index 8afbd96c1..b3b94bd06 100644
--- a/docs/development/web-ui.md
+++ b/docs/development/web-ui.md
@@ -1,25 +1,37 @@
# Web UI Development
+## Code Structure
+
+Most static resources for the NetBox UI are housed within the `netbox/project-static/` directory.
+
+| Path | Description |
+|-----------|----------------------------------------------------|
+| `dist/` | Destination path for installed dependencies |
+| `docs/` | Local build path for documentation |
+| `img/` | Image files |
+| `js/` | Miscellaneous JavaScript resources served directly |
+| `src/` | TypeScript resources (to be compiled into JS) |
+| `styles/` | Sass resources (to be compiled into CSS) |
+
## Front End Technologies
-The NetBox UI is built on languages and frameworks:
+Front end scripting is written in [TypeScript](https://www.typescriptlang.org/), which is a strongly-typed extension to JavaScript. TypeScript is "transpiled" into JavaScript resources which are served to and executed by the client web browser.
-### Styling & HTML Elements
+All UI styling is written in [Sass](https://sass-lang.com/), which is an extension to browser-native [Cascading Stylesheets (CSS)](https://developer.mozilla.org/en-US/docs/Web/CSS). Similar to how TypeScript content is transpiled to JavaScript, Sass resources (`.scss` files) are compiled to CSS.
-#### [Bootstrap](https://getbootstrap.com/) 5
+## Dependencies
-The majority of the NetBox UI is made up of stock Bootstrap components, with some styling modifications and custom components added on an as-needed basis. Bootstrap uses [Sass](https://sass-lang.com/), and NetBox extends Bootstrap's core Sass files for theming and customization.
+The following software is employed by the NetBox user interface.
-### Client-side Scripting
-
-#### [TypeScript](https://www.typescriptlang.org/)
-
-All client-side scripting is transpiled from TypeScript to JavaScript and served by Django. In development, TypeScript is an _extremely_ effective tool for accurately describing and checking the code, which leads to significantly fewer bugs, a better development experience, and more predictable/readable code.
-
-As part of the [bundling](#bundling) process, Bootstrap's JavaScript plugins are imported and bundled alongside NetBox's front-end code.
-
-!!! danger "NetBox is jQuery-free"
- Following the Bootstrap team's deprecation of jQuery in Bootstrap 5, NetBox also no longer uses jQuery in front-end code.
+* [Bootstrap 5](https://getbootstrap.com/) - A popular CSS & JS framework
+* [clipboard.js](https://clipboardjs.com/) - A lightweight package for enabling copy-to-clipboard functionality
+* [flatpickr](https://flatpickr.js.org/) - A lightweight date & time selection widget
+* [gridstack.js](https://gridstackjs.com/) - Enables interactive grid layouts (for the dashboard)
+* [HTMX](https://htmx.org/) - Enables dynamic web interfaces through the use of HTML element attributes
+* [Material Design Icons](https://pictogrammers.com/library/mdi/) - An extensive open source collection of graphical icons, delivered as a web font
+* [query-string](https://www.npmjs.com/package/query-string) - Assists with parsing URL query strings
+* [Tabler](https://tabler.io/) - A web application UI toolkit & theme based on Bootstrap 5
+* [Tom Select](https://tom-select.js.org/) - Provides dynamic selection form fields
## Guidance
@@ -54,6 +66,41 @@ $ yarn
!!! warning "Check Your Working Directory"
You need to be in the `netbox/project-static` directory to run the below `yarn` commands.
+### Updating Dependencies
+
+Run `yarn outdated` to identify outdated dependencies.
+
+```
+$ yarn outdated
+yarn outdated v1.22.19
+info Color legend :
+ " ` tags.
- **/
-MarkdownIt.prototype.renderInline = function (src, env) {
- env = env || {};
- return this.renderer.render(this.parseInline(src, env), this.options, env);
-};
-module.exports = MarkdownIt;
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/parser_block.js":
-/*!*************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/parser_block.js ***!
- \*************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-/** internal
- * class ParserBlock
- *
- * Block-level tokenizer.
- **/
-
-
-var Ruler = __webpack_require__(/*! ./ruler */ "../../../node_modules/markdown-it/lib/ruler.js");
-var _rules = [
-// First 2 params - rule name & source. Secondary array - list of rules,
-// which can be terminated by this one.
-['table', __webpack_require__(/*! ./rules_block/table */ "../../../node_modules/markdown-it/lib/rules_block/table.js"), ['paragraph', 'reference']], ['code', __webpack_require__(/*! ./rules_block/code */ "../../../node_modules/markdown-it/lib/rules_block/code.js")], ['fence', __webpack_require__(/*! ./rules_block/fence */ "../../../node_modules/markdown-it/lib/rules_block/fence.js"), ['paragraph', 'reference', 'blockquote', 'list']], ['blockquote', __webpack_require__(/*! ./rules_block/blockquote */ "../../../node_modules/markdown-it/lib/rules_block/blockquote.js"), ['paragraph', 'reference', 'blockquote', 'list']], ['hr', __webpack_require__(/*! ./rules_block/hr */ "../../../node_modules/markdown-it/lib/rules_block/hr.js"), ['paragraph', 'reference', 'blockquote', 'list']], ['list', __webpack_require__(/*! ./rules_block/list */ "../../../node_modules/markdown-it/lib/rules_block/list.js"), ['paragraph', 'reference', 'blockquote']], ['reference', __webpack_require__(/*! ./rules_block/reference */ "../../../node_modules/markdown-it/lib/rules_block/reference.js")], ['html_block', __webpack_require__(/*! ./rules_block/html_block */ "../../../node_modules/markdown-it/lib/rules_block/html_block.js"), ['paragraph', 'reference', 'blockquote']], ['heading', __webpack_require__(/*! ./rules_block/heading */ "../../../node_modules/markdown-it/lib/rules_block/heading.js"), ['paragraph', 'reference', 'blockquote']], ['lheading', __webpack_require__(/*! ./rules_block/lheading */ "../../../node_modules/markdown-it/lib/rules_block/lheading.js")], ['paragraph', __webpack_require__(/*! ./rules_block/paragraph */ "../../../node_modules/markdown-it/lib/rules_block/paragraph.js")]];
-
-/**
- * new ParserBlock()
- **/
-function ParserBlock() {
- /**
- * ParserBlock#ruler -> Ruler
- *
- * [[Ruler]] instance. Keep configuration of block rules.
- **/
- this.ruler = new Ruler();
- for (var i = 0; i < _rules.length; i++) {
- this.ruler.push(_rules[i][0], _rules[i][1], {
- alt: (_rules[i][2] || []).slice()
- });
- }
-}
-
-// Generate tokens for input range
-//
-ParserBlock.prototype.tokenize = function (state, startLine, endLine) {
- var ok,
- i,
- rules = this.ruler.getRules(''),
- len = rules.length,
- line = startLine,
- hasEmptyLines = false,
- maxNesting = state.md.options.maxNesting;
- while (line < endLine) {
- state.line = line = state.skipEmptyLines(line);
- if (line >= endLine) {
- break;
- }
-
- // Termination condition for nested calls.
- // Nested calls currently used for blockquotes & lists
- if (state.sCount[line] < state.blkIndent) {
- break;
- }
-
- // If nesting level exceeded - skip tail to the end. That's not ordinary
- // situation and we should not care about content.
- if (state.level >= maxNesting) {
- state.line = endLine;
- break;
- }
-
- // Try all possible rules.
- // On success, rule should:
- //
- // - update `state.line`
- // - update `state.tokens`
- // - return true
-
- for (i = 0; i < len; i++) {
- ok = rules[i](state, line, endLine, false);
- if (ok) {
- break;
- }
- }
-
- // set state.tight if we had an empty line before current tag
- // i.e. latest empty line should not count
- state.tight = !hasEmptyLines;
-
- // paragraph might "eat" one newline after it in nested lists
- if (state.isEmpty(state.line - 1)) {
- hasEmptyLines = true;
- }
- line = state.line;
- if (line < endLine && state.isEmpty(line)) {
- hasEmptyLines = true;
- line++;
- state.line = line;
- }
- }
-};
-
-/**
- * ParserBlock.parse(str, md, env, outTokens)
- *
- * Process input string and push block tokens into `outTokens`
- **/
-ParserBlock.prototype.parse = function (src, md, env, outTokens) {
- var state;
- if (!src) {
- return;
- }
- state = new this.State(src, md, env, outTokens);
- this.tokenize(state, state.line, state.lineMax);
-};
-ParserBlock.prototype.State = __webpack_require__(/*! ./rules_block/state_block */ "../../../node_modules/markdown-it/lib/rules_block/state_block.js");
-module.exports = ParserBlock;
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/parser_core.js":
-/*!************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/parser_core.js ***!
- \************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-/** internal
- * class Core
- *
- * Top-level rules executor. Glues block/inline parsers and does intermediate
- * transformations.
- **/
-
-
-var Ruler = __webpack_require__(/*! ./ruler */ "../../../node_modules/markdown-it/lib/ruler.js");
-var _rules = [['normalize', __webpack_require__(/*! ./rules_core/normalize */ "../../../node_modules/markdown-it/lib/rules_core/normalize.js")], ['block', __webpack_require__(/*! ./rules_core/block */ "../../../node_modules/markdown-it/lib/rules_core/block.js")], ['inline', __webpack_require__(/*! ./rules_core/inline */ "../../../node_modules/markdown-it/lib/rules_core/inline.js")], ['linkify', __webpack_require__(/*! ./rules_core/linkify */ "../../../node_modules/markdown-it/lib/rules_core/linkify.js")], ['replacements', __webpack_require__(/*! ./rules_core/replacements */ "../../../node_modules/markdown-it/lib/rules_core/replacements.js")], ['smartquotes', __webpack_require__(/*! ./rules_core/smartquotes */ "../../../node_modules/markdown-it/lib/rules_core/smartquotes.js")]];
-
-/**
- * new Core()
- **/
-function Core() {
- /**
- * Core#ruler -> Ruler
- *
- * [[Ruler]] instance. Keep configuration of core rules.
- **/
- this.ruler = new Ruler();
- for (var i = 0; i < _rules.length; i++) {
- this.ruler.push(_rules[i][0], _rules[i][1]);
- }
-}
-
-/**
- * Core.process(state)
- *
- * Executes core chain rules.
- **/
-Core.prototype.process = function (state) {
- var i, l, rules;
- rules = this.ruler.getRules('');
- for (i = 0, l = rules.length; i < l; i++) {
- rules[i](state);
- }
-};
-Core.prototype.State = __webpack_require__(/*! ./rules_core/state_core */ "../../../node_modules/markdown-it/lib/rules_core/state_core.js");
-module.exports = Core;
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/parser_inline.js":
-/*!**************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/parser_inline.js ***!
- \**************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-/** internal
- * class ParserInline
- *
- * Tokenizes paragraph content.
- **/
-
-
-var Ruler = __webpack_require__(/*! ./ruler */ "../../../node_modules/markdown-it/lib/ruler.js");
-
-////////////////////////////////////////////////////////////////////////////////
-// Parser rules
-
-var _rules = [['text', __webpack_require__(/*! ./rules_inline/text */ "../../../node_modules/markdown-it/lib/rules_inline/text.js")], ['newline', __webpack_require__(/*! ./rules_inline/newline */ "../../../node_modules/markdown-it/lib/rules_inline/newline.js")], ['escape', __webpack_require__(/*! ./rules_inline/escape */ "../../../node_modules/markdown-it/lib/rules_inline/escape.js")], ['backticks', __webpack_require__(/*! ./rules_inline/backticks */ "../../../node_modules/markdown-it/lib/rules_inline/backticks.js")], ['strikethrough', (__webpack_require__(/*! ./rules_inline/strikethrough */ "../../../node_modules/markdown-it/lib/rules_inline/strikethrough.js").tokenize)], ['emphasis', (__webpack_require__(/*! ./rules_inline/emphasis */ "../../../node_modules/markdown-it/lib/rules_inline/emphasis.js").tokenize)], ['link', __webpack_require__(/*! ./rules_inline/link */ "../../../node_modules/markdown-it/lib/rules_inline/link.js")], ['image', __webpack_require__(/*! ./rules_inline/image */ "../../../node_modules/markdown-it/lib/rules_inline/image.js")], ['autolink', __webpack_require__(/*! ./rules_inline/autolink */ "../../../node_modules/markdown-it/lib/rules_inline/autolink.js")], ['html_inline', __webpack_require__(/*! ./rules_inline/html_inline */ "../../../node_modules/markdown-it/lib/rules_inline/html_inline.js")], ['entity', __webpack_require__(/*! ./rules_inline/entity */ "../../../node_modules/markdown-it/lib/rules_inline/entity.js")]];
-var _rules2 = [['balance_pairs', __webpack_require__(/*! ./rules_inline/balance_pairs */ "../../../node_modules/markdown-it/lib/rules_inline/balance_pairs.js")], ['strikethrough', (__webpack_require__(/*! ./rules_inline/strikethrough */ "../../../node_modules/markdown-it/lib/rules_inline/strikethrough.js").postProcess)], ['emphasis', (__webpack_require__(/*! ./rules_inline/emphasis */ "../../../node_modules/markdown-it/lib/rules_inline/emphasis.js").postProcess)], ['text_collapse', __webpack_require__(/*! ./rules_inline/text_collapse */ "../../../node_modules/markdown-it/lib/rules_inline/text_collapse.js")]];
-
-/**
- * new ParserInline()
- **/
-function ParserInline() {
- var i;
-
- /**
- * ParserInline#ruler -> Ruler
- *
- * [[Ruler]] instance. Keep configuration of inline rules.
- **/
- this.ruler = new Ruler();
- for (i = 0; i < _rules.length; i++) {
- this.ruler.push(_rules[i][0], _rules[i][1]);
- }
-
- /**
- * ParserInline#ruler2 -> Ruler
- *
- * [[Ruler]] instance. Second ruler used for post-processing
- * (e.g. in emphasis-like rules).
- **/
- this.ruler2 = new Ruler();
- for (i = 0; i < _rules2.length; i++) {
- this.ruler2.push(_rules2[i][0], _rules2[i][1]);
- }
-}
-
-// Skip single token by running all rules in validation mode;
-// returns `true` if any rule reported success
-//
-ParserInline.prototype.skipToken = function (state) {
- var ok,
- i,
- pos = state.pos,
- rules = this.ruler.getRules(''),
- len = rules.length,
- maxNesting = state.md.options.maxNesting,
- cache = state.cache;
- if (typeof cache[pos] !== 'undefined') {
- state.pos = cache[pos];
- return;
- }
- if (state.level < maxNesting) {
- for (i = 0; i < len; i++) {
- // Increment state.level and decrement it later to limit recursion.
- // It's harmless to do here, because no tokens are created. But ideally,
- // we'd need a separate private state variable for this purpose.
- //
- state.level++;
- ok = rules[i](state, true);
- state.level--;
- if (ok) {
- break;
- }
- }
- } else {
- // Too much nesting, just skip until the end of the paragraph.
- //
- // NOTE: this will cause links to behave incorrectly in the following case,
- // when an amount of `[` is exactly equal to `maxNesting + 1`:
- //
- // [[[[[[[[[[[[[[[[[[[[[foo]()
- //
- // TODO: remove this workaround when CM standard will allow nested links
- // (we can replace it by preventing links from being parsed in
- // validation mode)
- //
- state.pos = state.posMax;
- }
- if (!ok) {
- state.pos++;
- }
- cache[pos] = state.pos;
-};
-
-// Generate tokens for input range
-//
-ParserInline.prototype.tokenize = function (state) {
- var ok,
- i,
- rules = this.ruler.getRules(''),
- len = rules.length,
- end = state.posMax,
- maxNesting = state.md.options.maxNesting;
- while (state.pos < end) {
- // Try all possible rules.
- // On success, rule should:
- //
- // - update `state.pos`
- // - update `state.tokens`
- // - return true
-
- if (state.level < maxNesting) {
- for (i = 0; i < len; i++) {
- ok = rules[i](state, false);
- if (ok) {
- break;
- }
- }
- }
- if (ok) {
- if (state.pos >= end) {
- break;
- }
- continue;
- }
- state.pending += state.src[state.pos++];
- }
- if (state.pending) {
- state.pushPending();
- }
-};
-
-/**
- * ParserInline.parse(str, md, env, outTokens)
- *
- * Process input string and push inline tokens into `outTokens`
- **/
-ParserInline.prototype.parse = function (str, md, env, outTokens) {
- var i, rules, len;
- var state = new this.State(str, md, env, outTokens);
- this.tokenize(state);
- rules = this.ruler2.getRules('');
- len = rules.length;
- for (i = 0; i < len; i++) {
- rules[i](state);
- }
-};
-ParserInline.prototype.State = __webpack_require__(/*! ./rules_inline/state_inline */ "../../../node_modules/markdown-it/lib/rules_inline/state_inline.js");
-module.exports = ParserInline;
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/presets/commonmark.js":
-/*!*******************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/presets/commonmark.js ***!
- \*******************************************************************/
-/***/ (function(module) {
-
-// Commonmark default options
-
-
-
-module.exports = {
- options: {
- html: true,
- // Enable HTML tags in source
- xhtmlOut: true,
- // Use '/' to close single tags (<br />)
+ **/
+MarkdownIt.prototype.renderInline = function (src, env) {
+ env = env || {};
+ return this.renderer.render(this.parseInline(src, env), this.options, env);
+};
+module.exports = MarkdownIt;
+
+/***/ }),
+
+/***/ "../node_modules/mdurl/build/index.cjs.js":
+/*!************************************************!*\
+ !*** ../node_modules/mdurl/build/index.cjs.js ***!
+ \************************************************/
+/***/ (function(__unused_webpack_module, exports) {
+
+
+
+/* eslint-disable no-bitwise */
+const decodeCache = {};
+function getDecodeCache(exclude) {
+ let cache = decodeCache[exclude];
+ if (cache) {
+ return cache;
+ }
+ cache = decodeCache[exclude] = [];
+ for (let i = 0; i < 128; i++) {
+ const ch = String.fromCharCode(i);
+ cache.push(ch);
+ }
+ for (let i = 0; i < exclude.length; i++) {
+ const ch = exclude.charCodeAt(i);
+ cache[ch] = '%' + ('0' + ch.toString(16).toUpperCase()).slice(-2);
+ }
+ return cache;
+}
+
+// Decode percent-encoded string.
+//
+function decode(string, exclude) {
+ if (typeof exclude !== 'string') {
+ exclude = decode.defaultChars;
+ }
+ const cache = getDecodeCache(exclude);
+ return string.replace(/(%[a-f0-9]{2})+/gi, function (seq) {
+ let result = '';
+ for (let i = 0, l = seq.length; i < l; i += 3) {
+ const b1 = parseInt(seq.slice(i + 1, i + 3), 16);
+ if (b1 < 0x80) {
+ result += cache[b1];
+ continue;
+ }
+ if ((b1 & 0xE0) === 0xC0 && i + 3 < l) {
+ // 110xxxxx 10xxxxxx
+ const b2 = parseInt(seq.slice(i + 4, i + 6), 16);
+ if ((b2 & 0xC0) === 0x80) {
+ const chr = b1 << 6 & 0x7C0 | b2 & 0x3F;
+ if (chr < 0x80) {
+ result += '\ufffd\ufffd';
+ } else {
+ result += String.fromCharCode(chr);
+ }
+ i += 3;
+ continue;
+ }
+ }
+ if ((b1 & 0xF0) === 0xE0 && i + 6 < l) {
+ // 1110xxxx 10xxxxxx 10xxxxxx
+ const b2 = parseInt(seq.slice(i + 4, i + 6), 16);
+ const b3 = parseInt(seq.slice(i + 7, i + 9), 16);
+ if ((b2 & 0xC0) === 0x80 && (b3 & 0xC0) === 0x80) {
+ const chr = b1 << 12 & 0xF000 | b2 << 6 & 0xFC0 | b3 & 0x3F;
+ if (chr < 0x800 || chr >= 0xD800 && chr <= 0xDFFF) {
+ result += '\ufffd\ufffd\ufffd';
+ } else {
+ result += String.fromCharCode(chr);
+ }
+ i += 6;
+ continue;
+ }
+ }
+ if ((b1 & 0xF8) === 0xF0 && i + 9 < l) {
+ // 111110xx 10xxxxxx 10xxxxxx 10xxxxxx
+ const b2 = parseInt(seq.slice(i + 4, i + 6), 16);
+ const b3 = parseInt(seq.slice(i + 7, i + 9), 16);
+ const b4 = parseInt(seq.slice(i + 10, i + 12), 16);
+ if ((b2 & 0xC0) === 0x80 && (b3 & 0xC0) === 0x80 && (b4 & 0xC0) === 0x80) {
+ let chr = b1 << 18 & 0x1C0000 | b2 << 12 & 0x3F000 | b3 << 6 & 0xFC0 | b4 & 0x3F;
+ if (chr < 0x10000 || chr > 0x10FFFF) {
+ result += '\ufffd\ufffd\ufffd\ufffd';
+ } else {
+ chr -= 0x10000;
+ result += String.fromCharCode(0xD800 + (chr >> 10), 0xDC00 + (chr & 0x3FF));
+ }
+ i += 9;
+ continue;
+ }
+ }
+ result += '\ufffd';
+ }
+ return result;
+ });
+}
+decode.defaultChars = ';/?:@&=+$,#';
+decode.componentChars = '';
+const encodeCache = {};
+
+// Create a lookup array where anything but characters in `chars` string
+// and alphanumeric chars is percent-encoded.
+//
+function getEncodeCache(exclude) {
+ let cache = encodeCache[exclude];
+ if (cache) {
+ return cache;
+ }
+ cache = encodeCache[exclude] = [];
+ for (let i = 0; i < 128; i++) {
+ const ch = String.fromCharCode(i);
+ if (/^[0-9a-z]$/i.test(ch)) {
+ // always allow unencoded alphanumeric characters
+ cache.push(ch);
+ } else {
+ cache.push('%' + ('0' + i.toString(16).toUpperCase()).slice(-2));
+ }
+ }
+ for (let i = 0; i < exclude.length; i++) {
+ cache[exclude.charCodeAt(i)] = exclude[i];
+ }
+ return cache;
+}
+
+// Encode unsafe characters with percent-encoding, skipping already
+// encoded sequences.
+//
+// - string - string to encode
+// - exclude - list of characters to ignore (in addition to a-zA-Z0-9)
+// - keepEscaped - don't encode '%' in a correct escape sequence (default: true)
+//
+function encode(string, exclude, keepEscaped) {
+ if (typeof exclude !== 'string') {
+ // encode(string, keepEscaped)
+ keepEscaped = exclude;
+ exclude = encode.defaultChars;
+ }
+ if (typeof keepEscaped === 'undefined') {
+ keepEscaped = true;
+ }
+ const cache = getEncodeCache(exclude);
+ let result = '';
+ for (let i = 0, l = string.length; i < l; i++) {
+ const code = string.charCodeAt(i);
+ if (keepEscaped && code === 0x25 /* % */ && i + 2 < l) {
+ if (/^[0-9a-f]{2}$/i.test(string.slice(i + 1, i + 3))) {
+ result += string.slice(i, i + 3);
+ i += 2;
+ continue;
+ }
+ }
+ if (code < 128) {
+ result += cache[code];
+ continue;
+ }
+ if (code >= 0xD800 && code <= 0xDFFF) {
+ if (code >= 0xD800 && code <= 0xDBFF && i + 1 < l) {
+ const nextCode = string.charCodeAt(i + 1);
+ if (nextCode >= 0xDC00 && nextCode <= 0xDFFF) {
+ result += encodeURIComponent(string[i] + string[i + 1]);
+ i++;
+ continue;
+ }
+ }
+ result += '%EF%BF%BD';
+ continue;
+ }
+ result += encodeURIComponent(string[i]);
+ }
+ return result;
+}
+encode.defaultChars = ";/?:@&=+$,-_.!~*'()#";
+encode.componentChars = "-_.!~*'()";
+function format(url) {
+ let result = '';
+ result += url.protocol || '';
+ result += url.slashes ? '//' : '';
+ result += url.auth ? url.auth + '@' : '';
+ if (url.hostname && url.hostname.indexOf(':') !== -1) {
+ // ipv6 address
+ result += '[' + url.hostname + ']';
+ } else {
+ result += url.hostname || '';
+ }
+ result += url.port ? ':' + url.port : '';
+ result += url.pathname || '';
+ result += url.search || '';
+ result += url.hash || '';
+ return result;
+}
+
+// Copyright Joyent, Inc. and other Node contributors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to permit
+// persons to whom the Software is furnished to do so, subject to the
+// following conditions:
+//
+// The above copyright notice and this permission notice shall be included
+// in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
+// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+// USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+//
+// Changes from joyent/node:
+//
+// 1. No leading slash in paths,
+// e.g. in `url.parse('http://foo?bar')` pathname is ``, not `/`
+//
+// 2. Backslashes are not replaced with slashes,
+// so `http:\\example.org\` is treated like a relative path
+//
+// 3. Trailing colon is treated like a part of the path,
+// i.e. in `http://example.org:foo` pathname is `:foo`
+//
+// 4. Nothing is URL-encoded in the resulting object,
+// (in joyent/node some chars in auth and paths are encoded)
+//
+// 5. `url.parse()` does not have `parseQueryString` argument
+//
+// 6. Removed extraneous result properties: `host`, `path`, `query`, etc.,
+// which can be constructed using other parts of the url.
+//
+
+function Url() {
+ this.protocol = null;
+ this.slashes = null;
+ this.auth = null;
+ this.port = null;
+ this.hostname = null;
+ this.hash = null;
+ this.search = null;
+ this.pathname = null;
+}
+
+// Reference: RFC 3986, RFC 1808, RFC 2396
+
+// define these here so at least they only have to be
+// compiled once on the first module load.
+const protocolPattern = /^([a-z0-9.+-]+:)/i;
+const portPattern = /:[0-9]*$/;
+
+// Special case for a simple path URL
+/* eslint-disable-next-line no-useless-escape */
+const simplePathPattern = /^(\/\/?(?!\/)[^\?\s]*)(\?[^\s]*)?$/;
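This fast-path pattern accepts a host-less input: a path starting with one or two slashes (but not three) followed by an optional query string. The same expression can be exercised directly in Python, for example:

    import re

    SIMPLE_PATH = re.compile(r'^(//?(?!/)[^?\s]*)(\?[^\s]*)?$')

    m = SIMPLE_PATH.match('/foo/bar?x=1')
    print(m.group(1), m.group(2))        # /foo/bar ?x=1
    print(SIMPLE_PATH.match('///nope'))  # None (three leading slashes is rejected)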
+
+// RFC 2396: characters reserved for delimiting URLs.
+// We actually just auto-escape these.
+const delims = ['<', '>', '"', '`', ' ', '\r', '\n', '\t'];
+
+// RFC 2396: characters not allowed for various reasons.
+const unwise = ['{', '}', '|', '\\', '^', '`'].concat(delims);
+
+// Allowed by RFCs, but cause of XSS attacks. Always escape these.
+const autoEscape = ['\''].concat(unwise);
+// Characters that are never ever allowed in a hostname.
+// Note that any invalid chars are also handled, but these
+// are the ones that are *expected* to be seen, so we fast-path
+// them.
+const nonHostChars = ['%', '/', '?', ';', '#'].concat(autoEscape);
+const hostEndingChars = ['/', '?', '#'];
+const hostnameMaxLen = 255;
+const hostnamePartPattern = /^[+a-z0-9A-Z_-]{0,63}$/;
+const hostnamePartStart = /^([+a-z0-9A-Z_-]{0,63})(.*)$/;
+// protocols that can allow "unsafe" and "unwise" chars.
+// protocols that never have a hostname.
+const hostlessProtocol = {
+ javascript: true,
+ 'javascript:': true
+};
+// protocols that always contain a // bit.
+const slashedProtocol = {
+ http: true,
+ https: true,
+ ftp: true,
+ gopher: true,
+ file: true,
+ 'http:': true,
+ 'https:': true,
+ 'ftp:': true,
+ 'gopher:': true,
+ 'file:': true
+};
+function urlParse(url, slashesDenoteHost) {
+ if (url && url instanceof Url) return url;
+ const u = new Url();
+ u.parse(url, slashesDenoteHost);
+ return u;
+}
+Url.prototype.parse = function (url, slashesDenoteHost) {
+ let lowerProto, hec, slashes;
+ let rest = url;
+
+ // trim before proceeding.
+ // This is to support parse stuff like " http://foo.com \n"
+ rest = rest.trim();
+ if (!slashesDenoteHost && url.split('#').length === 1) {
+ // Try fast path regexp
+ const simplePath = simplePathPattern.exec(rest);
+ if (simplePath) {
+ this.pathname = simplePath[1];
+ if (simplePath[2]) {
+ this.search = simplePath[2];
+ }
+ return this;
+ }
+ }
+ let proto = protocolPattern.exec(rest);
+ if (proto) {
+ proto = proto[0];
+ lowerProto = proto.toLowerCase();
+ this.protocol = proto;
+ rest = rest.substr(proto.length);
+ }
+
+ // figure out if it's got a host
+ // user@server is *always* interpreted as a hostname, and url
+ // resolution will treat //foo/bar as host=foo,path=bar because that's
+ // how the browser resolves relative URLs.
+ /* eslint-disable-next-line no-useless-escape */
+ if (slashesDenoteHost || proto || rest.match(/^\/\/[^@\/]+@[^@\/]+/)) {
+ slashes = rest.substr(0, 2) === '//';
+ if (slashes && !(proto && hostlessProtocol[proto])) {
+ rest = rest.substr(2);
+ this.slashes = true;
+ }
+ }
+ if (!hostlessProtocol[proto] && (slashes || proto && !slashedProtocol[proto])) {
+ // there's a hostname.
+ // the first instance of /, ?, ;, or # ends the host.
+ //
+ // If there is an @ in the hostname, then non-host chars *are* allowed
+ // to the left of the last @ sign, unless some host-ending character
+ // comes *before* the @-sign.
+ // URLs are obnoxious.
+ //
+ // ex:
+ // http://a@b@c/ => user:a@b host:c
+ // http://a@b?@c => user:a host:c path:/?@c
+
+ // v0.12 TODO(isaacs): This is not quite how Chrome does things.
+ // Review our test case against browsers more comprehensively.
+
+ // find the first instance of any hostEndingChars
+ let hostEnd = -1;
+ for (let i = 0; i < hostEndingChars.length; i++) {
+ hec = rest.indexOf(hostEndingChars[i]);
+ if (hec !== -1 && (hostEnd === -1 || hec < hostEnd)) {
+ hostEnd = hec;
+ }
+ }
+
+ // at this point, either we have an explicit point where the
+ // auth portion cannot go past, or the last @ char is the decider.
+ let auth, atSign;
+ if (hostEnd === -1) {
+ // atSign can be anywhere.
+ atSign = rest.lastIndexOf('@');
+ } else {
+ // atSign must be in auth portion.
+ // http://a@b/c@d => host:b auth:a path:/c@d
+ atSign = rest.lastIndexOf('@', hostEnd);
+ }
+
+ // Now we have a portion which is definitely the auth.
+ // Pull that off.
+ if (atSign !== -1) {
+ auth = rest.slice(0, atSign);
+ rest = rest.slice(atSign + 1);
+ this.auth = auth;
+ }
+
+ // the host is the remaining to the left of the first non-host char
+ hostEnd = -1;
+ for (let i = 0; i < nonHostChars.length; i++) {
+ hec = rest.indexOf(nonHostChars[i]);
+ if (hec !== -1 && (hostEnd === -1 || hec < hostEnd)) {
+ hostEnd = hec;
+ }
+ }
+ // if we still have not hit it, then the entire thing is a host.
+ if (hostEnd === -1) {
+ hostEnd = rest.length;
+ }
+ if (rest[hostEnd - 1] === ':') {
+ hostEnd--;
+ }
+ const host = rest.slice(0, hostEnd);
+ rest = rest.slice(hostEnd);
+
+ // pull out port.
+ this.parseHost(host);
+
+ // we've indicated that there is a hostname,
+ // so even if it's empty, it has to be present.
+ this.hostname = this.hostname || '';
+
+ // if hostname begins with [ and ends with ]
+ // assume that it's an IPv6 address.
+ const ipv6Hostname = this.hostname[0] === '[' && this.hostname[this.hostname.length - 1] === ']';
+
+ // validate a little.
+ if (!ipv6Hostname) {
+ const hostparts = this.hostname.split(/\./);
+ for (let i = 0, l = hostparts.length; i < l; i++) {
+ const part = hostparts[i];
+ if (!part) {
+ continue;
+ }
+ if (!part.match(hostnamePartPattern)) {
+ let newpart = '';
+ for (let j = 0, k = part.length; j < k; j++) {
+ if (part.charCodeAt(j) > 127) {
+ // we replace non-ASCII char with a temporary placeholder
+ // we need this to make sure size of hostname is not
+ // broken by replacing non-ASCII by nothing
+ newpart += 'x';
+ } else {
+ newpart += part[j];
+ }
+ }
+ // we test again with ASCII char only
+ if (!newpart.match(hostnamePartPattern)) {
+ const validParts = hostparts.slice(0, i);
+ const notHost = hostparts.slice(i + 1);
+ const bit = part.match(hostnamePartStart);
+ if (bit) {
+ validParts.push(bit[1]);
+ notHost.unshift(bit[2]);
+ }
+ if (notHost.length) {
+ rest = notHost.join('.') + rest;
+ }
+ this.hostname = validParts.join('.');
+ break;
+ }
+ }
+ }
+ }
+ if (this.hostname.length > hostnameMaxLen) {
+ this.hostname = '';
+ }
+
+ // strip [ and ] from the hostname
+ // the host field still retains them, though
+ if (ipv6Hostname) {
+ this.hostname = this.hostname.substr(1, this.hostname.length - 2);
+ }
+ }
+
+ // chop off from the tail first.
+ const hash = rest.indexOf('#');
+ if (hash !== -1) {
+ // got a fragment string.
+ this.hash = rest.substr(hash);
+ rest = rest.slice(0, hash);
+ }
+ const qm = rest.indexOf('?');
+ if (qm !== -1) {
+ this.search = rest.substr(qm);
+ rest = rest.slice(0, qm);
+ }
+ if (rest) {
+ this.pathname = rest;
+ }
+ if (slashedProtocol[lowerProto] && this.hostname && !this.pathname) {
+ this.pathname = '';
+ }
+ return this;
+};
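One subtle step in parse() is the hostname validation: each dot-separated label is matched against a 63-character pattern, with non-ASCII characters temporarily replaced by a placeholder so the length check is not skewed, and any invalid remainder is pushed back into the path. A small Python rendering of just the label check (an illustrative sketch; the helper name is made up):

    import re

    LABEL = re.compile(r'^[+a-zA-Z0-9_-]{0,63}$')

    def label_ok(part):
        # Non-ASCII characters count as one placeholder character, mirroring
        # the 'x' substitution above, so only length and charset are enforced.
        ascii_equiv = ''.join('x' if ord(c) > 127 else c for c in part)
        return bool(LABEL.match(ascii_equiv))

    print(label_ok('bücher'))    # True  (IDN label, still within 63 chars)
    print(label_ok('a' * 64))    # False (label too long)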
+Url.prototype.parseHost = function (host) {
+ let port = portPattern.exec(host);
+ if (port) {
+ port = port[0];
+ if (port !== ':') {
+ this.port = port.substr(1);
+ }
+ host = host.substr(0, host.length - port.length);
+ }
+ if (host) {
+ this.hostname = host;
+ }
+};
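parseHost() peels a trailing ':<digits>' off the host and drops a bare trailing colon without recording a port. Sketched in Python with the same regex (the helper name is illustrative):

    import re

    PORT = re.compile(r':[0-9]*$')

    def parse_host(host):
        port = None
        m = PORT.search(host)
        if m:
            if m.group(0) != ':':
                port = m.group(0)[1:]
            host = host[:m.start()]
        return host or None, port

    print(parse_host('example.org:8080'))   # ('example.org', '8080')
    print(parse_host('example.org:'))       # ('example.org', None)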
+exports.decode = decode;
+exports.encode = encode;
+exports.format = format;
+exports.parse = urlParse;
+
+/***/ }),
+
+/***/ "../node_modules/uc.micro/build/index.cjs.js":
+/*!***************************************************!*\
+ !*** ../node_modules/uc.micro/build/index.cjs.js ***!
+ \***************************************************/
+/***/ (function(__unused_webpack_module, exports) {
+
+
+
+var regex$5 = /[\0-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]/;
+var regex$4 = /[\0-\x1F\x7F-\x9F]/;
+var regex$3 = /[\xAD\u0600-\u0605\u061C\u06DD\u070F\u0890\u0891\u08E2\u180E\u200B-\u200F\u202A-\u202E\u2060-\u2064\u2066-\u206F\uFEFF\uFFF9-\uFFFB]|\uD804[\uDCBD\uDCCD]|\uD80D[\uDC30-\uDC3F]|\uD82F[\uDCA0-\uDCA3]|\uD834[\uDD73-\uDD7A]|\uDB40[\uDC01\uDC20-\uDC7F]/;
+var regex$2 = /[!-#%-\*,-\/:;\?@\[-\]_\{\}\xA1\xA7\xAB\xB6\xB7\xBB\xBF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061D-\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C77\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166E\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1B7D\u1B7E\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205E\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4F\u2E52-\u2E5D\u3001-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]|\uD800[\uDD00-\uDD02\uDF9F\uDFD0]|\uD801\uDD6F|\uD802[\uDC57\uDD1F\uDD3F\uDE50-\uDE58\uDE7F\uDEF0-\uDEF6\uDF39-\uDF3F\uDF99-\uDF9C]|\uD803[\uDEAD\uDF55-\uDF59\uDF86-\uDF89]|\uD804[\uDC47-\uDC4D\uDCBB\uDCBC\uDCBE-\uDCC1\uDD40-\uDD43\uDD74\uDD75\uDDC5-\uDDC8\uDDCD\uDDDB\uDDDD-\uDDDF\uDE38-\uDE3D\uDEA9]|\uD805[\uDC4B-\uDC4F\uDC5A\uDC5B\uDC5D\uDCC6\uDDC1-\uDDD7\uDE41-\uDE43\uDE60-\uDE6C\uDEB9\uDF3C-\uDF3E]|\uD806[\uDC3B\uDD44-\uDD46\uDDE2\uDE3F-\uDE46\uDE9A-\uDE9C\uDE9E-\uDEA2\uDF00-\uDF09]|\uD807[\uDC41-\uDC45\uDC70\uDC71\uDEF7\uDEF8\uDF43-\uDF4F\uDFFF]|\uD809[\uDC70-\uDC74]|\uD80B[\uDFF1\uDFF2]|\uD81A[\uDE6E\uDE6F\uDEF5\uDF37-\uDF3B\uDF44]|\uD81B[\uDE97-\uDE9A\uDFE2]|\uD82F\uDC9F|\uD836[\uDE87-\uDE8B]|\uD83A[\uDD5E\uDD5F]/;
+var regex$1 = /[\$\+<->\^`\|~\xA2-\xA6\xA8\xA9\xAC\xAE-\xB1\xB4\xB8\xD7\xF7\u02C2-\u02C5\u02D2-\u02DF\u02E5-\u02EB\u02ED\u02EF-\u02FF\u0375\u0384\u0385\u03F6\u0482\u058D-\u058F\u0606-\u0608\u060B\u060E\u060F\u06DE\u06E9\u06FD\u06FE\u07F6\u07FE\u07FF\u0888\u09F2\u09F3\u09FA\u09FB\u0AF1\u0B70\u0BF3-\u0BFA\u0C7F\u0D4F\u0D79\u0E3F\u0F01-\u0F03\u0F13\u0F15-\u0F17\u0F1A-\u0F1F\u0F34\u0F36\u0F38\u0FBE-\u0FC5\u0FC7-\u0FCC\u0FCE\u0FCF\u0FD5-\u0FD8\u109E\u109F\u1390-\u1399\u166D\u17DB\u1940\u19DE-\u19FF\u1B61-\u1B6A\u1B74-\u1B7C\u1FBD\u1FBF-\u1FC1\u1FCD-\u1FCF\u1FDD-\u1FDF\u1FED-\u1FEF\u1FFD\u1FFE\u2044\u2052\u207A-\u207C\u208A-\u208C\u20A0-\u20C0\u2100\u2101\u2103-\u2106\u2108\u2109\u2114\u2116-\u2118\u211E-\u2123\u2125\u2127\u2129\u212E\u213A\u213B\u2140-\u2144\u214A-\u214D\u214F\u218A\u218B\u2190-\u2307\u230C-\u2328\u232B-\u2426\u2440-\u244A\u249C-\u24E9\u2500-\u2767\u2794-\u27C4\u27C7-\u27E5\u27F0-\u2982\u2999-\u29D7\u29DC-\u29FB\u29FE-\u2B73\u2B76-\u2B95\u2B97-\u2BFF\u2CE5-\u2CEA\u2E50\u2E51\u2E80-\u2E99\u2E9B-\u2EF3\u2F00-\u2FD5\u2FF0-\u2FFF\u3004\u3012\u3013\u3020\u3036\u3037\u303E\u303F\u309B\u309C\u3190\u3191\u3196-\u319F\u31C0-\u31E3\u31EF\u3200-\u321E\u322A-\u3247\u3250\u3260-\u327F\u328A-\u32B0\u32C0-\u33FF\u4DC0-\u4DFF\uA490-\uA4C6\uA700-\uA716\uA720\uA721\uA789\uA78A\uA828-\uA82B\uA836-\uA839\uAA77-\uAA79\uAB5B\uAB6A\uAB6B\uFB29\uFBB2-\uFBC2\uFD40-\uFD4F\uFDCF\uFDFC-\uFDFF\uFE62\uFE64-\uFE66\uFE69\uFF04\uFF0B\uFF1C-\uFF1E\uFF3E\uFF40\uFF5C\uFF5E\uFFE0-\uFFE6\uFFE8-\uFFEE\uFFFC\uFFFD]|\uD800[\uDD37-\uDD3F\uDD79-\uDD89\uDD8C-\uDD8E\uDD90-\uDD9C\uDDA0\uDDD0-\uDDFC]|\uD802[\uDC77\uDC78\uDEC8]|\uD805\uDF3F|\uD807[\uDFD5-\uDFF1]|\uD81A[\uDF3C-\uDF3F\uDF45]|\uD82F\uDC9C|\uD833[\uDF50-\uDFC3]|\uD834[\uDC00-\uDCF5\uDD00-\uDD26\uDD29-\uDD64\uDD6A-\uDD6C\uDD83\uDD84\uDD8C-\uDDA9\uDDAE-\uDDEA\uDE00-\uDE41\uDE45\uDF00-\uDF56]|\uD835[\uDEC1\uDEDB\uDEFB\uDF15\uDF35\uDF4F\uDF6F\uDF89\uDFA9\uDFC3]|\uD836[\uDC00-\uDDFF\uDE37-\uDE3A\uDE6D-\uDE74\uDE76-\uDE83\uDE85\uDE86]|\uD838[\uDD4F\uDEFF]|\uD83B[\uDCAC\uDCB0\uDD2E\uDEF0\uDEF1]|\uD83C[\uDC00-\uDC2B\uDC30-\uDC93\uDCA0-\uDCAE\uDCB1-\uDCBF\uDCC1-\uDCCF\uDCD1-\uDCF5\uDD0D-\uDDAD\uDDE6-\uDE02\uDE10-\uDE3B\uDE40-\uDE48\uDE50\uDE51\uDE60-\uDE65\uDF00-\uDFFF]|\uD83D[\uDC00-\uDED7\uDEDC-\uDEEC\uDEF0-\uDEFC\uDF00-\uDF76\uDF7B-\uDFD9\uDFE0-\uDFEB\uDFF0]|\uD83E[\uDC00-\uDC0B\uDC10-\uDC47\uDC50-\uDC59\uDC60-\uDC87\uDC90-\uDCAD\uDCB0\uDCB1\uDD00-\uDE53\uDE60-\uDE6D\uDE70-\uDE7C\uDE80-\uDE88\uDE90-\uDEBD\uDEBF-\uDEC5\uDECE-\uDEDB\uDEE0-\uDEE8\uDEF0-\uDEF8\uDF00-\uDF92\uDF94-\uDFCA]/;
+var regex = /[ \xA0\u1680\u2000-\u200A\u2028\u2029\u202F\u205F\u3000]/;
+exports.Any = regex$5;
+exports.Cc = regex$4;
+exports.Cf = regex$3;
+exports.P = regex$2;
+exports.S = regex$1;
+exports.Z = regex;
+
+/***/ }),
+
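The uc.micro bundle above only exports precompiled regexes for broad Unicode classes: Any, Cc (control), Cf (format), P (punctuation), S (symbols) and Z (separators). The same classifications can be spot-checked in Python with unicodedata (an illustrative comparison, not the bundle's API):

    import unicodedata

    # General-category prefixes correspond to the exported regex names.
    print(unicodedata.category('!'))        # Po -> matched by the P regex
    print(unicodedata.category('\u00a0'))   # Zs -> matched by the Z regex
    print(unicodedata.category('\u00ad'))   # Cf -> matched by the Cf regex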
/***/ "./components/GraphiQL.tsx":
/*!*********************************!*\
!*** ./components/GraphiQL.tsx ***!
@@ -72235,11 +72914,23 @@ var _react = _interopRequireWildcard(__webpack_require__(/*! react */ "react"));
var _react2 = __webpack_require__(/*! @graphiql/react */ "../../graphiql-react/dist/index.js");
function _getRequireWildcardCache(nodeInterop) { if (typeof WeakMap !== "function") return null; var cacheBabelInterop = new WeakMap(); var cacheNodeInterop = new WeakMap(); return (_getRequireWildcardCache = function (nodeInterop) { return nodeInterop ? cacheNodeInterop : cacheBabelInterop; })(nodeInterop); }
function _interopRequireWildcard(obj, nodeInterop) { if (!nodeInterop && obj && obj.__esModule) { return obj; } if (obj === null || typeof obj !== "object" && typeof obj !== "function") { return { default: obj }; } var cache = _getRequireWildcardCache(nodeInterop); if (cache && cache.has(obj)) { return cache.get(obj); } var newObj = {}; var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var key in obj) { if (key !== "default" && Object.prototype.hasOwnProperty.call(obj, key)) { var desc = hasPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : null; if (desc && (desc.get || desc.set)) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } newObj.default = obj; if (cache) { cache.set(obj, newObj); } return newObj; }
-function _extends() { _extends = Object.assign ? Object.assign.bind() : function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }
+function _extends() { _extends = Object.assign ? Object.assign.bind() : function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } /**
+ * Copyright (c) 2020 GraphQL Contributors.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
const majorVersion = parseInt(_react.default.version.slice(0, 2), 10);
if (majorVersion < 16) {
throw new Error(['GraphiQL 0.18.0 and after is not compatible with React 15 or below.', 'If you are using a CDN source (jsdelivr, unpkg, etc), follow this example:', 'https://github.com/graphql/graphiql/blob/master/examples/graphiql-cdn/index.html#L49'].join('\n'));
}
+
+/**
+ * API docs for this live here:
+ *
+ * https://graphiql-test.netlify.app/typedoc/modules/graphiql.html#graphiqlprops
+ */
+
/**
* The top-level React component for GraphiQL, intended to encompass the entire
* browser viewport.
@@ -72248,6 +72939,7 @@ if (majorVersion < 16) {
*/
function GraphiQL(_ref) {
+ var _props$disableTabs;
let {
dangerouslyAssumeSchemaIsValid,
defaultQuery,
@@ -72309,7 +73001,8 @@ function GraphiQL(_ref) {
validationRules: validationRules,
variables: variables
}, /*#__PURE__*/_react.default.createElement(GraphiQLInterface, _extends({
- showPersistHeadersSettings: shouldPersistHeaders !== false
+ showPersistHeadersSettings: shouldPersistHeaders !== false,
+ disableTabs: (_props$disableTabs = props.disableTabs) !== null && _props$disableTabs !== void 0 ? _props$disableTabs : false
}, props)));
}
@@ -72543,7 +73236,7 @@ function GraphiQLInterface(props) {
className: "graphiql-sessions"
}, /*#__PURE__*/_react.default.createElement("div", {
className: "graphiql-session-header"
- }, /*#__PURE__*/_react.default.createElement(_react2.Tabs, {
+ }, props.disableTabs ? null : /*#__PURE__*/_react.default.createElement(_react2.Tabs, {
values: editorContext.tabs,
onReorder: handleReorder,
"aria-label": "Select active operation"
@@ -83529,16 +84222,6 @@ function _extends() {
}
module.exports = _extends, module.exports.__esModule = true, module.exports["default"] = module.exports;
-/***/ }),
-
-/***/ "../../../node_modules/entities/lib/maps/entities.json":
-/*!*************************************************************!*\
- !*** ../../../node_modules/entities/lib/maps/entities.json ***!
- \*************************************************************/
-/***/ (function(module) {
-
-module.exports = JSON.parse('{"Aacute":"Á","aacute":"á","Abreve":"Ă","abreve":"ă","ac":"∾","acd":"∿","acE":"∾̳","Acirc":"Â","acirc":"â","acute":"´","Acy":"А","acy":"а","AElig":"Æ","aelig":"æ","af":"","Afr":"𝔄","afr":"𝔞","Agrave":"À","agrave":"à","alefsym":"ℵ","aleph":"ℵ","Alpha":"Α","alpha":"α","Amacr":"Ā","amacr":"ā","amalg":"⨿","amp":"&","AMP":"&","andand":"⩕","And":"⩓","and":"∧","andd":"⩜","andslope":"⩘","andv":"⩚","ang":"∠","ange":"⦤","angle":"∠","angmsdaa":"⦨","angmsdab":"⦩","angmsdac":"⦪","angmsdad":"⦫","angmsdae":"⦬","angmsdaf":"⦭","angmsdag":"⦮","angmsdah":"⦯","angmsd":"∡","angrt":"∟","angrtvb":"⊾","angrtvbd":"⦝","angsph":"∢","angst":"Å","angzarr":"⍼","Aogon":"Ą","aogon":"ą","Aopf":"𝔸","aopf":"𝕒","apacir":"⩯","ap":"≈","apE":"⩰","ape":"≊","apid":"≋","apos":"\'","ApplyFunction":"","approx":"≈","approxeq":"≊","Aring":"Å","aring":"å","Ascr":"𝒜","ascr":"𝒶","Assign":"≔","ast":"*","asymp":"≈","asympeq":"≍","Atilde":"Ã","atilde":"ã","Auml":"Ä","auml":"ä","awconint":"∳","awint":"⨑","backcong":"≌","backepsilon":"϶","backprime":"‵","backsim":"∽","backsimeq":"⋍","Backslash":"∖","Barv":"⫧","barvee":"⊽","barwed":"⌅","Barwed":"⌆","barwedge":"⌅","bbrk":"⎵","bbrktbrk":"⎶","bcong":"≌","Bcy":"Б","bcy":"б","bdquo":"„","becaus":"∵","because":"∵","Because":"∵","bemptyv":"⦰","bepsi":"϶","bernou":"ℬ","Bernoullis":"ℬ","Beta":"Β","beta":"β","beth":"ℶ","between":"≬","Bfr":"𝔅","bfr":"𝔟","bigcap":"⋂","bigcirc":"◯","bigcup":"⋃","bigodot":"⨀","bigoplus":"⨁","bigotimes":"⨂","bigsqcup":"⨆","bigstar":"★","bigtriangledown":"▽","bigtriangleup":"△","biguplus":"⨄","bigvee":"⋁","bigwedge":"⋀","bkarow":"⤍","blacklozenge":"⧫","blacksquare":"▪","blacktriangle":"▴","blacktriangledown":"▾","blacktriangleleft":"◂","blacktriangleright":"▸","blank":"␣","blk12":"▒","blk14":"░","blk34":"▓","block":"█","bne":"=⃥","bnequiv":"≡⃥","bNot":"⫭","bnot":"⌐","Bopf":"𝔹","bopf":"𝕓","bot":"⊥","bottom":"⊥","bowtie":"⋈","boxbox":"⧉","boxdl":"┐","boxdL":"╕","boxDl":"╖","boxDL":"╗","boxdr":"┌","boxdR":"╒","boxDr":"╓","boxDR":"╔","boxh":"─","boxH":"═","boxhd":"┬","boxHd":"╤","boxhD":"╥","boxHD":"╦","boxhu":"┴","boxHu":"╧","boxhU":"╨","boxHU":"╩","boxminus":"⊟","boxplus":"⊞","boxtimes":"⊠","boxul":"┘","boxuL":"╛","boxUl":"╜","boxUL":"╝","boxur":"└","boxuR":"╘","boxUr":"╙","boxUR":"╚","boxv":"│","boxV":"║","boxvh":"┼","boxvH":"╪","boxVh":"╫","boxVH":"╬","boxvl":"┤","boxvL":"╡","boxVl":"╢","boxVL":"╣","boxvr":"├","boxvR":"╞","boxVr":"╟","boxVR":"╠","bprime":"‵","breve":"˘","Breve":"˘","brvbar":"¦","bscr":"𝒷","Bscr":"ℬ","bsemi":"⁏","bsim":"∽","bsime":"⋍","bsolb":"⧅","bsol":"\\\\","bsolhsub":"⟈","bull":"•","bullet":"•","bump":"≎","bumpE":"⪮","bumpe":"≏","Bumpeq":"≎","bumpeq":"≏","Cacute":"Ć","cacute":"ć","capand":"⩄","capbrcup":"⩉","capcap":"⩋","cap":"∩","Cap":"⋒","capcup":"⩇","capdot":"⩀","CapitalDifferentialD":"ⅅ","caps":"∩︀","caret":"⁁","caron":"ˇ","Cayleys":"ℭ","ccaps":"⩍","Ccaron":"Č","ccaron":"č","Ccedil":"Ç","ccedil":"ç","Ccirc":"Ĉ","ccirc":"ĉ","Cconint":"∰","ccups":"⩌","ccupssm":"⩐","Cdot":"Ċ","cdot":"ċ","cedil":"¸","Cedilla":"¸","cemptyv":"⦲","cent":"¢","centerdot":"·","CenterDot":"·","cfr":"𝔠","Cfr":"ℭ","CHcy":"Ч","chcy":"ч","check":"✓","checkmark":"✓","Chi":"Χ","chi":"χ","circ":"ˆ","circeq":"≗","circlearrowleft":"↺","circlearrowright":"↻","circledast":"⊛","circledcirc":"⊚","circleddash":"⊝","CircleDot":"⊙","circledR":"®","circledS":"Ⓢ","CircleMinus":"⊖","CirclePlus":"⊕","CircleTimes":"⊗","cir":"○","cirE":"⧃","cire":"≗","cirfnint":"⨐","cirmid":"⫯","cirscir":"⧂","ClockwiseContourIntegral":"∲","CloseCurlyDoubleQuote":"”","CloseCurlyQuote":"’","
clubs":"♣","clubsuit":"♣","colon":":","Colon":"∷","Colone":"⩴","colone":"≔","coloneq":"≔","comma":",","commat":"@","comp":"∁","compfn":"∘","complement":"∁","complexes":"ℂ","cong":"≅","congdot":"⩭","Congruent":"≡","conint":"∮","Conint":"∯","ContourIntegral":"∮","copf":"𝕔","Copf":"ℂ","coprod":"∐","Coproduct":"∐","copy":"©","COPY":"©","copysr":"℗","CounterClockwiseContourIntegral":"∳","crarr":"↵","cross":"✗","Cross":"⨯","Cscr":"𝒞","cscr":"𝒸","csub":"⫏","csube":"⫑","csup":"⫐","csupe":"⫒","ctdot":"⋯","cudarrl":"⤸","cudarrr":"⤵","cuepr":"⋞","cuesc":"⋟","cularr":"↶","cularrp":"⤽","cupbrcap":"⩈","cupcap":"⩆","CupCap":"≍","cup":"∪","Cup":"⋓","cupcup":"⩊","cupdot":"⊍","cupor":"⩅","cups":"∪︀","curarr":"↷","curarrm":"⤼","curlyeqprec":"⋞","curlyeqsucc":"⋟","curlyvee":"⋎","curlywedge":"⋏","curren":"¤","curvearrowleft":"↶","curvearrowright":"↷","cuvee":"⋎","cuwed":"⋏","cwconint":"∲","cwint":"∱","cylcty":"⌭","dagger":"†","Dagger":"‡","daleth":"ℸ","darr":"↓","Darr":"↡","dArr":"⇓","dash":"‐","Dashv":"⫤","dashv":"⊣","dbkarow":"⤏","dblac":"˝","Dcaron":"Ď","dcaron":"ď","Dcy":"Д","dcy":"д","ddagger":"‡","ddarr":"⇊","DD":"ⅅ","dd":"ⅆ","DDotrahd":"⤑","ddotseq":"⩷","deg":"°","Del":"∇","Delta":"Δ","delta":"δ","demptyv":"⦱","dfisht":"⥿","Dfr":"𝔇","dfr":"𝔡","dHar":"⥥","dharl":"⇃","dharr":"⇂","DiacriticalAcute":"´","DiacriticalDot":"˙","DiacriticalDoubleAcute":"˝","DiacriticalGrave":"`","DiacriticalTilde":"˜","diam":"⋄","diamond":"⋄","Diamond":"⋄","diamondsuit":"♦","diams":"♦","die":"¨","DifferentialD":"ⅆ","digamma":"ϝ","disin":"⋲","div":"÷","divide":"÷","divideontimes":"⋇","divonx":"⋇","DJcy":"Ђ","djcy":"ђ","dlcorn":"⌞","dlcrop":"⌍","dollar":"$","Dopf":"𝔻","dopf":"𝕕","Dot":"¨","dot":"˙","DotDot":"⃜","doteq":"≐","doteqdot":"≑","DotEqual":"≐","dotminus":"∸","dotplus":"∔","dotsquare":"⊡","doublebarwedge":"⌆","DoubleContourIntegral":"∯","DoubleDot":"¨","DoubleDownArrow":"⇓","DoubleLeftArrow":"⇐","DoubleLeftRightArrow":"⇔","DoubleLeftTee":"⫤","DoubleLongLeftArrow":"⟸","DoubleLongLeftRightArrow":"⟺","DoubleLongRightArrow":"⟹","DoubleRightArrow":"⇒","DoubleRightTee":"⊨","DoubleUpArrow":"⇑","DoubleUpDownArrow":"⇕","DoubleVerticalBar":"∥","DownArrowBar":"⤓","downarrow":"↓","DownArrow":"↓","Downarrow":"⇓","DownArrowUpArrow":"⇵","DownBreve":"̑","downdownarrows":"⇊","downharpoonleft":"⇃","downharpoonright":"⇂","DownLeftRightVector":"⥐","DownLeftTeeVector":"⥞","DownLeftVectorBar":"⥖","DownLeftVector":"↽","DownRightTeeVector":"⥟","DownRightVectorBar":"⥗","DownRightVector":"⇁","DownTeeArrow":"↧","DownTee":"⊤","drbkarow":"⤐","drcorn":"⌟","drcrop":"⌌","Dscr":"𝒟","dscr":"𝒹","DScy":"Ѕ","dscy":"ѕ","dsol":"⧶","Dstrok":"Đ","dstrok":"đ","dtdot":"⋱","dtri":"▿","dtrif":"▾","duarr":"⇵","duhar":"⥯","dwangle":"⦦","DZcy":"Џ","dzcy":"џ","dzigrarr":"⟿","Eacute":"É","eacute":"é","easter":"⩮","Ecaron":"Ě","ecaron":"ě","Ecirc":"Ê","ecirc":"ê","ecir":"≖","ecolon":"≕","Ecy":"Э","ecy":"э","eDDot":"⩷","Edot":"Ė","edot":"ė","eDot":"≑","ee":"ⅇ","efDot":"≒","Efr":"𝔈","efr":"𝔢","eg":"⪚","Egrave":"È","egrave":"è","egs":"⪖","egsdot":"⪘","el":"⪙","Element":"∈","elinters":"⏧","ell":"ℓ","els":"⪕","elsdot":"⪗","Emacr":"Ē","emacr":"ē","empty":"∅","emptyset":"∅","EmptySmallSquare":"◻","emptyv":"∅","EmptyVerySmallSquare":"▫","emsp13":" ","emsp14":" ","emsp":" ","ENG":"Ŋ","eng":"ŋ","ensp":" 
","Eogon":"Ę","eogon":"ę","Eopf":"𝔼","eopf":"𝕖","epar":"⋕","eparsl":"⧣","eplus":"⩱","epsi":"ε","Epsilon":"Ε","epsilon":"ε","epsiv":"ϵ","eqcirc":"≖","eqcolon":"≕","eqsim":"≂","eqslantgtr":"⪖","eqslantless":"⪕","Equal":"⩵","equals":"=","EqualTilde":"≂","equest":"≟","Equilibrium":"⇌","equiv":"≡","equivDD":"⩸","eqvparsl":"⧥","erarr":"⥱","erDot":"≓","escr":"ℯ","Escr":"ℰ","esdot":"≐","Esim":"⩳","esim":"≂","Eta":"Η","eta":"η","ETH":"Ð","eth":"ð","Euml":"Ë","euml":"ë","euro":"€","excl":"!","exist":"∃","Exists":"∃","expectation":"ℰ","exponentiale":"ⅇ","ExponentialE":"ⅇ","fallingdotseq":"≒","Fcy":"Ф","fcy":"ф","female":"♀","ffilig":"ffi","fflig":"ff","ffllig":"ffl","Ffr":"𝔉","ffr":"𝔣","filig":"fi","FilledSmallSquare":"◼","FilledVerySmallSquare":"▪","fjlig":"fj","flat":"♭","fllig":"fl","fltns":"▱","fnof":"ƒ","Fopf":"𝔽","fopf":"𝕗","forall":"∀","ForAll":"∀","fork":"⋔","forkv":"⫙","Fouriertrf":"ℱ","fpartint":"⨍","frac12":"½","frac13":"⅓","frac14":"¼","frac15":"⅕","frac16":"⅙","frac18":"⅛","frac23":"⅔","frac25":"⅖","frac34":"¾","frac35":"⅗","frac38":"⅜","frac45":"⅘","frac56":"⅚","frac58":"⅝","frac78":"⅞","frasl":"⁄","frown":"⌢","fscr":"𝒻","Fscr":"ℱ","gacute":"ǵ","Gamma":"Γ","gamma":"γ","Gammad":"Ϝ","gammad":"ϝ","gap":"⪆","Gbreve":"Ğ","gbreve":"ğ","Gcedil":"Ģ","Gcirc":"Ĝ","gcirc":"ĝ","Gcy":"Г","gcy":"г","Gdot":"Ġ","gdot":"ġ","ge":"≥","gE":"≧","gEl":"⪌","gel":"⋛","geq":"≥","geqq":"≧","geqslant":"⩾","gescc":"⪩","ges":"⩾","gesdot":"⪀","gesdoto":"⪂","gesdotol":"⪄","gesl":"⋛︀","gesles":"⪔","Gfr":"𝔊","gfr":"𝔤","gg":"≫","Gg":"⋙","ggg":"⋙","gimel":"ℷ","GJcy":"Ѓ","gjcy":"ѓ","gla":"⪥","gl":"≷","glE":"⪒","glj":"⪤","gnap":"⪊","gnapprox":"⪊","gne":"⪈","gnE":"≩","gneq":"⪈","gneqq":"≩","gnsim":"⋧","Gopf":"𝔾","gopf":"𝕘","grave":"`","GreaterEqual":"≥","GreaterEqualLess":"⋛","GreaterFullEqual":"≧","GreaterGreater":"⪢","GreaterLess":"≷","GreaterSlantEqual":"⩾","GreaterTilde":"≳","Gscr":"𝒢","gscr":"ℊ","gsim":"≳","gsime":"⪎","gsiml":"⪐","gtcc":"⪧","gtcir":"⩺","gt":">","GT":">","Gt":"≫","gtdot":"⋗","gtlPar":"⦕","gtquest":"⩼","gtrapprox":"⪆","gtrarr":"⥸","gtrdot":"⋗","gtreqless":"⋛","gtreqqless":"⪌","gtrless":"≷","gtrsim":"≳","gvertneqq":"≩︀","gvnE":"≩︀","Hacek":"ˇ","hairsp":" 
","half":"½","hamilt":"ℋ","HARDcy":"Ъ","hardcy":"ъ","harrcir":"⥈","harr":"↔","hArr":"⇔","harrw":"↭","Hat":"^","hbar":"ℏ","Hcirc":"Ĥ","hcirc":"ĥ","hearts":"♥","heartsuit":"♥","hellip":"…","hercon":"⊹","hfr":"𝔥","Hfr":"ℌ","HilbertSpace":"ℋ","hksearow":"⤥","hkswarow":"⤦","hoarr":"⇿","homtht":"∻","hookleftarrow":"↩","hookrightarrow":"↪","hopf":"𝕙","Hopf":"ℍ","horbar":"―","HorizontalLine":"─","hscr":"𝒽","Hscr":"ℋ","hslash":"ℏ","Hstrok":"Ħ","hstrok":"ħ","HumpDownHump":"≎","HumpEqual":"≏","hybull":"⁃","hyphen":"‐","Iacute":"Í","iacute":"í","ic":"","Icirc":"Î","icirc":"î","Icy":"И","icy":"и","Idot":"İ","IEcy":"Е","iecy":"е","iexcl":"¡","iff":"⇔","ifr":"𝔦","Ifr":"ℑ","Igrave":"Ì","igrave":"ì","ii":"ⅈ","iiiint":"⨌","iiint":"∭","iinfin":"⧜","iiota":"℩","IJlig":"IJ","ijlig":"ij","Imacr":"Ī","imacr":"ī","image":"ℑ","ImaginaryI":"ⅈ","imagline":"ℐ","imagpart":"ℑ","imath":"ı","Im":"ℑ","imof":"⊷","imped":"Ƶ","Implies":"⇒","incare":"℅","in":"∈","infin":"∞","infintie":"⧝","inodot":"ı","intcal":"⊺","int":"∫","Int":"∬","integers":"ℤ","Integral":"∫","intercal":"⊺","Intersection":"⋂","intlarhk":"⨗","intprod":"⨼","InvisibleComma":"","InvisibleTimes":"","IOcy":"Ё","iocy":"ё","Iogon":"Į","iogon":"į","Iopf":"𝕀","iopf":"𝕚","Iota":"Ι","iota":"ι","iprod":"⨼","iquest":"¿","iscr":"𝒾","Iscr":"ℐ","isin":"∈","isindot":"⋵","isinE":"⋹","isins":"⋴","isinsv":"⋳","isinv":"∈","it":"","Itilde":"Ĩ","itilde":"ĩ","Iukcy":"І","iukcy":"і","Iuml":"Ï","iuml":"ï","Jcirc":"Ĵ","jcirc":"ĵ","Jcy":"Й","jcy":"й","Jfr":"𝔍","jfr":"𝔧","jmath":"ȷ","Jopf":"𝕁","jopf":"𝕛","Jscr":"𝒥","jscr":"𝒿","Jsercy":"Ј","jsercy":"ј","Jukcy":"Є","jukcy":"є","Kappa":"Κ","kappa":"κ","kappav":"ϰ","Kcedil":"Ķ","kcedil":"ķ","Kcy":"К","kcy":"к","Kfr":"𝔎","kfr":"𝔨","kgreen":"ĸ","KHcy":"Х","khcy":"х","KJcy":"Ќ","kjcy":"ќ","Kopf":"𝕂","kopf":"𝕜","Kscr":"𝒦","kscr":"𝓀","lAarr":"⇚","Lacute":"Ĺ","lacute":"ĺ","laemptyv":"⦴","lagran":"ℒ","Lambda":"Λ","lambda":"λ","lang":"⟨","Lang":"⟪","langd":"⦑","langle":"⟨","lap":"⪅","Laplacetrf":"ℒ","laquo":"«","larrb":"⇤","larrbfs":"⤟","larr":"←","Larr":"↞","lArr":"⇐","larrfs":"⤝","larrhk":"↩","larrlp":"↫","larrpl":"⤹","larrsim":"⥳","larrtl":"↢","latail":"⤙","lAtail":"⤛","lat":"⪫","late":"⪭","lates":"⪭︀","lbarr":"⤌","lBarr":"⤎","lbbrk":"❲","lbrace":"{","lbrack":"[","lbrke":"⦋","lbrksld":"⦏","lbrkslu":"⦍","Lcaron":"Ľ","lcaron":"ľ","Lcedil":"Ļ","lcedil":"ļ","lceil":"⌈","lcub":"{","Lcy":"Л","lcy":"л","ldca":"⤶","ldquo":"“","ldquor":"„","ldrdhar":"⥧","ldrushar":"⥋","ldsh":"↲","le":"≤","lE":"≦","LeftAngleBracket":"⟨","LeftArrowBar":"⇤","leftarrow":"←","LeftArrow":"←","Leftarrow":"⇐","LeftArrowRightArrow":"⇆","leftarrowtail":"↢","LeftCeiling":"⌈","LeftDoubleBracket":"⟦","LeftDownTeeVector":"⥡","LeftDownVectorBar":"⥙","LeftDownVector":"⇃","LeftFloor":"⌊","leftharpoondown":"↽","leftharpoonup":"↼","leftleftarrows":"⇇","leftrightarrow":"↔","LeftRightArrow":"↔","Leftrightarrow":"⇔","leftrightarrows":"⇆","leftrightharpoons":"⇋","leftrightsquigarrow":"↭","LeftRightVector":"⥎","LeftTeeArrow":"↤","LeftTee":"⊣","LeftTeeVector":"⥚","leftthreetimes":"⋋","LeftTriangleBar":"⧏","LeftTriangle":"⊲","LeftTriangleEqual":"⊴","LeftUpDownVector":"⥑","LeftUpTeeVector":"⥠","LeftUpVectorBar":"⥘","LeftUpVector":"↿","LeftVectorBar":"⥒","LeftVector":"↼","lEg":"⪋","leg":"⋚","leq":"≤","leqq":"≦","leqslant":"⩽","lescc":"⪨","les":"⩽","lesdot":"⩿","lesdoto":"⪁","lesdotor":"⪃","lesg":"⋚︀","lesges":"⪓","lessapprox":"⪅","lessdot":"⋖","lesseqgtr":"⋚","lesseqqgtr":"⪋","LessEqualGreater":"⋚","LessFullEqual":"≦","LessGreater":"≶","lessgtr":"≶","LessLess":"⪡","lesssim":"≲","LessSlantEqual":"⩽
","LessTilde":"≲","lfisht":"⥼","lfloor":"⌊","Lfr":"𝔏","lfr":"𝔩","lg":"≶","lgE":"⪑","lHar":"⥢","lhard":"↽","lharu":"↼","lharul":"⥪","lhblk":"▄","LJcy":"Љ","ljcy":"љ","llarr":"⇇","ll":"≪","Ll":"⋘","llcorner":"⌞","Lleftarrow":"⇚","llhard":"⥫","lltri":"◺","Lmidot":"Ŀ","lmidot":"ŀ","lmoustache":"⎰","lmoust":"⎰","lnap":"⪉","lnapprox":"⪉","lne":"⪇","lnE":"≨","lneq":"⪇","lneqq":"≨","lnsim":"⋦","loang":"⟬","loarr":"⇽","lobrk":"⟦","longleftarrow":"⟵","LongLeftArrow":"⟵","Longleftarrow":"⟸","longleftrightarrow":"⟷","LongLeftRightArrow":"⟷","Longleftrightarrow":"⟺","longmapsto":"⟼","longrightarrow":"⟶","LongRightArrow":"⟶","Longrightarrow":"⟹","looparrowleft":"↫","looparrowright":"↬","lopar":"⦅","Lopf":"𝕃","lopf":"𝕝","loplus":"⨭","lotimes":"⨴","lowast":"∗","lowbar":"_","LowerLeftArrow":"↙","LowerRightArrow":"↘","loz":"◊","lozenge":"◊","lozf":"⧫","lpar":"(","lparlt":"⦓","lrarr":"⇆","lrcorner":"⌟","lrhar":"⇋","lrhard":"⥭","lrm":"","lrtri":"⊿","lsaquo":"‹","lscr":"𝓁","Lscr":"ℒ","lsh":"↰","Lsh":"↰","lsim":"≲","lsime":"⪍","lsimg":"⪏","lsqb":"[","lsquo":"‘","lsquor":"‚","Lstrok":"Ł","lstrok":"ł","ltcc":"⪦","ltcir":"⩹","lt":"<","LT":"<","Lt":"≪","ltdot":"⋖","lthree":"⋋","ltimes":"⋉","ltlarr":"⥶","ltquest":"⩻","ltri":"◃","ltrie":"⊴","ltrif":"◂","ltrPar":"⦖","lurdshar":"⥊","luruhar":"⥦","lvertneqq":"≨︀","lvnE":"≨︀","macr":"¯","male":"♂","malt":"✠","maltese":"✠","Map":"⤅","map":"↦","mapsto":"↦","mapstodown":"↧","mapstoleft":"↤","mapstoup":"↥","marker":"▮","mcomma":"⨩","Mcy":"М","mcy":"м","mdash":"—","mDDot":"∺","measuredangle":"∡","MediumSpace":" ","Mellintrf":"ℳ","Mfr":"𝔐","mfr":"𝔪","mho":"℧","micro":"µ","midast":"*","midcir":"⫰","mid":"∣","middot":"·","minusb":"⊟","minus":"−","minusd":"∸","minusdu":"⨪","MinusPlus":"∓","mlcp":"⫛","mldr":"…","mnplus":"∓","models":"⊧","Mopf":"𝕄","mopf":"𝕞","mp":"∓","mscr":"𝓂","Mscr":"ℳ","mstpos":"∾","Mu":"Μ","mu":"μ","multimap":"⊸","mumap":"⊸","nabla":"∇","Nacute":"Ń","nacute":"ń","nang":"∠⃒","nap":"≉","napE":"⩰̸","napid":"≋̸","napos":"ʼn","napprox":"≉","natural":"♮","naturals":"ℕ","natur":"♮","nbsp":" ","nbump":"≎̸","nbumpe":"≏̸","ncap":"⩃","Ncaron":"Ň","ncaron":"ň","Ncedil":"Ņ","ncedil":"ņ","ncong":"≇","ncongdot":"⩭̸","ncup":"⩂","Ncy":"Н","ncy":"н","ndash":"–","nearhk":"⤤","nearr":"↗","neArr":"⇗","nearrow":"↗","ne":"≠","nedot":"≐̸","NegativeMediumSpace":"","NegativeThickSpace":"","NegativeThinSpace":"","NegativeVeryThinSpace":"","nequiv":"≢","nesear":"⤨","nesim":"≂̸","NestedGreaterGreater":"≫","NestedLessLess":"≪","NewLine":"\\n","nexist":"∄","nexists":"∄","Nfr":"𝔑","nfr":"𝔫","ngE":"≧̸","nge":"≱","ngeq":"≱","ngeqq":"≧̸","ngeqslant":"⩾̸","nges":"⩾̸","nGg":"⋙̸","ngsim":"≵","nGt":"≫⃒","ngt":"≯","ngtr":"≯","nGtv":"≫̸","nharr":"↮","nhArr":"⇎","nhpar":"⫲","ni":"∋","nis":"⋼","nisd":"⋺","niv":"∋","NJcy":"Њ","njcy":"њ","nlarr":"↚","nlArr":"⇍","nldr":"‥","nlE":"≦̸","nle":"≰","nleftarrow":"↚","nLeftarrow":"⇍","nleftrightarrow":"↮","nLeftrightarrow":"⇎","nleq":"≰","nleqq":"≦̸","nleqslant":"⩽̸","nles":"⩽̸","nless":"≮","nLl":"⋘̸","nlsim":"≴","nLt":"≪⃒","nlt":"≮","nltri":"⋪","nltrie":"⋬","nLtv":"≪̸","nmid":"∤","NoBreak":"","NonBreakingSpace":" 
","nopf":"𝕟","Nopf":"ℕ","Not":"⫬","not":"¬","NotCongruent":"≢","NotCupCap":"≭","NotDoubleVerticalBar":"∦","NotElement":"∉","NotEqual":"≠","NotEqualTilde":"≂̸","NotExists":"∄","NotGreater":"≯","NotGreaterEqual":"≱","NotGreaterFullEqual":"≧̸","NotGreaterGreater":"≫̸","NotGreaterLess":"≹","NotGreaterSlantEqual":"⩾̸","NotGreaterTilde":"≵","NotHumpDownHump":"≎̸","NotHumpEqual":"≏̸","notin":"∉","notindot":"⋵̸","notinE":"⋹̸","notinva":"∉","notinvb":"⋷","notinvc":"⋶","NotLeftTriangleBar":"⧏̸","NotLeftTriangle":"⋪","NotLeftTriangleEqual":"⋬","NotLess":"≮","NotLessEqual":"≰","NotLessGreater":"≸","NotLessLess":"≪̸","NotLessSlantEqual":"⩽̸","NotLessTilde":"≴","NotNestedGreaterGreater":"⪢̸","NotNestedLessLess":"⪡̸","notni":"∌","notniva":"∌","notnivb":"⋾","notnivc":"⋽","NotPrecedes":"⊀","NotPrecedesEqual":"⪯̸","NotPrecedesSlantEqual":"⋠","NotReverseElement":"∌","NotRightTriangleBar":"⧐̸","NotRightTriangle":"⋫","NotRightTriangleEqual":"⋭","NotSquareSubset":"⊏̸","NotSquareSubsetEqual":"⋢","NotSquareSuperset":"⊐̸","NotSquareSupersetEqual":"⋣","NotSubset":"⊂⃒","NotSubsetEqual":"⊈","NotSucceeds":"⊁","NotSucceedsEqual":"⪰̸","NotSucceedsSlantEqual":"⋡","NotSucceedsTilde":"≿̸","NotSuperset":"⊃⃒","NotSupersetEqual":"⊉","NotTilde":"≁","NotTildeEqual":"≄","NotTildeFullEqual":"≇","NotTildeTilde":"≉","NotVerticalBar":"∤","nparallel":"∦","npar":"∦","nparsl":"⫽⃥","npart":"∂̸","npolint":"⨔","npr":"⊀","nprcue":"⋠","nprec":"⊀","npreceq":"⪯̸","npre":"⪯̸","nrarrc":"⤳̸","nrarr":"↛","nrArr":"⇏","nrarrw":"↝̸","nrightarrow":"↛","nRightarrow":"⇏","nrtri":"⋫","nrtrie":"⋭","nsc":"⊁","nsccue":"⋡","nsce":"⪰̸","Nscr":"𝒩","nscr":"𝓃","nshortmid":"∤","nshortparallel":"∦","nsim":"≁","nsime":"≄","nsimeq":"≄","nsmid":"∤","nspar":"∦","nsqsube":"⋢","nsqsupe":"⋣","nsub":"⊄","nsubE":"⫅̸","nsube":"⊈","nsubset":"⊂⃒","nsubseteq":"⊈","nsubseteqq":"⫅̸","nsucc":"⊁","nsucceq":"⪰̸","nsup":"⊅","nsupE":"⫆̸","nsupe":"⊉","nsupset":"⊃⃒","nsupseteq":"⊉","nsupseteqq":"⫆̸","ntgl":"≹","Ntilde":"Ñ","ntilde":"ñ","ntlg":"≸","ntriangleleft":"⋪","ntrianglelefteq":"⋬","ntriangleright":"⋫","ntrianglerighteq":"⋭","Nu":"Ν","nu":"ν","num":"#","numero":"№","numsp":" 
","nvap":"≍⃒","nvdash":"⊬","nvDash":"⊭","nVdash":"⊮","nVDash":"⊯","nvge":"≥⃒","nvgt":">⃒","nvHarr":"⤄","nvinfin":"⧞","nvlArr":"⤂","nvle":"≤⃒","nvlt":"<⃒","nvltrie":"⊴⃒","nvrArr":"⤃","nvrtrie":"⊵⃒","nvsim":"∼⃒","nwarhk":"⤣","nwarr":"↖","nwArr":"⇖","nwarrow":"↖","nwnear":"⤧","Oacute":"Ó","oacute":"ó","oast":"⊛","Ocirc":"Ô","ocirc":"ô","ocir":"⊚","Ocy":"О","ocy":"о","odash":"⊝","Odblac":"Ő","odblac":"ő","odiv":"⨸","odot":"⊙","odsold":"⦼","OElig":"Œ","oelig":"œ","ofcir":"⦿","Ofr":"𝔒","ofr":"𝔬","ogon":"˛","Ograve":"Ò","ograve":"ò","ogt":"⧁","ohbar":"⦵","ohm":"Ω","oint":"∮","olarr":"↺","olcir":"⦾","olcross":"⦻","oline":"‾","olt":"⧀","Omacr":"Ō","omacr":"ō","Omega":"Ω","omega":"ω","Omicron":"Ο","omicron":"ο","omid":"⦶","ominus":"⊖","Oopf":"𝕆","oopf":"𝕠","opar":"⦷","OpenCurlyDoubleQuote":"“","OpenCurlyQuote":"‘","operp":"⦹","oplus":"⊕","orarr":"↻","Or":"⩔","or":"∨","ord":"⩝","order":"ℴ","orderof":"ℴ","ordf":"ª","ordm":"º","origof":"⊶","oror":"⩖","orslope":"⩗","orv":"⩛","oS":"Ⓢ","Oscr":"𝒪","oscr":"ℴ","Oslash":"Ø","oslash":"ø","osol":"⊘","Otilde":"Õ","otilde":"õ","otimesas":"⨶","Otimes":"⨷","otimes":"⊗","Ouml":"Ö","ouml":"ö","ovbar":"⌽","OverBar":"‾","OverBrace":"⏞","OverBracket":"⎴","OverParenthesis":"⏜","para":"¶","parallel":"∥","par":"∥","parsim":"⫳","parsl":"⫽","part":"∂","PartialD":"∂","Pcy":"П","pcy":"п","percnt":"%","period":".","permil":"‰","perp":"⊥","pertenk":"‱","Pfr":"𝔓","pfr":"𝔭","Phi":"Φ","phi":"φ","phiv":"ϕ","phmmat":"ℳ","phone":"☎","Pi":"Π","pi":"π","pitchfork":"⋔","piv":"ϖ","planck":"ℏ","planckh":"ℎ","plankv":"ℏ","plusacir":"⨣","plusb":"⊞","pluscir":"⨢","plus":"+","plusdo":"∔","plusdu":"⨥","pluse":"⩲","PlusMinus":"±","plusmn":"±","plussim":"⨦","plustwo":"⨧","pm":"±","Poincareplane":"ℌ","pointint":"⨕","popf":"𝕡","Popf":"ℙ","pound":"£","prap":"⪷","Pr":"⪻","pr":"≺","prcue":"≼","precapprox":"⪷","prec":"≺","preccurlyeq":"≼","Precedes":"≺","PrecedesEqual":"⪯","PrecedesSlantEqual":"≼","PrecedesTilde":"≾","preceq":"⪯","precnapprox":"⪹","precneqq":"⪵","precnsim":"⋨","pre":"⪯","prE":"⪳","precsim":"≾","prime":"′","Prime":"″","primes":"ℙ","prnap":"⪹","prnE":"⪵","prnsim":"⋨","prod":"∏","Product":"∏","profalar":"⌮","profline":"⌒","profsurf":"⌓","prop":"∝","Proportional":"∝","Proportion":"∷","propto":"∝","prsim":"≾","prurel":"⊰","Pscr":"𝒫","pscr":"𝓅","Psi":"Ψ","psi":"ψ","puncsp":" 
","Qfr":"𝔔","qfr":"𝔮","qint":"⨌","qopf":"𝕢","Qopf":"ℚ","qprime":"⁗","Qscr":"𝒬","qscr":"𝓆","quaternions":"ℍ","quatint":"⨖","quest":"?","questeq":"≟","quot":"\\"","QUOT":"\\"","rAarr":"⇛","race":"∽̱","Racute":"Ŕ","racute":"ŕ","radic":"√","raemptyv":"⦳","rang":"⟩","Rang":"⟫","rangd":"⦒","range":"⦥","rangle":"⟩","raquo":"»","rarrap":"⥵","rarrb":"⇥","rarrbfs":"⤠","rarrc":"⤳","rarr":"→","Rarr":"↠","rArr":"⇒","rarrfs":"⤞","rarrhk":"↪","rarrlp":"↬","rarrpl":"⥅","rarrsim":"⥴","Rarrtl":"⤖","rarrtl":"↣","rarrw":"↝","ratail":"⤚","rAtail":"⤜","ratio":"∶","rationals":"ℚ","rbarr":"⤍","rBarr":"⤏","RBarr":"⤐","rbbrk":"❳","rbrace":"}","rbrack":"]","rbrke":"⦌","rbrksld":"⦎","rbrkslu":"⦐","Rcaron":"Ř","rcaron":"ř","Rcedil":"Ŗ","rcedil":"ŗ","rceil":"⌉","rcub":"}","Rcy":"Р","rcy":"р","rdca":"⤷","rdldhar":"⥩","rdquo":"”","rdquor":"”","rdsh":"↳","real":"ℜ","realine":"ℛ","realpart":"ℜ","reals":"ℝ","Re":"ℜ","rect":"▭","reg":"®","REG":"®","ReverseElement":"∋","ReverseEquilibrium":"⇋","ReverseUpEquilibrium":"⥯","rfisht":"⥽","rfloor":"⌋","rfr":"𝔯","Rfr":"ℜ","rHar":"⥤","rhard":"⇁","rharu":"⇀","rharul":"⥬","Rho":"Ρ","rho":"ρ","rhov":"ϱ","RightAngleBracket":"⟩","RightArrowBar":"⇥","rightarrow":"→","RightArrow":"→","Rightarrow":"⇒","RightArrowLeftArrow":"⇄","rightarrowtail":"↣","RightCeiling":"⌉","RightDoubleBracket":"⟧","RightDownTeeVector":"⥝","RightDownVectorBar":"⥕","RightDownVector":"⇂","RightFloor":"⌋","rightharpoondown":"⇁","rightharpoonup":"⇀","rightleftarrows":"⇄","rightleftharpoons":"⇌","rightrightarrows":"⇉","rightsquigarrow":"↝","RightTeeArrow":"↦","RightTee":"⊢","RightTeeVector":"⥛","rightthreetimes":"⋌","RightTriangleBar":"⧐","RightTriangle":"⊳","RightTriangleEqual":"⊵","RightUpDownVector":"⥏","RightUpTeeVector":"⥜","RightUpVectorBar":"⥔","RightUpVector":"↾","RightVectorBar":"⥓","RightVector":"⇀","ring":"˚","risingdotseq":"≓","rlarr":"⇄","rlhar":"⇌","rlm":"","rmoustache":"⎱","rmoust":"⎱","rnmid":"⫮","roang":"⟭","roarr":"⇾","robrk":"⟧","ropar":"⦆","ropf":"𝕣","Ropf":"ℝ","roplus":"⨮","rotimes":"⨵","RoundImplies":"⥰","rpar":")","rpargt":"⦔","rppolint":"⨒","rrarr":"⇉","Rrightarrow":"⇛","rsaquo":"›","rscr":"𝓇","Rscr":"ℛ","rsh":"↱","Rsh":"↱","rsqb":"]","rsquo":"’","rsquor":"’","rthree":"⋌","rtimes":"⋊","rtri":"▹","rtrie":"⊵","rtrif":"▸","rtriltri":"⧎","RuleDelayed":"⧴","ruluhar":"⥨","rx":"℞","Sacute":"Ś","sacute":"ś","sbquo":"‚","scap":"⪸","Scaron":"Š","scaron":"š","Sc":"⪼","sc":"≻","sccue":"≽","sce":"⪰","scE":"⪴","Scedil":"Ş","scedil":"ş","Scirc":"Ŝ","scirc":"ŝ","scnap":"⪺","scnE":"⪶","scnsim":"⋩","scpolint":"⨓","scsim":"≿","Scy":"С","scy":"с","sdotb":"⊡","sdot":"⋅","sdote":"⩦","searhk":"⤥","searr":"↘","seArr":"⇘","searrow":"↘","sect":"§","semi":";","seswar":"⤩","setminus":"∖","setmn":"∖","sext":"✶","Sfr":"𝔖","sfr":"𝔰","sfrown":"⌢","sharp":"♯","SHCHcy":"Щ","shchcy":"щ","SHcy":"Ш","shcy":"ш","ShortDownArrow":"↓","ShortLeftArrow":"←","shortmid":"∣","shortparallel":"∥","ShortRightArrow":"→","ShortUpArrow":"↑","shy":"","Sigma":"Σ","sigma":"σ","sigmaf":"ς","sigmav":"ς","sim":"∼","simdot":"⩪","sime":"≃","simeq":"≃","simg":"⪞","simgE":"⪠","siml":"⪝","simlE":"⪟","simne":"≆","simplus":"⨤","simrarr":"⥲","slarr":"←","SmallCircle":"∘","smallsetminus":"∖","smashp":"⨳","smeparsl":"⧤","smid":"∣","smile":"⌣","smt":"⪪","smte":"⪬","smtes":"⪬︀","SOFTcy":"Ь","softcy":"ь","solbar":"⌿","solb":"⧄","sol":"/","Sopf":"𝕊","sopf":"𝕤","spades":"♠","spadesuit":"♠","spar":"∥","sqcap":"⊓","sqcaps":"⊓︀","sqcup":"⊔","sqcups":"⊔︀","Sqrt":"√","sqsub":"⊏","sqsube":"⊑","sqsubset":"⊏","sqsubseteq":"⊑","sqsup":"⊐","sqsupe":"⊒","sqsupset":"⊐","sqsupseteq"
:"⊒","square":"□","Square":"□","SquareIntersection":"⊓","SquareSubset":"⊏","SquareSubsetEqual":"⊑","SquareSuperset":"⊐","SquareSupersetEqual":"⊒","SquareUnion":"⊔","squarf":"▪","squ":"□","squf":"▪","srarr":"→","Sscr":"𝒮","sscr":"𝓈","ssetmn":"∖","ssmile":"⌣","sstarf":"⋆","Star":"⋆","star":"☆","starf":"★","straightepsilon":"ϵ","straightphi":"ϕ","strns":"¯","sub":"⊂","Sub":"⋐","subdot":"⪽","subE":"⫅","sube":"⊆","subedot":"⫃","submult":"⫁","subnE":"⫋","subne":"⊊","subplus":"⪿","subrarr":"⥹","subset":"⊂","Subset":"⋐","subseteq":"⊆","subseteqq":"⫅","SubsetEqual":"⊆","subsetneq":"⊊","subsetneqq":"⫋","subsim":"⫇","subsub":"⫕","subsup":"⫓","succapprox":"⪸","succ":"≻","succcurlyeq":"≽","Succeeds":"≻","SucceedsEqual":"⪰","SucceedsSlantEqual":"≽","SucceedsTilde":"≿","succeq":"⪰","succnapprox":"⪺","succneqq":"⪶","succnsim":"⋩","succsim":"≿","SuchThat":"∋","sum":"∑","Sum":"∑","sung":"♪","sup1":"¹","sup2":"²","sup3":"³","sup":"⊃","Sup":"⋑","supdot":"⪾","supdsub":"⫘","supE":"⫆","supe":"⊇","supedot":"⫄","Superset":"⊃","SupersetEqual":"⊇","suphsol":"⟉","suphsub":"⫗","suplarr":"⥻","supmult":"⫂","supnE":"⫌","supne":"⊋","supplus":"⫀","supset":"⊃","Supset":"⋑","supseteq":"⊇","supseteqq":"⫆","supsetneq":"⊋","supsetneqq":"⫌","supsim":"⫈","supsub":"⫔","supsup":"⫖","swarhk":"⤦","swarr":"↙","swArr":"⇙","swarrow":"↙","swnwar":"⤪","szlig":"ß","Tab":"\\t","target":"⌖","Tau":"Τ","tau":"τ","tbrk":"⎴","Tcaron":"Ť","tcaron":"ť","Tcedil":"Ţ","tcedil":"ţ","Tcy":"Т","tcy":"т","tdot":"⃛","telrec":"⌕","Tfr":"𝔗","tfr":"𝔱","there4":"∴","therefore":"∴","Therefore":"∴","Theta":"Θ","theta":"θ","thetasym":"ϑ","thetav":"ϑ","thickapprox":"≈","thicksim":"∼","ThickSpace":" ","ThinSpace":" ","thinsp":" ","thkap":"≈","thksim":"∼","THORN":"Þ","thorn":"þ","tilde":"˜","Tilde":"∼","TildeEqual":"≃","TildeFullEqual":"≅","TildeTilde":"≈","timesbar":"⨱","timesb":"⊠","times":"×","timesd":"⨰","tint":"∭","toea":"⤨","topbot":"⌶","topcir":"⫱","top":"⊤","Topf":"𝕋","topf":"𝕥","topfork":"⫚","tosa":"⤩","tprime":"‴","trade":"™","TRADE":"™","triangle":"▵","triangledown":"▿","triangleleft":"◃","trianglelefteq":"⊴","triangleq":"≜","triangleright":"▹","trianglerighteq":"⊵","tridot":"◬","trie":"≜","triminus":"⨺","TripleDot":"⃛","triplus":"⨹","trisb":"⧍","tritime":"⨻","trpezium":"⏢","Tscr":"𝒯","tscr":"𝓉","TScy":"Ц","tscy":"ц","TSHcy":"Ћ","tshcy":"ћ","Tstrok":"Ŧ","tstrok":"ŧ","twixt":"≬","twoheadleftarrow":"↞","twoheadrightarrow":"↠","Uacute":"Ú","uacute":"ú","uarr":"↑","Uarr":"↟","uArr":"⇑","Uarrocir":"⥉","Ubrcy":"Ў","ubrcy":"ў","Ubreve":"Ŭ","ubreve":"ŭ","Ucirc":"Û","ucirc":"û","Ucy":"У","ucy":"у","udarr":"⇅","Udblac":"Ű","udblac":"ű","udhar":"⥮","ufisht":"⥾","Ufr":"𝔘","ufr":"𝔲","Ugrave":"Ù","ugrave":"ù","uHar":"⥣","uharl":"↿","uharr":"↾","uhblk":"▀","ulcorn":"⌜","ulcorner":"⌜","ulcrop":"⌏","ultri":"◸","Umacr":"Ū","umacr":"ū","uml":"¨","UnderBar":"_","UnderBrace":"⏟","UnderBracket":"⎵","UnderParenthesis":"⏝","Union":"⋃","UnionPlus":"⊎","Uogon":"Ų","uogon":"ų","Uopf":"𝕌","uopf":"𝕦","UpArrowBar":"⤒","uparrow":"↑","UpArrow":"↑","Uparrow":"⇑","UpArrowDownArrow":"⇅","updownarrow":"↕","UpDownArrow":"↕","Updownarrow":"⇕","UpEquilibrium":"⥮","upharpoonleft":"↿","upharpoonright":"↾","uplus":"⊎","UpperLeftArrow":"↖","UpperRightArrow":"↗","upsi":"υ","Upsi":"ϒ","upsih":"ϒ","Upsilon":"Υ","upsilon":"υ","UpTeeArrow":"↥","UpTee":"⊥","upuparrows":"⇈","urcorn":"⌝","urcorner":"⌝","urcrop":"⌎","Uring":"Ů","uring":"ů","urtri":"◹","Uscr":"𝒰","uscr":"𝓊","utdot":"⋰","Utilde":"Ũ","utilde":"ũ","utri":"▵","utrif":"▴","uuarr":"⇈","Uuml":"Ü","uuml":"ü","uwangle":"⦧","vangrt":"⦜","varepsilon":
"ϵ","varkappa":"ϰ","varnothing":"∅","varphi":"ϕ","varpi":"ϖ","varpropto":"∝","varr":"↕","vArr":"⇕","varrho":"ϱ","varsigma":"ς","varsubsetneq":"⊊︀","varsubsetneqq":"⫋︀","varsupsetneq":"⊋︀","varsupsetneqq":"⫌︀","vartheta":"ϑ","vartriangleleft":"⊲","vartriangleright":"⊳","vBar":"⫨","Vbar":"⫫","vBarv":"⫩","Vcy":"В","vcy":"в","vdash":"⊢","vDash":"⊨","Vdash":"⊩","VDash":"⊫","Vdashl":"⫦","veebar":"⊻","vee":"∨","Vee":"⋁","veeeq":"≚","vellip":"⋮","verbar":"|","Verbar":"‖","vert":"|","Vert":"‖","VerticalBar":"∣","VerticalLine":"|","VerticalSeparator":"❘","VerticalTilde":"≀","VeryThinSpace":" ","Vfr":"𝔙","vfr":"𝔳","vltri":"⊲","vnsub":"⊂⃒","vnsup":"⊃⃒","Vopf":"𝕍","vopf":"𝕧","vprop":"∝","vrtri":"⊳","Vscr":"𝒱","vscr":"𝓋","vsubnE":"⫋︀","vsubne":"⊊︀","vsupnE":"⫌︀","vsupne":"⊋︀","Vvdash":"⊪","vzigzag":"⦚","Wcirc":"Ŵ","wcirc":"ŵ","wedbar":"⩟","wedge":"∧","Wedge":"⋀","wedgeq":"≙","weierp":"℘","Wfr":"𝔚","wfr":"𝔴","Wopf":"𝕎","wopf":"𝕨","wp":"℘","wr":"≀","wreath":"≀","Wscr":"𝒲","wscr":"𝓌","xcap":"⋂","xcirc":"◯","xcup":"⋃","xdtri":"▽","Xfr":"𝔛","xfr":"𝔵","xharr":"⟷","xhArr":"⟺","Xi":"Ξ","xi":"ξ","xlarr":"⟵","xlArr":"⟸","xmap":"⟼","xnis":"⋻","xodot":"⨀","Xopf":"𝕏","xopf":"𝕩","xoplus":"⨁","xotime":"⨂","xrarr":"⟶","xrArr":"⟹","Xscr":"𝒳","xscr":"𝓍","xsqcup":"⨆","xuplus":"⨄","xutri":"△","xvee":"⋁","xwedge":"⋀","Yacute":"Ý","yacute":"ý","YAcy":"Я","yacy":"я","Ycirc":"Ŷ","ycirc":"ŷ","Ycy":"Ы","ycy":"ы","yen":"¥","Yfr":"𝔜","yfr":"𝔶","YIcy":"Ї","yicy":"ї","Yopf":"𝕐","yopf":"𝕪","Yscr":"𝒴","yscr":"𝓎","YUcy":"Ю","yucy":"ю","yuml":"ÿ","Yuml":"Ÿ","Zacute":"Ź","zacute":"ź","Zcaron":"Ž","zcaron":"ž","Zcy":"З","zcy":"з","Zdot":"Ż","zdot":"ż","zeetrf":"ℨ","ZeroWidthSpace":"","Zeta":"Ζ","zeta":"ζ","zfr":"𝔷","Zfr":"ℨ","ZHcy":"Ж","zhcy":"ж","zigrarr":"⇝","zopf":"𝕫","Zopf":"ℤ","Zscr":"𝒵","zscr":"𝓏","zwj":"","zwnj":""}');
-
/***/ })
/******/ });
diff --git a/netbox/project-static/dist/graphiql/index.umd.js b/netbox/project-static/dist/graphiql/index.umd.js
index a30153f93..5089eab84 100644
--- a/netbox/project-static/dist/graphiql/index.umd.js
+++ b/netbox/project-static/dist/graphiql/index.umd.js
@@ -1 +1 @@
-(function(z,W){typeof exports=="object"&&typeof module<"u"?W(exports,require("@graphiql/react"),require("react"),require("graphql")):typeof define=="function"&&define.amd?define(["exports","@graphiql/react","react","graphql"],W):(z=typeof globalThis<"u"?globalThis:z||self,W(z.GraphiQLPluginExplorer={},z.GraphiQL.React,z.React,z.GraphiQL.GraphQL))})(this,function(z,W,N,De){"use strict";function ve(i){const t=Object.create(null,{[Symbol.toStringTag]:{value:"Module"}});if(i){for(const s in i)if(s!=="default"){const n=Object.getOwnPropertyDescriptor(i,s);Object.defineProperty(t,s,n.get?n:{enumerable:!0,get:()=>i[s]})}}return t.default=i,Object.freeze(t)}const Te=ve(N),Ne=ve(De);function ge(i){return i&&Object.prototype.hasOwnProperty.call(i,"default")&&Object.keys(i).length===1?i.default:i}var re={},ie={};const Le=ge(Te),Me=ge(Ne);Object.defineProperty(ie,"__esModule",{value:!0});var je=typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?function(i){return typeof i}:function(i){return i&&typeof Symbol=="function"&&i.constructor===Symbol&&i!==Symbol.prototype?"symbol":typeof i},ye=function(){function i(t,s){var n=[],e=!0,l=!1,c=void 0;try{for(var f=t[Symbol.iterator](),u;!(e=(u=f.next()).done)&&(n.push(u.value),!(s&&n.length===s));e=!0);}catch(r){l=!0,c=r}finally{try{!e&&f.return&&f.return()}finally{if(l)throw c}}return n}return function(t,s){if(Array.isArray(t))return t;if(Symbol.iterator in Object(t))return i(t,s);throw new TypeError("Invalid attempt to destructure non-iterable instance")}}(),k=Object.assign||function(i){for(var t=1;t DOM element.
- """
- if record.enabled:
- return 'enabled'
- else:
- return 'disabled'
-
-
-def get_interface_connected_attribute(record):
- """
- Get interface disconnected state as string to attach to DOM element.
- """
- if record.mark_connected or record.cable:
- return 'connected'
- else:
- return 'disconnected'
-
-
#
# Device roles
#
@@ -341,6 +305,10 @@ class ModularDeviceComponentTable(DeviceComponentTable):
verbose_name=_('Module'),
linkify=True
)
+ inventory_items = columns.ManyToManyColumn(
+ linkify_item=True,
+ verbose_name=_('Inventory Items'),
+ )
class CableTerminationTable(NetBoxTable):
@@ -363,6 +331,14 @@ class CableTerminationTable(NetBoxTable):
verbose_name=_('Mark Connected'),
)
+ class Meta:
+ row_attrs = {
+ 'data-name': lambda record: record.name,
+ 'data-mark-connected': lambda record: "true" if record.mark_connected else "false",
+ 'data-cable-status': lambda record: record.cable.status if record.cable else "",
+ 'data-type': lambda record: record.type
+ }
+
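The new CableTerminationTable.Meta.row_attrs replaces the old CSS-class helpers with per-row data attributes: django-tables2 calls each lambda with the record and renders the result onto that row's <tr>. A quick sanity sketch with a stand-in record (a hypothetical object, not a real NetBox model) shows the values these callables produce:

    class FakeCable:
        status = 'connected'

    class FakeConsolePort:
        name = 'Console0'
        mark_connected = False
        cable = FakeCable()
        type = 'de-9'

    row_attrs = {
        'data-name': lambda record: record.name,
        'data-mark-connected': lambda record: "true" if record.mark_connected else "false",
        'data-cable-status': lambda record: record.cable.status if record.cable else "",
        'data-type': lambda record: record.type,
    }

    print({attr: fn(FakeConsolePort()) for attr, fn in row_attrs.items()})
    # {'data-name': 'Console0', 'data-mark-connected': 'false',
    #  'data-cable-status': 'connected', 'data-type': 'de-9'}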
def value_link_peer(self, value):
return ', '.join([
f"{termination.parent_object} > {termination}" for termination in value
@@ -394,7 +370,7 @@ class ConsolePortTable(ModularDeviceComponentTable, PathEndpointTable):
model = models.ConsolePort
fields = (
'pk', 'id', 'name', 'device', 'module_bay', 'module', 'label', 'type', 'speed', 'description',
- 'mark_connected', 'cable', 'cable_color', 'link_peer', 'connection', 'tags', 'created', 'last_updated',
+ 'mark_connected', 'cable', 'cable_color', 'link_peer', 'connection', 'inventory_items', 'tags', 'created', 'last_updated',
)
default_columns = ('pk', 'name', 'device', 'label', 'type', 'speed', 'description')
@@ -410,16 +386,13 @@ class DeviceConsolePortTable(ConsolePortTable):
extra_buttons=CONSOLEPORT_BUTTONS
)
- class Meta(DeviceComponentTable.Meta):
+ class Meta(CableTerminationTable.Meta, DeviceComponentTable.Meta):
model = models.ConsolePort
fields = (
'pk', 'id', 'name', 'module_bay', 'module', 'label', 'type', 'speed', 'description', 'mark_connected',
'cable', 'cable_color', 'link_peer', 'connection', 'tags', 'actions'
)
default_columns = ('pk', 'name', 'label', 'type', 'speed', 'description', 'cable', 'connection')
- row_attrs = {
- 'class': get_cabletermination_row_class
- }
class ConsoleServerPortTable(ModularDeviceComponentTable, PathEndpointTable):
@@ -438,7 +411,7 @@ class ConsoleServerPortTable(ModularDeviceComponentTable, PathEndpointTable):
model = models.ConsoleServerPort
fields = (
'pk', 'id', 'name', 'device', 'module_bay', 'module', 'label', 'type', 'speed', 'description',
- 'mark_connected', 'cable', 'cable_color', 'link_peer', 'connection', 'tags', 'created', 'last_updated',
+ 'mark_connected', 'cable', 'cable_color', 'link_peer', 'connection', 'inventory_items', 'tags', 'created', 'last_updated',
)
default_columns = ('pk', 'name', 'device', 'label', 'type', 'speed', 'description')
@@ -455,16 +428,13 @@ class DeviceConsoleServerPortTable(ConsoleServerPortTable):
extra_buttons=CONSOLESERVERPORT_BUTTONS
)
- class Meta(DeviceComponentTable.Meta):
+ class Meta(CableTerminationTable.Meta, DeviceComponentTable.Meta):
model = models.ConsoleServerPort
fields = (
'pk', 'id', 'name', 'module_bay', 'module', 'label', 'type', 'speed', 'description', 'mark_connected',
'cable', 'cable_color', 'link_peer', 'connection', 'tags', 'actions',
)
default_columns = ('pk', 'name', 'label', 'type', 'speed', 'description', 'cable', 'connection')
- row_attrs = {
- 'class': get_cabletermination_row_class
- }
class PowerPortTable(ModularDeviceComponentTable, PathEndpointTable):
@@ -489,8 +459,8 @@ class PowerPortTable(ModularDeviceComponentTable, PathEndpointTable):
model = models.PowerPort
fields = (
'pk', 'id', 'name', 'device', 'module_bay', 'module', 'label', 'type', 'description', 'mark_connected',
- 'maximum_draw', 'allocated_draw', 'cable', 'cable_color', 'link_peer', 'connection', 'tags', 'created',
- 'last_updated',
+ 'maximum_draw', 'allocated_draw', 'cable', 'cable_color', 'link_peer', 'connection', 'inventory_items',
+ 'tags', 'created', 'last_updated',
)
default_columns = ('pk', 'name', 'device', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description')
@@ -507,7 +477,7 @@ class DevicePowerPortTable(PowerPortTable):
extra_buttons=POWERPORT_BUTTONS
)
- class Meta(DeviceComponentTable.Meta):
+ class Meta(CableTerminationTable.Meta, DeviceComponentTable.Meta):
model = models.PowerPort
fields = (
'pk', 'id', 'name', 'module_bay', 'module', 'label', 'type', 'maximum_draw', 'allocated_draw',
@@ -516,9 +486,6 @@ class DevicePowerPortTable(PowerPortTable):
default_columns = (
'pk', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description', 'cable', 'connection',
)
- row_attrs = {
- 'class': get_cabletermination_row_class
- }
class PowerOutletTable(ModularDeviceComponentTable, PathEndpointTable):
@@ -541,8 +508,8 @@ class PowerOutletTable(ModularDeviceComponentTable, PathEndpointTable):
model = models.PowerOutlet
fields = (
'pk', 'id', 'name', 'device', 'module_bay', 'module', 'label', 'type', 'description', 'power_port',
- 'feed_leg', 'mark_connected', 'cable', 'cable_color', 'link_peer', 'connection', 'tags', 'created',
- 'last_updated',
+ 'feed_leg', 'mark_connected', 'cable', 'cable_color', 'link_peer', 'connection', 'inventory_items',
+ 'tags', 'created', 'last_updated',
)
default_columns = ('pk', 'name', 'device', 'label', 'type', 'power_port', 'feed_leg', 'description')
@@ -558,7 +525,7 @@ class DevicePowerOutletTable(PowerOutletTable):
extra_buttons=POWEROUTLET_BUTTONS
)
- class Meta(DeviceComponentTable.Meta):
+ class Meta(CableTerminationTable.Meta, DeviceComponentTable.Meta):
model = models.PowerOutlet
fields = (
'pk', 'id', 'name', 'module_bay', 'module', 'label', 'type', 'power_port', 'feed_leg', 'description',
@@ -567,9 +534,6 @@ class DevicePowerOutletTable(PowerOutletTable):
default_columns = (
'pk', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description', 'cable', 'connection',
)
- row_attrs = {
- 'class': get_cabletermination_row_class
- }
class BaseInterfaceTable(NetBoxTable):
@@ -646,10 +610,6 @@ class InterfaceTable(ModularDeviceComponentTable, BaseInterfaceTable, PathEndpoi
verbose_name=_('VRF'),
linkify=True
)
- inventory_items = tables.ManyToManyColumn(
- linkify_item=True,
- verbose_name=_('Inventory Items'),
- )
tags = columns.TagColumn(
url_name='dcim:interface_list'
)
@@ -706,11 +666,12 @@ class DeviceInterfaceTable(InterfaceTable):
'cable', 'connection',
)
row_attrs = {
- 'class': get_interface_row_class,
'data-name': lambda record: record.name,
- 'data-enabled': get_interface_state_attribute,
- 'data-type': lambda record: record.type,
- 'data-connected': get_interface_connected_attribute
+ 'data-enabled': lambda record: "enabled" if record.enabled else "disabled",
+ 'data-virtual': lambda record: "true" if record.is_virtual else "false",
+ 'data-mark-connected': lambda record: "true" if record.mark_connected else "false",
+ 'data-cable-status': lambda record: record.cable.status if record.cable else "",
+ 'data-type': lambda record: record.type
}
@@ -740,8 +701,8 @@ class FrontPortTable(ModularDeviceComponentTable, CableTerminationTable):
model = models.FrontPort
fields = (
'pk', 'id', 'name', 'device', 'module_bay', 'module', 'label', 'type', 'color', 'rear_port',
- 'rear_port_position', 'description', 'mark_connected', 'cable', 'cable_color', 'link_peer', 'tags',
- 'created', 'last_updated',
+ 'rear_port_position', 'description', 'mark_connected', 'cable', 'cable_color', 'link_peer',
+ 'inventory_items', 'tags', 'created', 'last_updated',
)
default_columns = (
'pk', 'name', 'device', 'label', 'type', 'color', 'rear_port', 'rear_port_position', 'description',
@@ -760,7 +721,7 @@ class DeviceFrontPortTable(FrontPortTable):
extra_buttons=FRONTPORT_BUTTONS
)
- class Meta(DeviceComponentTable.Meta):
+ class Meta(CableTerminationTable.Meta, DeviceComponentTable.Meta):
model = models.FrontPort
fields = (
'pk', 'id', 'name', 'module_bay', 'module', 'label', 'type', 'rear_port', 'rear_port_position',
@@ -769,9 +730,6 @@ class DeviceFrontPortTable(FrontPortTable):
default_columns = (
'pk', 'name', 'label', 'type', 'rear_port', 'rear_port_position', 'description', 'cable', 'link_peer',
)
- row_attrs = {
- 'class': get_cabletermination_row_class
- }
class RearPortTable(ModularDeviceComponentTable, CableTerminationTable):
@@ -793,7 +751,7 @@ class RearPortTable(ModularDeviceComponentTable, CableTerminationTable):
model = models.RearPort
fields = (
'pk', 'id', 'name', 'device', 'module_bay', 'module', 'label', 'type', 'color', 'positions', 'description',
- 'mark_connected', 'cable', 'cable_color', 'link_peer', 'tags', 'created', 'last_updated',
+ 'mark_connected', 'cable', 'cable_color', 'link_peer', 'inventory_items', 'tags', 'created', 'last_updated',
)
default_columns = ('pk', 'name', 'device', 'label', 'type', 'color', 'description')
@@ -810,7 +768,7 @@ class DeviceRearPortTable(RearPortTable):
extra_buttons=REARPORT_BUTTONS
)
- class Meta(DeviceComponentTable.Meta):
+ class Meta(CableTerminationTable.Meta, DeviceComponentTable.Meta):
model = models.RearPort
fields = (
'pk', 'id', 'name', 'module_bay', 'module', 'label', 'type', 'positions', 'description', 'mark_connected',
@@ -819,9 +777,6 @@ class DeviceRearPortTable(RearPortTable):
default_columns = (
'pk', 'name', 'label', 'type', 'positions', 'description', 'cable', 'link_peer',
)
- row_attrs = {
- 'class': get_cabletermination_row_class
- }
class DeviceBayTable(DeviceComponentTable):
diff --git a/netbox/dcim/tests/test_api.py b/netbox/dcim/tests/test_api.py
index 0a3931696..52b850b24 100644
--- a/netbox/dcim/tests/test_api.py
+++ b/netbox/dcim/tests/test_api.py
@@ -10,6 +10,7 @@ from dcim.models import *
from extras.models import ConfigTemplate
from ipam.models import ASN, RIR, VLAN, VRF
from netbox.api.serializers import GenericObjectSerializer
+from tenancy.models import Tenant
from utilities.testing import APITestCase, APIViewTestCases, create_test_device
from virtualization.models import Cluster, ClusterType
from wireless.choices import WirelessChannelChoices
@@ -152,6 +153,7 @@ class SiteTest(APIViewTestCases.APIViewTestCase):
Site.objects.bulk_create(sites)
rir = RIR.objects.create(name='RFC 6996', is_private=True)
+ tenant = Tenant.objects.create(name='Tenant 1', slug='tenant-1')
asns = [
ASN(asn=65000 + i, rir=rir) for i in range(8)
@@ -166,6 +168,7 @@ class SiteTest(APIViewTestCases.APIViewTestCase):
'group': groups[1].pk,
'status': SiteStatusChoices.STATUS_ACTIVE,
'asns': [asns[0].pk, asns[1].pk],
+ 'tenant': tenant.pk,
},
{
'name': 'Site 5',
@@ -230,7 +233,7 @@ class LocationTest(APIViewTestCases.APIViewTestCase):
'name': 'Test Location 6',
'slug': 'test-location-6',
'site': sites[1].pk,
- 'parent': parent_locations[1].pk,
+ # Omit parent to test uniqueness constraint
'status': LocationStatusChoices.STATUS_PLANNED,
},
]
@@ -2307,6 +2310,6 @@ class VirtualDeviceContextTest(APIViewTestCases.APIViewTestCase):
'device': devices[1].pk,
'status': 'active',
'name': 'VDC 3',
- 'identifier': 3,
+ # Omit identifier to test uniqueness constraint
},
]
diff --git a/netbox/dcim/tests/test_cablepaths.py b/netbox/dcim/tests/test_cablepaths.py
index 49a71022e..cd7b0e6d7 100644
--- a/netbox/dcim/tests/test_cablepaths.py
+++ b/netbox/dcim/tests/test_cablepaths.py
@@ -394,6 +394,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 2)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 2
cable2.delete()
path1 = self.assertPathExists(
@@ -450,6 +453,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 2)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 2
cable2.delete()
path1 = self.assertPathExists(
@@ -558,6 +564,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 4)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 3
cable3.delete()
@@ -673,6 +682,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 4)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 3
cable3.delete()
@@ -804,6 +816,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 4)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 3
cable3.delete()
@@ -931,6 +946,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 4)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 5
cable5.delete()
@@ -1034,6 +1052,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 4)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 3
cable3.delete()
@@ -1093,6 +1114,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 3)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 1
cable1.delete()
@@ -1135,6 +1159,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 1)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
def test_210_interface_to_circuittermination(self):
"""
[IF1] --C1-- [CT1]
@@ -1156,6 +1183,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 1)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 1
cable1.delete()
self.assertEqual(CablePath.objects.count(), 0)
@@ -1212,6 +1242,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 2)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 2
cable2.delete()
path1 = self.assertPathExists(
@@ -1277,6 +1310,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 2)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 2
cable2.delete()
path1 = self.assertPathExists(
@@ -1314,6 +1350,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 1)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 1
cable1.delete()
self.assertEqual(CablePath.objects.count(), 0)
@@ -1342,6 +1381,9 @@ class CablePathTestCase(TestCase):
self.assertEqual(CablePath.objects.count(), 1)
self.assertTrue(CablePath.objects.first().is_complete)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 1
cable1.delete()
self.assertEqual(CablePath.objects.count(), 0)
@@ -1439,6 +1481,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 4)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cables 3-4
cable3.delete()
cable4.delete()
@@ -1495,6 +1540,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 2)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 2
cable2.delete()
path1 = self.assertPathExists(
@@ -1578,6 +1626,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 2)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 2
cable2.delete()
@@ -1697,6 +1748,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 4)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
# Delete cable 3
cable3.delete()
@@ -1784,6 +1838,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 2)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
def test_220_interface_to_interface_duplex_via_multiple_front_and_rear_ports(self):
"""
[IF1] --C1-- [FP1] [RP1] --C2-- [RP2] [FP2] --C3-- [IF2]
@@ -1877,6 +1934,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 3)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
def test_221_non_symmetric_paths(self):
"""
[IF1] --C1-- [FP1] [RP1] --C2-- [RP2] [FP2] --C3-- -------------------------------------- [IF2]
@@ -1997,6 +2057,9 @@ class CablePathTestCase(TestCase):
)
self.assertEqual(CablePath.objects.count(), 3)
+ # Test SVG generation
+ CableTraceSVG(interface1).render()
+
def test_301_create_path_via_existing_cable(self):
"""
[IF1] --C1-- [FP1] [RP1] --C2-- [RP2] [FP2] --C3-- [IF2]
diff --git a/netbox/dcim/tests/test_filtersets.py b/netbox/dcim/tests/test_filtersets.py
index 96ea020b3..df0dc7c7e 100644
--- a/netbox/dcim/tests/test_filtersets.py
+++ b/netbox/dcim/tests/test_filtersets.py
@@ -50,9 +50,9 @@ class DeviceComponentTemplateFilterSetTests:
params = {'description': ['foobar1', 'foobar2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
- def test_devicetype_id(self):
+ def test_device_type_id(self):
device_types = DeviceType.objects.all()[:2]
- params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
+ params = {'device_type_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
@@ -1753,9 +1753,9 @@ class InventoryItemTemplateTestCase(TestCase, DeviceComponentTemplateFilterSetTe
params = {'name': ['Inventory Item 1', 'Inventory Item 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
- def test_devicetype_id(self):
+ def test_device_type_id(self):
device_types = DeviceType.objects.all()[:2]
- params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
+ params = {'device_type_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_label(self):
@@ -2103,6 +2103,9 @@ class DeviceTestCase(TestCase, ChangeLoggedFilterSetTests):
Device.objects.filter(pk=devices[0].pk).update(virtual_chassis=virtual_chassis, vc_position=1, vc_priority=1)
Device.objects.filter(pk=devices[1].pk).update(virtual_chassis=virtual_chassis, vc_position=2, vc_priority=2)
+ # VirtualDeviceContext assignment for filtering
+ VirtualDeviceContext.objects.create(device=devices[0], name="VDC 1", identifier=1, status='active')
+
def test_q(self):
params = {'q': 'foobar1'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
@@ -2336,6 +2339,12 @@ class DeviceTestCase(TestCase, ChangeLoggedFilterSetTests):
params = {'tenant_group': [tenant_groups[0].slug, tenant_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
+ def test_has_virtual_device_context(self):
+ params = {'has_virtual_device_context': 'true'}
+ self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
+ params = {'has_virtual_device_context': 'false'}
+ self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
+
class ModuleTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Module.objects.all()
diff --git a/netbox/dcim/views.py b/netbox/dcim/views.py
index 120bbcb59..670995231 100644
--- a/netbox/dcim/views.py
+++ b/netbox/dcim/views.py
@@ -28,7 +28,9 @@ from utilities.permissions import get_permission_for_model
from utilities.query import count_related
from utilities.query_functions import CollateAsChar
from utilities.views import GetReturnURLMixin, ObjectPermissionRequiredMixin, ViewTab, register_model_view
+from virtualization.filtersets import VirtualMachineFilterSet
from virtualization.models import VirtualMachine
+from virtualization.tables import VirtualMachineTable
from . import filtersets, forms, tables
from .choices import DeviceFaceChoices
from .models import *
@@ -2085,6 +2087,24 @@ class DeviceRenderConfigView(generic.ObjectView):
}
+@register_model_view(Device, 'virtual-machines')
+class DeviceVirtualMachinesView(generic.ObjectChildrenView):
+ queryset = Device.objects.all()
+ child_model = VirtualMachine
+ table = VirtualMachineTable
+ filterset = VirtualMachineFilterSet
+ tab = ViewTab(
+ label=_('Virtual Machines'),
+ badge=lambda obj: VirtualMachine.objects.filter(cluster=obj.cluster, device=obj).count(),
+ weight=2200,
+ hide_if_empty=True,
+ permission='virtualization.view_virtualmachine'
+ )
+
+ def get_children(self, request, parent):
+ return self.child_model.objects.restrict(request.user, 'view').filter(cluster=parent.cluster, device=parent)
+
+
class DeviceBulkImportView(generic.BulkImportView):
queryset = Device.objects.all()
model_form = forms.DeviceImportForm
@@ -2965,7 +2985,6 @@ class InventoryItemChildrenView(generic.ObjectChildrenView):
child_model = InventoryItem
table = tables.InventoryItemTable
filterset = filtersets.InventoryItemFilterSet
- template_name = 'generic/object_children.html'
tab = ViewTab(
label=_('Children'),
badge=lambda obj: obj.child_items.count(),
@@ -3160,12 +3179,6 @@ class CableListView(generic.ObjectListView):
filterset = filtersets.CableFilterSet
filterset_form = forms.CableFilterForm
table = tables.CableTable
- actions = {
- 'import': {'add'},
- 'export': {'view'},
- 'bulk_edit': {'change'},
- 'bulk_delete': {'delete'},
- }
@register_model_view(Cable)
@@ -3177,34 +3190,29 @@ class CableView(generic.ObjectView):
class CableEditView(generic.ObjectEditView):
queryset = Cable.objects.all()
template_name = 'dcim/cable_edit.html'
+ htmx_template_name = 'dcim/htmx/cable_edit.html'
- def dispatch(self, request, *args, **kwargs):
-
- # If creating a new Cable, initialize the form class using URL query params
- if 'pk' not in kwargs:
- self.form = forms.get_cable_form(
- a_type=CABLE_TERMINATION_TYPES.get(request.GET.get('a_terminations_type')),
- b_type=CABLE_TERMINATION_TYPES.get(request.GET.get('b_terminations_type'))
- )
-
- return super().dispatch(request, *args, **kwargs)
-
- def get_object(self, **kwargs):
+ def alter_object(self, obj, request, url_args, url_kwargs):
"""
- Hack into get_object() to set the form class when editing an existing Cable, since ObjectEditView
+ Hack into alter_object() to set the form class when editing an existing Cable, since ObjectEditView
doesn't currently provide a hook for dynamic class resolution.
"""
- obj = super().get_object(**kwargs)
+ a_terminations_type = CABLE_TERMINATION_TYPES.get(
+ request.GET.get('a_terminations_type') or request.POST.get('a_terminations_type')
+ )
+ b_terminations_type = CABLE_TERMINATION_TYPES.get(
+ request.GET.get('b_terminations_type') or request.POST.get('b_terminations_type')
+ )
if obj.pk:
- # TODO: Optimize this logic
- termination_a = obj.terminations.filter(cable_end='A').first()
- a_type = termination_a.termination._meta.model if termination_a else None
- termination_b = obj.terminations.filter(cable_end='B').first()
- b_type = termination_b.termination._meta.model if termination_b else None
- self.form = forms.get_cable_form(a_type, b_type)
+ if not a_terminations_type and (termination_a := obj.terminations.filter(cable_end='A').first()):
+ a_terminations_type = termination_a.termination._meta.model
+ if not b_terminations_type and (termination_b := obj.terminations.filter(cable_end='B').first()):
+ b_terminations_type = termination_b.termination._meta.model
- return obj
+ self.form = forms.get_cable_form(a_terminations_type, b_terminations_type)
+
+ return super().alter_object(obj, request, url_args, url_kwargs)
def get_extra_addanother_params(self, request):
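
A minimal sketch of how the reworked CableEditView resolves its form class, assuming the existing CABLE_TERMINATION_TYPES keys (e.g. 'dcim.interface'); the URL and values below are illustrative only:

    # Creating a cable between an Interface and a FrontPort:
    #   GET /dcim/cables/add/?a_terminations_type=dcim.interface&b_terminations_type=dcim.frontport
    # alter_object() resolves each end in this order:
    #   1. a_terminations_type / b_terminations_type from GET (or POST on submit)
    #   2. the model of the cable's existing A/B termination, when editing
    #   3. None, which falls back to the generic cable form
    a_type = CABLE_TERMINATION_TYPES.get('dcim.interface')   # -> Interface
    b_type = CABLE_TERMINATION_TYPES.get('dcim.frontport')   # -> FrontPort
    form_class = forms.get_cable_form(a_type, b_type)
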
diff --git a/netbox/extras/api/serializers_/change_logging.py b/netbox/extras/api/serializers_/change_logging.py
index 32585637c..46fb901ff 100644
--- a/netbox/extras/api/serializers_/change_logging.py
+++ b/netbox/extras/api/serializers_/change_logging.py
@@ -30,6 +30,16 @@ class ObjectChangeSerializer(BaseModelSerializer):
changed_object = serializers.SerializerMethodField(
read_only=True
)
+ prechange_data = serializers.JSONField(
+ source='prechange_data_clean',
+ read_only=True,
+ allow_null=True
+ )
+ postchange_data = serializers.JSONField(
+ source='postchange_data_clean',
+ read_only=True,
+ allow_null=True
+ )
class Meta:
model = ObjectChange
diff --git a/netbox/extras/api/serializers_/customfields.py b/netbox/extras/api/serializers_/customfields.py
index 79bb39557..082047e94 100644
--- a/netbox/extras/api/serializers_/customfields.py
+++ b/netbox/extras/api/serializers_/customfields.py
@@ -65,7 +65,7 @@ class CustomFieldSerializer(ValidatedModelSerializer):
'id', 'url', 'display', 'object_types', 'type', 'related_object_type', 'data_type', 'name', 'label',
'group_name', 'description', 'required', 'search_weight', 'filter_logic', 'ui_visible', 'ui_editable',
'is_cloneable', 'default', 'weight', 'validation_minimum', 'validation_maximum', 'validation_regex',
- 'choice_set', 'created', 'last_updated',
+ 'choice_set', 'comments', 'created', 'last_updated',
]
brief_fields = ('id', 'url', 'display', 'name', 'description')
diff --git a/netbox/extras/api/serializers_/events.py b/netbox/extras/api/serializers_/events.py
index 4285b12e6..469da3e8c 100644
--- a/netbox/extras/api/serializers_/events.py
+++ b/netbox/extras/api/serializers_/events.py
@@ -47,8 +47,7 @@ class EventRuleSerializer(NetBoxModelSerializer):
# We need to manually instantiate the serializer for scripts
if instance.action_type == EventRuleActionChoices.SCRIPT:
script = instance.action_object
- instance = script.python_class() if script.python_class else None
- return ScriptSerializer(instance, nested=True, context=context).data
+ return ScriptSerializer(script, nested=True, context=context).data
else:
serializer = get_serializer_for_model(instance.action_object_type.model_class())
return serializer(instance.action_object, nested=True, context=context).data
diff --git a/netbox/extras/api/serializers_/journaling.py b/netbox/extras/api/serializers_/journaling.py
index 46ab0477b..1a44e7e2e 100644
--- a/netbox/extras/api/serializers_/journaling.py
+++ b/netbox/extras/api/serializers_/journaling.py
@@ -43,7 +43,7 @@ class JournalEntrySerializer(NetBoxModelSerializer):
def validate(self, data):
# Validate that the parent object exists
- if 'assigned_object_type' in data and 'assigned_object_id' in data:
+ if not self.nested and 'assigned_object_type' in data and 'assigned_object_id' in data:
try:
data['assigned_object_type'].get_object_for_this_type(id=data['assigned_object_id'])
except ObjectDoesNotExist:
@@ -51,10 +51,7 @@ class JournalEntrySerializer(NetBoxModelSerializer):
f"Invalid assigned_object: {data['assigned_object_type']} ID {data['assigned_object_id']}"
)
- # Enforce model validation
- super().validate(data)
-
- return data
+ return super().validate(data)
@extend_schema_field(serializers.JSONField(allow_null=True))
def get_assigned_object(self, instance):
diff --git a/netbox/extras/api/views.py b/netbox/extras/api/views.py
index 0a5303741..05087b2d5 100644
--- a/netbox/extras/api/views.py
+++ b/netbox/extras/api/views.py
@@ -1,3 +1,4 @@
+from django.http import Http404
from django.shortcuts import get_object_or_404
from django_rq.queues import get_connection
from rest_framework import status
@@ -215,21 +216,32 @@ class ScriptViewSet(ModelViewSet):
_ignore_model_permissions = True
lookup_value_regex = '[^/]+' # Allow dots
+ def _get_script(self, pk):
+ # If pk is numeric, retrieve script by ID
+ if pk.isnumeric():
+ return get_object_or_404(self.queryset, pk=pk)
+
+ # Default to retrieval by module & name
+ try:
+ module_name, script_name = pk.split('.', maxsplit=1)
+ except ValueError:
+ raise Http404
+ return get_object_or_404(self.queryset, module__file_path=f'{module_name}.py', name=script_name)
+
def retrieve(self, request, pk):
- script = get_object_or_404(self.queryset, pk=pk)
+ script = self._get_script(pk)
serializer = serializers.ScriptDetailSerializer(script, context={'request': request})
return Response(serializer.data)
def post(self, request, pk):
"""
- Run a Script identified by the id and return the pending Job as the result
+ Run a Script identified by its numeric PK or module & name and return the pending Job as the result
"""
-
if not request.user.has_perm('extras.run_script'):
raise PermissionDenied("This user does not have permission to run scripts.")
- script = get_object_or_404(self.queryset, pk=pk)
+ script = self._get_script(pk)
input_serializer = serializers.ScriptInputSerializer(
data=request.data,
context={'script': script}
@@ -240,9 +252,9 @@ class ScriptViewSet(ModelViewSet):
raise RQWorkerNotRunningException()
if input_serializer.is_valid():
- script.result = Job.enqueue(
+ Job.enqueue(
run_script,
- instance=script.module,
+ instance=script,
name=script.python_class.class_name,
user=request.user,
data=input_serializer.data['data'],
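
With the _get_script() helper above, a script can now be addressed either by its numeric PK or by "<module>.<ScriptName>", where the module name is the script file name without its .py suffix. A hedged usage sketch (host, token, and script names are placeholders; the endpoint and payload follow the existing scripts API):

    import requests

    headers = {'Authorization': 'Token 0123456789abcdef0123456789abcdef01234567'}
    base = 'https://netbox.example.com/api/extras/scripts/'

    # Retrieve by module & name instead of numeric PK
    requests.get(base + 'my_scripts.MyCustomScript/', headers=headers)

    # Run the script; the response body describes the pending Job
    requests.post(
        base + 'my_scripts.MyCustomScript/',
        headers=headers,
        json={'data': {}, 'commit': True},
    )
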
diff --git a/netbox/extras/context_managers.py b/netbox/extras/context_managers.py
index 8de47465e..e72cb8cc2 100644
--- a/netbox/extras/context_managers.py
+++ b/netbox/extras/context_managers.py
@@ -13,13 +13,14 @@ def event_tracking(request):
:param request: WSGIRequest object with a unique `id` set
"""
current_request.set(request)
- events_queue.set([])
+ events_queue.set({})
yield
# Flush queued webhooks to RQ
- flush_events(events_queue.get())
+ if events := list(events_queue.get().values()):
+ flush_events(events)
# Clear context vars
current_request.set(None)
- events_queue.set([])
+ events_queue.set({})
diff --git a/netbox/extras/dashboard/widgets.py b/netbox/extras/dashboard/widgets.py
index 23f082ce2..cc11664e6 100644
--- a/netbox/extras/dashboard/widgets.py
+++ b/netbox/extras/dashboard/widgets.py
@@ -1,3 +1,4 @@
+import logging
import uuid
from functools import cached_property
from hashlib import sha256
@@ -32,6 +33,8 @@ __all__ = (
'WidgetConfigForm',
)
+logger = logging.getLogger('netbox.data_backends')
+
def get_object_type_choices():
return [
@@ -54,8 +57,15 @@ def get_models_from_content_types(content_types):
models = []
for content_type_id in content_types:
app_label, model_name = content_type_id.split('.')
- content_type = ObjectType.objects.get_by_natural_key(app_label, model_name)
- models.append(content_type.model_class())
+ try:
+ content_type = ObjectType.objects.get_by_natural_key(app_label, model_name)
+ if content_type.model_class():
+ models.append(content_type.model_class())
+ else:
+ logger.debug(f"Dashboard Widget model_class not found: {app_label}:{model_name}")
+ except ObjectType.DoesNotExist:
+ logger.debug(f"Dashboard Widget ObjectType not found: {app_label}:{model_name}")
+
return models
@@ -255,6 +265,7 @@ class ObjectListWidget(DashboardWidget):
parameters = self.config.get('url_params') or {}
if page_size := self.config.get('page_size'):
parameters['per_page'] = page_size
+ parameters['embedded'] = True
if parameters:
try:
@@ -318,7 +329,7 @@ class RSSFeedWidget(DashboardWidget):
try:
response = requests.get(
url=self.config['feed_url'],
- headers={'User-Agent': f'NetBox/{settings.VERSION}'},
+ headers={'User-Agent': f'NetBox/{settings.RELEASE.version}'},
proxies=settings.HTTP_PROXIES,
timeout=3
)
diff --git a/netbox/extras/events.py b/netbox/extras/events.py
index a33ac213c..22ce26ba9 100644
--- a/netbox/extras/events.py
+++ b/netbox/extras/events.py
@@ -58,15 +58,21 @@ def enqueue_object(queue, instance, user, request_id, action):
if model_name not in registry['model_features']['event_rules'].get(app_label, []):
return
- queue.append({
- 'content_type': ContentType.objects.get_for_model(instance),
- 'object_id': instance.pk,
- 'event': action,
- 'data': serialize_for_event(instance),
- 'snapshots': get_snapshots(instance, action),
- 'username': user.username,
- 'request_id': request_id
- })
+ assert instance.pk is not None
+ key = f'{app_label}.{model_name}:{instance.pk}'
+ if key in queue:
+ queue[key]['data'] = serialize_for_event(instance)
+ queue[key]['snapshots']['postchange'] = get_snapshots(instance, action)['postchange']
+ else:
+ queue[key] = {
+ 'content_type': ContentType.objects.get_for_model(instance),
+ 'object_id': instance.pk,
+ 'event': action,
+ 'data': serialize_for_event(instance),
+ 'snapshots': get_snapshots(instance, action),
+ 'username': user.username,
+ 'request_id': request_id
+ }
def process_event_rules(event_rules, model_name, event, data, username=None, snapshots=None, request_id=None):
@@ -118,7 +124,7 @@ def process_event_rules(event_rules, model_name, event, data, username=None, sna
# Enqueue a Job to record the script's execution
Job.enqueue(
"extras.scripts.run_script",
- instance=script.module,
+ instance=event_rule.action_object,
name=script.name,
user=user,
data=data
@@ -163,14 +169,14 @@ def process_event_queue(events):
)
-def flush_events(queue):
+def flush_events(events):
"""
- Flush a list of object representation to RQ for webhook processing.
+ Flush a list of object representations to RQ for event processing.
"""
- if queue:
+ if events:
for name in settings.EVENTS_PIPELINE:
try:
func = import_string(name)
- func(queue)
+ func(events)
except Exception as e:
logger.error(_("Cannot import events pipeline {name} error: {error}").format(name=name, error=e))
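
Because the events queue is now a dict keyed by "<app_label>.<model>:<pk>", repeated saves of the same object within a single request collapse into one queued event whose serialized data and postchange snapshot are simply refreshed. A rough sketch of the effect, with placeholder objects, mirroring test_duplicate_triggers later in this diff:

    queue = {}
    enqueue_object(queue, site, user, request_id, 'create')   # adds key "dcim.site:<pk>"
    site.description = 'updated within the same request'
    site.save()
    enqueue_object(queue, site, user, request_id, 'create')   # same key: entry refreshed, not duplicated
    flush_events(list(queue.values()))                        # one event emitted instead of two
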
diff --git a/netbox/extras/filtersets.py b/netbox/extras/filtersets.py
index 4674335c9..c3ac3e6ab 100644
--- a/netbox/extras/filtersets.py
+++ b/netbox/extras/filtersets.py
@@ -165,7 +165,8 @@ class CustomFieldFilterSet(ChangeLoggedModelFilterSet):
Q(name__icontains=value) |
Q(label__icontains=value) |
Q(group_name__icontains=value) |
- Q(description__icontains=value)
+ Q(description__icontains=value) |
+ Q(comments__icontains=value)
)
@@ -588,10 +589,6 @@ class ConfigContextFilterSet(ChangeLoggedModelFilterSet):
label=_('Data file (ID)'),
)
- # TODO: Remove in v4.1
- role = device_role
- role_id = device_role_id
-
class Meta:
model = ConfigContext
fields = ('id', 'name', 'is_active', 'description', 'weight', 'auto_sync_enabled', 'data_synced')
diff --git a/netbox/extras/forms/bulk_edit.py b/netbox/extras/forms/bulk_edit.py
index 9479fef99..7e9f452e8 100644
--- a/netbox/extras/forms/bulk_edit.py
+++ b/netbox/extras/forms/bulk_edit.py
@@ -5,7 +5,7 @@ from extras.choices import *
from extras.models import *
from netbox.forms import NetBoxModelBulkEditForm
from utilities.forms import BulkEditForm, add_blank_choice
-from utilities.forms.fields import ColorField, DynamicModelChoiceField
+from utilities.forms.fields import ColorField, CommentField, DynamicModelChoiceField
from utilities.forms.widgets import BulkEditNullBooleanSelect
__all__ = (
@@ -64,6 +64,7 @@ class CustomFieldBulkEditForm(BulkEditForm):
required=False,
widget=BulkEditNullBooleanSelect()
)
+ comments = CommentField()
nullable_fields = ('group_name', 'description', 'choice_set')
@@ -316,8 +317,4 @@ class JournalEntryBulkEditForm(BulkEditForm):
choices=add_blank_choice(JournalEntryKindChoices),
required=False
)
- comments = forms.CharField(
- label=_('Comments'),
- required=False,
- widget=forms.Textarea()
- )
+ comments = CommentField()
diff --git a/netbox/extras/forms/bulk_import.py b/netbox/extras/forms/bulk_import.py
index 55f71dbd2..c09eed3da 100644
--- a/netbox/extras/forms/bulk_import.py
+++ b/netbox/extras/forms/bulk_import.py
@@ -71,7 +71,7 @@ class CustomFieldImportForm(CSVModelForm):
fields = (
'name', 'label', 'group_name', 'type', 'object_types', 'related_object_type', 'required', 'description',
'search_weight', 'filter_logic', 'default', 'choice_set', 'weight', 'validation_minimum',
- 'validation_maximum', 'validation_regex', 'ui_visible', 'ui_editable', 'is_cloneable',
+ 'validation_maximum', 'validation_regex', 'ui_visible', 'ui_editable', 'is_cloneable', 'comments',
)
@@ -116,6 +116,12 @@ class CustomLinkImportForm(CSVModelForm):
queryset=ObjectType.objects.with_feature('custom_links'),
help_text=_("One or more assigned object types")
)
+ button_class = CSVChoiceField(
+ label=_('button class'),
+ required=False,
+ choices=CustomLinkButtonClassChoices,
+ help_text=_('The class of the first link in a group will be used for the dropdown button')
+ )
class Meta:
model = CustomLink
diff --git a/netbox/extras/forms/filtersets.py b/netbox/extras/forms/filtersets.py
index d4235c465..e6b001f2c 100644
--- a/netbox/extras/forms/filtersets.py
+++ b/netbox/extras/forms/filtersets.py
@@ -464,13 +464,10 @@ class JournalEntryFilterForm(NetBoxModelFilterSetForm):
required=False,
label=_('User')
)
- assigned_object_type_id = DynamicModelMultipleChoiceField(
- queryset=ObjectType.objects.all(),
+ assigned_object_type_id = ContentTypeMultipleChoiceField(
+ queryset=ObjectType.objects.with_feature('journaling'),
required=False,
label=_('Object Type'),
- widget=APISelectMultiple(
- api_url='/api/extras/content-types/',
- )
)
kind = forms.ChoiceField(
label=_('Kind'),
@@ -507,11 +504,8 @@ class ObjectChangeFilterForm(SavedFiltersMixin, FilterForm):
required=False,
label=_('User')
)
- changed_object_type_id = DynamicModelMultipleChoiceField(
- queryset=ObjectType.objects.all(),
+ changed_object_type_id = ContentTypeMultipleChoiceField(
+ queryset=ObjectType.objects.with_feature('change_logging'),
required=False,
label=_('Object Type'),
- widget=APISelectMultiple(
- api_url='/api/extras/content-types/',
- )
)
diff --git a/netbox/extras/forms/model_forms.py b/netbox/extras/forms/model_forms.py
index 680bec1e4..ebd6e6c08 100644
--- a/netbox/extras/forms/model_forms.py
+++ b/netbox/extras/forms/model_forms.py
@@ -53,6 +53,7 @@ class CustomFieldForm(forms.ModelForm):
queryset=CustomFieldChoiceSet.objects.all(),
required=False
)
+ comments = CommentField()
fieldsets = (
FieldSet(
@@ -121,7 +122,7 @@ class CustomFieldChoiceSetForm(forms.ModelForm):
label = label.replace('\\:', ':')
except ValueError:
value, label = line, line
- data.append((value, label))
+ data.append((value.strip(), label.strip()))
return data
@@ -272,15 +273,13 @@ class EventRuleForm(NetBoxModelForm):
required=False,
help_text=_('Enter parameters to pass to the action in JSON format.')
)
+ comments = CommentField()
fieldsets = (
FieldSet('name', 'description', 'object_types', 'enabled', 'tags', name=_('Event Rule')),
FieldSet('type_create', 'type_update', 'type_delete', 'type_job_start', 'type_job_end', name=_('Events')),
FieldSet('conditions', name=_('Conditions')),
- FieldSet(
- 'action_type', 'action_choice', 'action_object_type', 'action_object_id', 'action_data',
- name=_('Action')
- ),
+ FieldSet('action_type', 'action_choice', 'action_data', name=_('Action')),
)
class Meta:
diff --git a/netbox/extras/management/commands/runscript.py b/netbox/extras/management/commands/runscript.py
index 160e8813f..ef1bd5141 100644
--- a/netbox/extras/management/commands/runscript.py
+++ b/netbox/extras/management/commands/runscript.py
@@ -85,6 +85,7 @@ class Command(BaseCommand):
module_name, script_name = script.split('.', 1)
module, script = get_module_and_script(module_name, script_name)
+ script = script.python_class
# Take user from command line if provided and exists, other
if options['user']:
diff --git a/netbox/extras/migrations/0087_squashed_0098.py b/netbox/extras/migrations/0087_squashed_0098.py
index 57ad3af00..55f276ecd 100644
--- a/netbox/extras/migrations/0087_squashed_0098.py
+++ b/netbox/extras/migrations/0087_squashed_0098.py
@@ -68,6 +68,7 @@ class Migration(migrations.Migration):
],
options={
'proxy': True,
+ 'ordering': ('file_root', 'file_path'),
'indexes': [],
'constraints': [],
},
@@ -79,6 +80,7 @@ class Migration(migrations.Migration):
],
options={
'proxy': True,
+ 'ordering': ('file_root', 'file_path'),
'indexes': [],
'constraints': [],
},
diff --git a/netbox/extras/migrations/0109_script_model.py b/netbox/extras/migrations/0109_script_model.py
index 7570077a7..6bfd2c14c 100644
--- a/netbox/extras/migrations/0109_script_model.py
+++ b/netbox/extras/migrations/0109_script_model.py
@@ -60,7 +60,10 @@ def get_module_scripts(scriptmodule):
return cls.full_name.split(".", maxsplit=1)[1]
loader = SourceFileLoader(get_python_name(scriptmodule), get_full_path(scriptmodule))
- module = loader.load_module()
+ try:
+ module = loader.load_module()
+ except FileNotFoundError:
+ return {}
scripts = {}
ordered = getattr(module, 'script_order', [])
diff --git a/netbox/extras/migrations/0114_customfield_add_comments.py b/netbox/extras/migrations/0114_customfield_add_comments.py
new file mode 100644
index 000000000..cd85db1ba
--- /dev/null
+++ b/netbox/extras/migrations/0114_customfield_add_comments.py
@@ -0,0 +1,18 @@
+# Generated by Django 5.0.3 on 2024-04-19 18:37
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('extras', '0113_customfield_rename_object_type'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='customfield',
+ name='comments',
+ field=models.TextField(blank=True),
+ ),
+ ]
diff --git a/netbox/extras/migrations/0115_convert_dashboard_widgets.py b/netbox/extras/migrations/0115_convert_dashboard_widgets.py
new file mode 100644
index 000000000..c85c83ecf
--- /dev/null
+++ b/netbox/extras/migrations/0115_convert_dashboard_widgets.py
@@ -0,0 +1,29 @@
+# Generated by Django 5.0.4 on 2024-04-24 20:09
+
+from django.db import migrations
+
+
+def update_dashboard_widgets(apps, schema_editor):
+ Dashboard = apps.get_model('extras', 'Dashboard')
+
+ for dashboard in Dashboard.objects.all():
+ for key, widget in dashboard.config.items():
+ if models := widget['config'].get('models'):
+ models = list(map(lambda x: x.replace('users.netboxgroup', 'users.group'), models))
+ models = list(map(lambda x: x.replace('users.netboxuser', 'users.user'), models))
+ dashboard.config[key]['config']['models'] = models
+ dashboard.save()
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('extras', '0114_customfield_add_comments'),
+ ]
+
+ operations = [
+ migrations.RunPython(
+ code=update_dashboard_widgets,
+ reverse_code=migrations.RunPython.noop
+ ),
+ ]
diff --git a/netbox/extras/models/change_logging.py b/netbox/extras/models/change_logging.py
index ebcebc09a..8451a0d15 100644
--- a/netbox/extras/models/change_logging.py
+++ b/netbox/extras/models/change_logging.py
@@ -1,12 +1,17 @@
+from functools import cached_property
+
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
+from mptt.models import MPTTModel
from core.models import ObjectType
from extras.choices import *
+from netbox.models.features import ChangeLoggingMixin
+from utilities.data import shallow_compare_dict
from ..querysets import ObjectChangeQuerySet
__all__ = (
@@ -136,6 +141,71 @@ class ObjectChange(models.Model):
def get_action_color(self):
return ObjectChangeActionChoices.colors.get(self.action)
- @property
+ @cached_property
def has_changes(self):
return self.prechange_data != self.postchange_data
+
+ @cached_property
+ def diff_exclude_fields(self):
+ """
+ Return a set of attributes which should be ignored when calculating a diff
+ between the pre- and post-change data. (For instance, it would not make
+ sense to compare the "last updated" times as these are expected to differ.)
+ """
+ model = self.changed_object_type.model_class()
+ attrs = set()
+
+ # Exclude auto-populated change tracking fields
+ if issubclass(model, ChangeLoggingMixin):
+ attrs.update({'created', 'last_updated'})
+
+ # Exclude MPTT-internal fields
+ if issubclass(model, MPTTModel):
+ attrs.update({'level', 'lft', 'rght', 'tree_id'})
+
+ return attrs
+
+ def get_clean_data(self, prefix):
+ """
+ Return only the pre-/post-change attributes which are relevant for calculating a diff.
+ """
+ ret = {}
+ change_data = getattr(self, f'{prefix}_data') or {}
+ for k, v in change_data.items():
+ if k not in self.diff_exclude_fields and not k.startswith('_'):
+ ret[k] = v
+ return ret
+
+ @cached_property
+ def prechange_data_clean(self):
+ return self.get_clean_data('prechange')
+
+ @cached_property
+ def postchange_data_clean(self):
+ return self.get_clean_data('postchange')
+
+ def diff(self):
+ """
+ Return a dictionary of pre- and post-change values for attributes which have changed.
+ """
+ prechange_data = self.prechange_data_clean
+ postchange_data = self.postchange_data_clean
+
+ # Determine which attributes have changed
+ if self.action == ObjectChangeActionChoices.ACTION_CREATE:
+ changed_attrs = sorted(postchange_data.keys())
+ elif self.action == ObjectChangeActionChoices.ACTION_DELETE:
+ changed_attrs = sorted(prechange_data.keys())
+ else:
+ # TODO: Support deep (recursive) comparison
+ changed_data = shallow_compare_dict(prechange_data, postchange_data)
+ changed_attrs = sorted(changed_data.keys())
+
+ return {
+ 'pre': {
+ k: prechange_data.get(k) for k in changed_attrs
+ },
+ 'post': {
+ k: postchange_data.get(k) for k in changed_attrs
+ },
+ }
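
The cleaned pre-/post-change properties drop private ("_"-prefixed) attributes and the per-model noise fields named in diff_exclude_fields before any comparison. A quick sketch of the resulting behavior for a change record on a Site (illustrative; only the properties added above are assumed):

    oc = ObjectChange.objects.filter(changed_object_type__model='site').first()
    '_name' in oc.postchange_data               # True  - raw data keeps private attrs
    '_name' in oc.postchange_data_clean         # False - stripped by get_clean_data()
    'last_updated' in oc.postchange_data_clean  # False - excluded via diff_exclude_fields
    oc.diff()   # {'pre': {...}, 'post': {...}} restricted to attributes that actually changed
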
diff --git a/netbox/extras/models/customfields.py b/netbox/extras/models/customfields.py
index a14c71c63..974affb2e 100644
--- a/netbox/extras/models/customfields.py
+++ b/netbox/extras/models/customfields.py
@@ -1,4 +1,5 @@
import decimal
+import json
import re
from datetime import datetime, date
@@ -205,6 +206,10 @@ class CustomField(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel):
verbose_name=_('is cloneable'),
help_text=_('Replicate this value when cloning objects')
)
+ comments = models.TextField(
+ verbose_name=_('comments'),
+ blank=True
+ )
objects = CustomFieldManager()
@@ -484,7 +489,7 @@ class CustomField(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel):
# JSON
elif self.type == CustomFieldTypeChoices.TYPE_JSON:
- field = JSONField(required=required, initial=initial)
+ field = JSONField(required=required, initial=json.dumps(initial) if initial else '')
# Object
elif self.type == CustomFieldTypeChoices.TYPE_OBJECT:
diff --git a/netbox/extras/models/models.py b/netbox/extras/models/models.py
index 16f10b485..49249eaa0 100644
--- a/netbox/extras/models/models.py
+++ b/netbox/extras/models/models.py
@@ -732,7 +732,7 @@ class JournalEntry(CustomFieldsMixin, CustomLinksMixin, TagsMixin, ExportTemplat
def __str__(self):
created = timezone.localtime(self.created)
- return f"{date_format(created, format='SHORT_DATETIME_FORMAT')} ({self.get_kind_display()})"
+ return f"{created.date().isoformat()} {created.time().isoformat(timespec='minutes')} ({self.get_kind_display()})"
def get_absolute_url(self):
return reverse('extras:journalentry', args=[self.pk])
diff --git a/netbox/extras/models/scripts.py b/netbox/extras/models/scripts.py
index 551a8b4f0..98d79c53c 100644
--- a/netbox/extras/models/scripts.py
+++ b/netbox/extras/models/scripts.py
@@ -96,9 +96,18 @@ class ScriptModule(PythonModuleMixin, JobsMixin, ManagedFile):
Proxy model for script module files.
"""
objects = ScriptModuleManager()
+ error = None
+
+ event_rules = GenericRelation(
+ to='extras.EventRule',
+ content_type_field='action_object_type',
+ object_id_field='action_object_id',
+ for_concrete_model=False
+ )
class Meta:
proxy = True
+ ordering = ('file_root', 'file_path')
verbose_name = _('script module')
verbose_name_plural = _('script modules')
@@ -118,6 +127,7 @@ class ScriptModule(PythonModuleMixin, JobsMixin, ManagedFile):
try:
module = self.get_module()
except Exception as e:
+ self.error = e
logger.debug(f"Failed to load script: {self.python_name} error: {e}")
module = None
@@ -165,8 +175,8 @@ class ScriptModule(PythonModuleMixin, JobsMixin, ManagedFile):
def save(self, *args, **kwargs):
self.file_root = ManagedFileRootPathChoices.SCRIPTS
+ super().save(*args, **kwargs)
self.sync_classes()
- return super().save(*args, **kwargs)
@receiver(post_save, sender=ScriptModule)
diff --git a/netbox/extras/models/staging.py b/netbox/extras/models/staging.py
index 6e381ce70..7ffbde089 100644
--- a/netbox/extras/models/staging.py
+++ b/netbox/extras/models/staging.py
@@ -4,6 +4,7 @@ from django.contrib.auth import get_user_model
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models, transaction
from django.utils.translation import gettext_lazy as _
+from mptt.models import MPTTModel
from extras.choices import ChangeActionChoices
from netbox.models import ChangeLoggedModel
@@ -124,6 +125,11 @@ class StagedChange(CustomValidationMixin, EventRulesMixin, models.Model):
instance = self.model.objects.get(pk=self.object_id)
logger.info(f'Deleting {self.model._meta.verbose_name} {instance}')
instance.delete()
+
+ # Rebuild the MPTT tree where applicable
+ if issubclass(self.model, MPTTModel):
+ self.model.objects.rebuild()
+
apply.alters_data = True
def get_action_color(self):
diff --git a/netbox/extras/scripts.py b/netbox/extras/scripts.py
index 71faa47e2..0e74c3f0d 100644
--- a/netbox/extras/scripts.py
+++ b/netbox/extras/scripts.py
@@ -24,6 +24,7 @@ from ipam.validators import MaxPrefixLengthValidator, MinPrefixLengthValidator,
from utilities.exceptions import AbortScript, AbortTransaction
from utilities.forms import add_blank_choice
from utilities.forms.fields import DynamicModelChoiceField, DynamicModelMultipleChoiceField
+from utilities.forms.widgets import DatePicker, DateTimePicker
from .context_managers import event_tracking
from .forms import ScriptForm
from .utils import is_report
@@ -33,6 +34,8 @@ __all__ = (
'BaseScript',
'BooleanVar',
'ChoiceVar',
+ 'DateVar',
+ 'DateTimeVar',
'FileVar',
'IntegerVar',
'IPAddressVar',
@@ -174,6 +177,28 @@ class ChoiceVar(ScriptVariable):
self.field_attrs['choices'] = add_blank_choice(choices)
+class DateVar(ScriptVariable):
+ """
+ A date.
+ """
+ form_field = forms.DateField
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.form_field.widget = DatePicker()
+
+
+class DateTimeVar(ScriptVariable):
+ """
+ A date and a time.
+ """
+ form_field = forms.DateTimeField
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.form_field.widget = DateTimePicker()
+
+
class MultiChoiceVar(ScriptVariable):
"""
Like ChoiceVar, but allows for the selection of multiple choices.
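
A minimal custom script using the new variable types, mirroring the shapes exercised by the tests later in this diff (class and field names are illustrative):

    from extras.scripts import DateTimeVar, DateVar, Script

    class ScheduleMaintenance(Script):
        window_start = DateVar(description="First day of the maintenance window")
        cutover_at = DateTimeVar(required=False, description="Exact cutover time, if known")

        def run(self, data, commit):
            self.log_info(f"Maintenance window opens on {data['window_start']}")
            if data['cutover_at']:
                self.log_info(f"Cutover scheduled for {data['cutover_at']}")
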
diff --git a/netbox/extras/search.py b/netbox/extras/search.py
index fff59fa77..9203b9144 100644
--- a/netbox/extras/search.py
+++ b/netbox/extras/search.py
@@ -2,6 +2,18 @@ from netbox.search import SearchIndex, register_search
from . import models
+@register_search
+class CustomFieldIndex(SearchIndex):
+ model = models.CustomField
+ fields = (
+ ('name', 100),
+ ('label', 100),
+ ('description', 500),
+ ('comments', 5000),
+ )
+ display_attrs = ('description',)
+
+
@register_search
class JournalEntryIndex(SearchIndex):
model = models.JournalEntry
diff --git a/netbox/extras/signals.py b/netbox/extras/signals.py
index 2813ed7ae..9d439ace9 100644
--- a/netbox/extras/signals.py
+++ b/netbox/extras/signals.py
@@ -55,18 +55,6 @@ def run_validators(instance, validators):
clear_events = Signal()
-def is_same_object(instance, webhook_data, request_id):
- """
- Compare the given instance to the most recent queued webhook object, returning True
- if they match. This check is used to avoid creating duplicate webhook entries.
- """
- return (
- ContentType.objects.get_for_model(instance) == webhook_data['content_type'] and
- instance.pk == webhook_data['object_id'] and
- request_id == webhook_data['request_id']
- )
-
-
@receiver((post_save, m2m_changed))
def handle_changed_object(sender, instance, **kwargs):
"""
@@ -112,14 +100,13 @@ def handle_changed_object(sender, instance, **kwargs):
objectchange.request_id = request.id
objectchange.save()
- # If this is an M2M change, update the previously queued webhook (from post_save)
+ # Ensure that we're working with fresh M2M assignments
+ if m2m_changed:
+ instance.refresh_from_db()
+
+ # Enqueue the object for event processing
queue = events_queue.get()
- if m2m_changed and queue and is_same_object(instance, queue[-1], request.id):
- instance.refresh_from_db() # Ensure that we're working with fresh M2M assignments
- queue[-1]['data'] = serialize_for_event(instance)
- queue[-1]['snapshots']['postchange'] = get_snapshots(instance, action)['postchange']
- else:
- enqueue_object(queue, instance, request.user, request.id, action)
+ enqueue_object(queue, instance, request.user, request.id, action)
events_queue.set(queue)
# Increment metric counters
@@ -179,7 +166,7 @@ def handle_deleted_object(sender, instance, **kwargs):
obj.snapshot() # Ensure the change record includes the "before" state
getattr(obj, related_field_name).remove(instance)
- # Enqueue webhooks
+ # Enqueue the object for event processing
queue = events_queue.get()
enqueue_object(queue, instance, request.user, request.id, ObjectChangeActionChoices.ACTION_DELETE)
events_queue.set(queue)
@@ -195,7 +182,7 @@ def clear_events_queue(sender, **kwargs):
"""
logger = logging.getLogger('events')
logger.info(f"Clearing {len(events_queue.get())} queued events ({sender})")
- events_queue.set([])
+ events_queue.set({})
#
diff --git a/netbox/extras/tables/tables.py b/netbox/extras/tables/tables.py
index a0f504931..8c78ad0de 100644
--- a/netbox/extras/tables/tables.py
+++ b/netbox/extras/tables/tables.py
@@ -1,10 +1,10 @@
import json
import django_tables2 as tables
-from django.conf import settings
from django.utils.translation import gettext_lazy as _
from extras.models import *
+from netbox.constants import EMPTY_TABLE_TEXT
from netbox.tables import BaseTable, NetBoxTable, columns
from .template_code import *
@@ -78,7 +78,7 @@ class CustomFieldTable(NetBoxTable):
fields = (
'pk', 'id', 'name', 'object_types', 'label', 'type', 'related_object_type', 'group_name', 'required',
'default', 'description', 'search_weight', 'filter_logic', 'ui_visible', 'ui_editable', 'is_cloneable',
- 'weight', 'choice_set', 'choices', 'created', 'last_updated',
+ 'weight', 'choice_set', 'choices', 'comments', 'created', 'last_updated',
)
default_columns = ('pk', 'name', 'object_types', 'label', 'group_name', 'type', 'required', 'description')
@@ -419,23 +419,43 @@ class ConfigTemplateTable(NetBoxTable):
tags = columns.TagColumn(
url_name='extras:configtemplate_list'
)
+ role_count = columns.LinkedCountColumn(
+ viewname='dcim:devicerole_list',
+ url_params={'config_template_id': 'pk'},
+ verbose_name=_('Device Roles')
+ )
+ platform_count = columns.LinkedCountColumn(
+ viewname='dcim:platform_list',
+ url_params={'config_template_id': 'pk'},
+ verbose_name=_('Platforms')
+ )
+ device_count = columns.LinkedCountColumn(
+ viewname='dcim:device_list',
+ url_params={'config_template_id': 'pk'},
+ verbose_name=_('Devices')
+ )
+ vm_count = columns.LinkedCountColumn(
+ viewname='virtualization:virtualmachine_list',
+ url_params={'config_template_id': 'pk'},
+ verbose_name=_('Virtual Machines')
+ )
class Meta(NetBoxTable.Meta):
model = ConfigTemplate
fields = (
- 'pk', 'id', 'name', 'description', 'data_source', 'data_file', 'data_synced', 'created', 'last_updated',
- 'tags',
+ 'pk', 'id', 'name', 'description', 'data_source', 'data_file', 'data_synced', 'role_count',
+ 'platform_count', 'device_count', 'vm_count', 'created', 'last_updated', 'tags',
)
default_columns = (
- 'pk', 'name', 'description', 'is_synced',
+ 'pk', 'name', 'description', 'is_synced', 'device_count', 'vm_count',
)
class ObjectChangeTable(NetBoxTable):
- time = tables.DateTimeColumn(
+ time = columns.DateTimeColumn(
verbose_name=_('Time'),
- linkify=True,
- format=settings.SHORT_DATETIME_FORMAT
+ timespec='minutes',
+ linkify=True
)
user_name = tables.Column(
verbose_name=_('Username')
@@ -475,10 +495,10 @@ class ObjectChangeTable(NetBoxTable):
class JournalEntryTable(NetBoxTable):
- created = tables.DateTimeColumn(
+ created = columns.DateTimeColumn(
verbose_name=_('Created'),
- linkify=True,
- format=settings.SHORT_DATETIME_FORMAT
+ timespec='minutes',
+ linkify=True
)
assigned_object_type = columns.ContentTypeColumn(
verbose_name=_('Object Type')
@@ -525,12 +545,12 @@ class ScriptResultsTable(BaseTable):
template_code="""{% load log_levels %}{% log_level record.status %}""",
verbose_name=_('Level')
)
- message = tables.Column(
+ message = columns.MarkdownColumn(
verbose_name=_('Message')
)
class Meta(BaseTable.Meta):
- empty_text = _('No results found')
+ empty_text = _(EMPTY_TABLE_TEXT)
fields = (
'index', 'time', 'status', 'message',
)
@@ -546,27 +566,22 @@ class ReportResultsTable(BaseTable):
time = tables.Column(
verbose_name=_('Time')
)
- status = tables.Column(
- empty_values=(),
- verbose_name=_('Level')
- )
status = tables.TemplateColumn(
template_code="""{% load log_levels %}{% log_level record.status %}""",
verbose_name=_('Level')
)
-
object = tables.Column(
verbose_name=_('Object')
)
url = tables.Column(
verbose_name=_('URL')
)
- message = tables.Column(
+ message = columns.MarkdownColumn(
verbose_name=_('Message')
)
class Meta(BaseTable.Meta):
- empty_text = _('No results found')
+ empty_text = _(EMPTY_TABLE_TEXT)
fields = (
'index', 'method', 'time', 'status', 'object', 'url', 'message',
)
diff --git a/netbox/extras/tests/test_changelog.py b/netbox/extras/tests/test_changelog.py
index d9d6f1f45..aac526e0f 100644
--- a/netbox/extras/tests/test_changelog.py
+++ b/netbox/extras/tests/test_changelog.py
@@ -75,6 +75,10 @@ class ChangeLogViewTest(ModelViewTestCase):
self.assertEqual(oc.postchange_data['custom_fields']['cf2'], form_data['cf_cf2'])
self.assertEqual(oc.postchange_data['tags'], ['Tag 1', 'Tag 2'])
+ # Check that private attributes were included in raw data but not display data
+ self.assertIn('_name', oc.postchange_data)
+ self.assertNotIn('_name', oc.postchange_data_clean)
+
def test_update_object(self):
site = Site(name='Site 1', slug='site-1')
site.save()
@@ -112,6 +116,12 @@ class ChangeLogViewTest(ModelViewTestCase):
self.assertEqual(oc.postchange_data['custom_fields']['cf2'], form_data['cf_cf2'])
self.assertEqual(oc.postchange_data['tags'], ['Tag 3'])
+ # Check that private attributes were included in raw data but not display data
+ self.assertIn('_name', oc.prechange_data)
+ self.assertNotIn('_name', oc.prechange_data_clean)
+ self.assertIn('_name', oc.postchange_data)
+ self.assertNotIn('_name', oc.postchange_data_clean)
+
def test_delete_object(self):
site = Site(
name='Site 1',
@@ -142,6 +152,10 @@ class ChangeLogViewTest(ModelViewTestCase):
self.assertEqual(oc.prechange_data['tags'], ['Tag 1', 'Tag 2'])
self.assertEqual(oc.postchange_data, None)
+ # Check that private attributes were included in raw data but not display data
+ self.assertIn('_name', oc.prechange_data)
+ self.assertNotIn('_name', oc.prechange_data_clean)
+
def test_bulk_update_objects(self):
sites = (
Site(name='Site 1', slug='site-1', status=SiteStatusChoices.STATUS_ACTIVE),
@@ -338,6 +352,10 @@ class ChangeLogAPITest(APITestCase):
self.assertEqual(oc.postchange_data['custom_fields'], data['custom_fields'])
self.assertEqual(oc.postchange_data['tags'], ['Tag 1', 'Tag 2'])
+ # Check that private attributes were included in raw data but not display data
+ self.assertIn('_name', oc.postchange_data)
+ self.assertNotIn('_name', oc.postchange_data_clean)
+
def test_update_object(self):
site = Site(name='Site 1', slug='site-1')
site.save()
@@ -370,6 +388,12 @@ class ChangeLogAPITest(APITestCase):
self.assertEqual(oc.postchange_data['custom_fields'], data['custom_fields'])
self.assertEqual(oc.postchange_data['tags'], ['Tag 3'])
+ # Check that private attributes were included in raw data but not display data
+ self.assertIn('_name', oc.prechange_data)
+ self.assertNotIn('_name', oc.prechange_data_clean)
+ self.assertIn('_name', oc.postchange_data)
+ self.assertNotIn('_name', oc.postchange_data_clean)
+
def test_delete_object(self):
site = Site(
name='Site 1',
@@ -398,6 +422,10 @@ class ChangeLogAPITest(APITestCase):
self.assertEqual(oc.prechange_data['tags'], ['Tag 1', 'Tag 2'])
self.assertEqual(oc.postchange_data, None)
+ # Check that private attributes were included in raw data but not display data
+ self.assertIn('_name', oc.prechange_data)
+ self.assertNotIn('_name', oc.prechange_data_clean)
+
def test_bulk_create_objects(self):
data = (
{
diff --git a/netbox/extras/tests/test_event_rules.py b/netbox/extras/tests/test_event_rules.py
index 8cea2078a..a1dd8b48e 100644
--- a/netbox/extras/tests/test_event_rules.py
+++ b/netbox/extras/tests/test_event_rules.py
@@ -4,6 +4,7 @@ from unittest.mock import patch
import django_rq
from django.http import HttpResponse
+from django.test import RequestFactory
from django.urls import reverse
from requests import Session
from rest_framework import status
@@ -12,6 +13,7 @@ from core.models import ObjectType
from dcim.choices import SiteStatusChoices
from dcim.models import Site
from extras.choices import EventRuleActionChoices, ObjectChangeActionChoices
+from extras.context_managers import event_tracking
from extras.events import enqueue_object, flush_events, serialize_for_event
from extras.models import EventRule, Tag, Webhook
from extras.webhooks import generate_signature, send_webhook
@@ -360,7 +362,7 @@ class EventRuleTest(APITestCase):
return HttpResponse()
# Enqueue a webhook for processing
- webhooks_queue = []
+ webhooks_queue = {}
site = Site.objects.create(name='Site 1', slug='site-1')
enqueue_object(
webhooks_queue,
@@ -369,7 +371,7 @@ class EventRuleTest(APITestCase):
request_id=request_id,
action=ObjectChangeActionChoices.ACTION_CREATE
)
- flush_events(webhooks_queue)
+ flush_events(list(webhooks_queue.values()))
# Retrieve the job from queue
job = self.queue.jobs[0]
@@ -377,3 +379,24 @@ class EventRuleTest(APITestCase):
# Patch the Session object with our dummy_send() method, then process the webhook for sending
with patch.object(Session, 'send', dummy_send) as mock_send:
send_webhook(**job.kwargs)
+
+ def test_duplicate_triggers(self):
+ """
+ Test for erroneous duplicate event triggers resulting from saving an object multiple times
+ within the span of a single request.
+ """
+ url = reverse('dcim:site_add')
+ request = RequestFactory().get(url)
+ request.id = uuid.uuid4()
+ request.user = self.user
+
+ self.assertEqual(self.queue.count, 0, msg="Unexpected jobs found in queue")
+
+ with event_tracking(request):
+ site = Site(name='Site 1', slug='site-1')
+ site.save()
+
+ # Save the site a second time
+ site.save()
+
+ self.assertEqual(self.queue.count, 1, msg="Duplicate jobs found in queue")
diff --git a/netbox/extras/tests/test_scripts.py b/netbox/extras/tests/test_scripts.py
index 64971f1dc..bed8f0fc5 100644
--- a/netbox/extras/tests/test_scripts.py
+++ b/netbox/extras/tests/test_scripts.py
@@ -1,4 +1,5 @@
import tempfile
+from datetime import date, datetime, timezone
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
@@ -322,3 +323,47 @@ class ScriptVariablesTest(TestCase):
form = TestScript().as_form(data, None)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['var1'], IPNetwork(data['var1']))
+
+ def test_datevar(self):
+
+ class TestScript(Script):
+
+ var1 = DateVar()
+ var2 = DateVar(required=False)
+
+ # Test date validation
+ data = {'var1': 'not a date'}
+ form = TestScript().as_form(data, None)
+ self.assertFalse(form.is_valid())
+ self.assertIn('var1', form.errors)
+
+ # Validate valid data
+ input_date = date(2024, 4, 1)
+ data = {'var1': input_date}
+ form = TestScript().as_form(data, None)
+ self.assertTrue(form.is_valid())
+ self.assertEqual(form.cleaned_data['var1'], input_date)
+ # Validate required=False works for this Var type
+ self.assertEqual(form.cleaned_data['var2'], None)
+
+ def test_datetimevar(self):
+
+ class TestScript(Script):
+
+ var1 = DateTimeVar()
+ var2 = DateTimeVar(required=False)
+
+ # Test datetime validation
+ data = {'var1': 'not a datetime'}
+ form = TestScript().as_form(data, None)
+ self.assertFalse(form.is_valid())
+ self.assertIn('var1', form.errors)
+
+ # Validate valid data
+ input_datetime = datetime(2024, 4, 1, 8, 0, 0, 0, timezone.utc)
+ data = {'var1': input_datetime}
+ form = TestScript().as_form(data, None)
+ self.assertTrue(form.is_valid())
+ self.assertEqual(form.cleaned_data['var1'], input_datetime)
+ # Validate required=False works for this Var type
+ self.assertEqual(form.cleaned_data['var2'], None)
diff --git a/netbox/extras/views.py b/netbox/extras/views.py
index be3937512..82f519c00 100644
--- a/netbox/extras/views.py
+++ b/netbox/extras/views.py
@@ -13,6 +13,7 @@ from core.choices import ManagedFileRootPathChoices
from core.forms import ManagedFileForm
from core.models import Job
from core.tables import JobTable
+from dcim.models import Device, DeviceRole, Platform
from extras.dashboard.forms import DashboardWidgetAddForm, DashboardWidgetForm
from extras.dashboard.utils import get_widget_class
from netbox.constants import DEFAULT_ACTION_PERMISSIONS
@@ -28,6 +29,7 @@ from utilities.request import copy_safe_request
from utilities.rqworker import get_workers_for_queue
from utilities.templatetags.builtins.filters import render_markdown
from utilities.views import ContentTypePermissionRequiredMixin, get_viewname, register_model_view
+from virtualization.models import VirtualMachine
from . import filtersets, forms, tables
from .models import *
from .scripts import run_script
@@ -627,7 +629,12 @@ class ObjectConfigContextView(generic.ObjectView):
#
class ConfigTemplateListView(generic.ObjectListView):
- queryset = ConfigTemplate.objects.all()
+ queryset = ConfigTemplate.objects.annotate(
+ device_count=count_related(Device, 'config_template'),
+ vm_count=count_related(VirtualMachine, 'config_template'),
+ role_count=count_related(DeviceRole, 'config_template'),
+ platform_count=count_related(Platform, 'config_template'),
+ )
filterset = filtersets.ConfigTemplateFilterSet
filterset_form = forms.ConfigTemplateFilterForm
table = tables.ConfigTemplateTable
@@ -716,15 +723,15 @@ class ObjectChangeView(generic.ObjectView):
if not instance.prechange_data and instance.action in ['update', 'delete'] and prev_change:
non_atomic_change = True
- prechange_data = prev_change.postchange_data
+ prechange_data = prev_change.postchange_data_clean
else:
non_atomic_change = False
- prechange_data = instance.prechange_data
+ prechange_data = instance.prechange_data_clean
if prechange_data and instance.postchange_data:
diff_added = shallow_compare_dict(
prechange_data or dict(),
- instance.postchange_data or dict(),
+ instance.postchange_data_clean or dict(),
exclude=['last_updated'],
)
diff_removed = {
@@ -1035,7 +1042,9 @@ class ScriptListView(ContentTypePermissionRequiredMixin, View):
return 'extras.view_script'
def get(self, request):
- script_modules = ScriptModule.objects.restrict(request.user).prefetch_related('jobs')
+ script_modules = ScriptModule.objects.restrict(request.user).prefetch_related(
+ 'data_source', 'data_file', 'jobs'
+ )
return render(request, 'extras/script_list.html', {
'model': ScriptModule,
@@ -1043,12 +1052,27 @@ class ScriptListView(ContentTypePermissionRequiredMixin, View):
})
-class ScriptView(generic.ObjectView):
+class BaseScriptView(generic.ObjectView):
queryset = Script.objects.all()
+ def _get_script_class(self, script):
+ """
+ Return an instance of the Script's Python class
+ """
+ if script_class := script.python_class:
+ return script_class()
+
+
+class ScriptView(BaseScriptView):
+
def get(self, request, **kwargs):
script = self.get_object(**kwargs)
- script_class = script.python_class()
+ script_class = self._get_script_class(script)
+ if not script_class:
+ return render(request, 'extras/script.html', {
+ 'script': script,
+ })
+
form = script_class.as_form(initial=normalize_querydict(request.GET))
return render(request, 'extras/script.html', {
@@ -1060,11 +1084,16 @@ class ScriptView(generic.ObjectView):
def post(self, request, **kwargs):
script = self.get_object(**kwargs)
- script_class = script.python_class()
if not request.user.has_perm('extras.run_script', obj=script):
return HttpResponseForbidden()
+ script_class = self._get_script_class(script)
+ if not script_class:
+ return render(request, 'extras/script.html', {
+ 'script': script,
+ })
+
form = script_class.as_form(request.POST, request.FILES)
# Allow execution only if RQ worker process is running
@@ -1094,21 +1123,22 @@ class ScriptView(generic.ObjectView):
})
-class ScriptSourceView(generic.ObjectView):
+class ScriptSourceView(BaseScriptView):
queryset = Script.objects.all()
def get(self, request, **kwargs):
script = self.get_object(**kwargs)
+ script_class = self._get_script_class(script)
return render(request, 'extras/script/source.html', {
'script': script,
- 'script_class': script.python_class(),
+ 'script_class': script_class,
'job_count': script.jobs.count(),
'tab': 'source',
})
-class ScriptJobsView(generic.ObjectView):
+class ScriptJobsView(BaseScriptView):
queryset = Script.objects.all()
def get(self, request, **kwargs):
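For reference, the usage counts that `ConfigTemplateListView` now annotates can be reproduced interactively. A sketch assuming a working installation and a `python3 manage.py nbshell` session; it simply mirrors the `count_related()` calls added above:

```python
from dcim.models import Device, DeviceRole, Platform
from extras.models import ConfigTemplate
from utilities.query import count_related
from virtualization.models import VirtualMachine

templates = ConfigTemplate.objects.annotate(
    device_count=count_related(Device, 'config_template'),
    vm_count=count_related(VirtualMachine, 'config_template'),
    role_count=count_related(DeviceRole, 'config_template'),
    platform_count=count_related(Platform, 'config_template'),
)
for template in templates:
    print(template.name, template.device_count, template.vm_count,
          template.role_count, template.platform_count)
```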
diff --git a/netbox/ipam/api/serializers_/ip.py b/netbox/ipam/api/serializers_/ip.py
index e5fa81314..7c53d68ca 100644
--- a/netbox/ipam/api/serializers_/ip.py
+++ b/netbox/ipam/api/serializers_/ip.py
@@ -100,7 +100,7 @@ class AvailablePrefixSerializer(serializers.Serializer):
"""
family = serializers.IntegerField(read_only=True)
prefix = serializers.CharField(read_only=True)
- vrf = VRFSerializer(nested=True, read_only=True)
+ vrf = VRFSerializer(nested=True, read_only=True, allow_null=True)
def to_representation(self, instance):
if self.context.get('vrf'):
@@ -183,7 +183,7 @@ class AvailableIPSerializer(serializers.Serializer):
"""
family = serializers.IntegerField(read_only=True)
address = serializers.CharField(read_only=True)
- vrf = VRFSerializer(nested=True, read_only=True)
+ vrf = VRFSerializer(nested=True, read_only=True, allow_null=True)
description = serializers.CharField(required=False)
def to_representation(self, instance):
diff --git a/netbox/ipam/api/serializers_/vlans.py b/netbox/ipam/api/serializers_/vlans.py
index a400f949b..f35d294cf 100644
--- a/netbox/ipam/api/serializers_/vlans.py
+++ b/netbox/ipam/api/serializers_/vlans.py
@@ -82,7 +82,7 @@ class AvailableVLANSerializer(serializers.Serializer):
Representation of a VLAN which does not exist in the database.
"""
vid = serializers.IntegerField(read_only=True)
- group = VLANGroupSerializer(nested=True, read_only=True)
+ group = VLANGroupSerializer(nested=True, read_only=True, allow_null=True)
def to_representation(self, instance):
return {
diff --git a/netbox/ipam/filtersets.py b/netbox/ipam/filtersets.py
index d58f5bfc9..5cdfac34e 100644
--- a/netbox/ipam/filtersets.py
+++ b/netbox/ipam/filtersets.py
@@ -912,10 +912,6 @@ class VLANGroupFilterSet(OrganizationalModelFilterSet):
method='filter_scope'
)
- # TODO: Remove in v4.1
- sitegroup = site_group
- clustergroup = cluster_group
-
class Meta:
model = VLANGroup
fields = ('id', 'name', 'slug', 'min_vid', 'max_vid', 'description', 'scope_id')
@@ -1106,10 +1102,6 @@ class ServiceFilterSet(NetBoxModelFilterSet):
lookup_expr='contains'
)
- # TODO: Remove in v4.1
- ipaddress = ip_address
- ipaddress_id = ip_address_id
-
class Meta:
model = Service
fields = ('id', 'name', 'protocol', 'description')
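With the deprecated aliases gone, REST clients must filter on the underscore parameter names. A hedged sketch using `requests`; the URL, token, and object IDs are placeholders:

```python
import requests

NETBOX_URL = "https://netbox.example.com"              # placeholder
HEADERS = {"Authorization": "Token 0123456789abcdef"}  # placeholder token

# VLAN groups scoped to a site group (formerly ?sitegroup=)
resp = requests.get(
    f"{NETBOX_URL}/api/ipam/vlan-groups/",
    headers=HEADERS,
    params={"site_group": 42},
)
resp.raise_for_status()

# Services bound to an IP address (formerly ?ipaddress_id=)
resp = requests.get(
    f"{NETBOX_URL}/api/ipam/services/",
    headers=HEADERS,
    params={"ip_address_id": 123},
)
resp.raise_for_status()
```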
diff --git a/netbox/ipam/forms/filtersets.py b/netbox/ipam/forms/filtersets.py
index 6610bcaf3..80fb04226 100644
--- a/netbox/ipam/forms/filtersets.py
+++ b/netbox/ipam/forms/filtersets.py
@@ -10,7 +10,7 @@ from tenancy.forms import TenancyFilterForm
from utilities.forms import BOOLEAN_WITH_BLANK_CHOICES, add_blank_choice
from utilities.forms.fields import DynamicModelChoiceField, DynamicModelMultipleChoiceField, TagFilterField
from utilities.forms.rendering import FieldSet
-from virtualization.models import VirtualMachine
+from virtualization.models import VirtualMachine, ClusterGroup, Cluster
from vpn.models import L2VPN
__all__ = (
@@ -168,6 +168,7 @@ class PrefixFilterForm(TenancyFilterForm, NetBoxModelFilterSetForm):
'within_include', 'family', 'status', 'role_id', 'mask_length', 'is_pool', 'mark_utilized',
name=_('Addressing')
),
+ FieldSet('vlan_id', name=_('VLAN Assignment')),
FieldSet('vrf_id', 'present_in_vrf_id', name=_('VRF')),
FieldSet('region_id', 'site_group_id', 'site_id', name=_('Location')),
FieldSet('tenant_group_id', 'tenant_id', name=_('Tenant')),
@@ -249,6 +250,12 @@ class PrefixFilterForm(TenancyFilterForm, NetBoxModelFilterSetForm):
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
+ vlan_id = DynamicModelMultipleChoiceField(
+ queryset=VLAN.objects.all(),
+ required=False,
+ label=_('VLAN'),
+ )
+
tag = TagFilterField(model)
@@ -405,6 +412,7 @@ class VLANGroupFilterForm(NetBoxModelFilterSetForm):
fieldsets = (
FieldSet('q', 'filter_id', 'tag'),
FieldSet('region', 'sitegroup', 'site', 'location', 'rack', name=_('Location')),
+ FieldSet('cluster_group', 'cluster', name=_('Cluster')),
FieldSet('min_vid', 'max_vid', name=_('VLAN ID')),
)
model = VLANGroup
@@ -445,6 +453,17 @@ class VLANGroupFilterForm(NetBoxModelFilterSetForm):
max_value=VLAN_VID_MAX,
label=_('Maximum VID')
)
+ cluster = DynamicModelMultipleChoiceField(
+ queryset=Cluster.objects.all(),
+ required=False,
+ label=_('Cluster')
+ )
+ cluster_group = DynamicModelMultipleChoiceField(
+ queryset=ClusterGroup.objects.all(),
+ required=False,
+ label=_('Cluster group')
+ )
+
tag = TagFilterField(model)
diff --git a/netbox/ipam/forms/model_forms.py b/netbox/ipam/forms/model_forms.py
index 0db9576f1..4e405a035 100644
--- a/netbox/ipam/forms/model_forms.py
+++ b/netbox/ipam/forms/model_forms.py
@@ -355,6 +355,15 @@ class IPAddressForm(TenancyForm, NetBoxModelForm):
):
self.initial['primary_for_parent'] = True
+ if type(instance.assigned_object) is Interface:
+ self.fields['interface'].widget.add_query_params({
+ 'device_id': instance.assigned_object.device.pk,
+ })
+ elif type(instance.assigned_object) is VMInterface:
+ self.fields['vminterface'].widget.add_query_params({
+ 'virtual_machine_id': instance.assigned_object.virtual_machine.pk,
+ })
+
# Disable object assignment fields if the IP address is designated as primary
if self.initial.get('primary_for_parent'):
self.fields['interface'].disabled = True
@@ -533,6 +542,24 @@ class FHRPGroupAssignmentForm(forms.ModelForm):
for ipaddress in ipaddresses:
self.fields['group'].widget.add_query_param('related_ip', ipaddress.pk)
+ def clean_group(self):
+ group = self.cleaned_data['group']
+
+ conflicting_assignments = FHRPGroupAssignment.objects.filter(
+ interface_type=self.instance.interface_type,
+ interface_id=self.instance.interface_id,
+ group=group
+ )
+ if self.instance.id:
+ conflicting_assignments = conflicting_assignments.exclude(id=self.instance.id)
+
+ if conflicting_assignments.exists():
+ raise forms.ValidationError(
+ _('Assignment already exists')
+ )
+
+ return group
+
class VLANGroupForm(NetBoxModelForm):
scope_type = ContentTypeChoiceField(
diff --git a/netbox/ipam/graphql/types.py b/netbox/ipam/graphql/types.py
index 6c269721e..36e09eaac 100644
--- a/netbox/ipam/graphql/types.py
+++ b/netbox/ipam/graphql/types.py
@@ -133,7 +133,7 @@ class IPAddressType(NetBoxObjectType, BaseIPAddressFamilyType):
Annotated["InterfaceType", strawberry.lazy('dcim.graphql.types')],
Annotated["FHRPGroupType", strawberry.lazy('ipam.graphql.types')],
Annotated["VMInterfaceType", strawberry.lazy('virtualization.graphql.types')],
- ], strawberry.union("IPAddressAssignmentType")]:
+ ], strawberry.union("IPAddressAssignmentType")] | None:
return self.assigned_object
@@ -261,7 +261,7 @@ class VLANGroupType(OrganizationalObjectType):
Annotated["RegionType", strawberry.lazy('dcim.graphql.types')],
Annotated["SiteType", strawberry.lazy('dcim.graphql.types')],
Annotated["SiteGroupType", strawberry.lazy('dcim.graphql.types')],
- ], strawberry.union("VLANGroupScopeType")]:
+ ], strawberry.union("VLANGroupScopeType")] | None:
return self.scope
diff --git a/netbox/ipam/models/ip.py b/netbox/ipam/models/ip.py
index 422c5ba37..0b8e3a8df 100644
--- a/netbox/ipam/models/ip.py
+++ b/netbox/ipam/models/ip.py
@@ -18,6 +18,7 @@ from ipam.querysets import PrefixQuerySet
from ipam.validators import DNSValidator
from netbox.config import get_config
from netbox.models import OrganizationalModel, PrimaryModel
+from netbox.models.features import ContactsMixin
__all__ = (
'Aggregate',
@@ -74,7 +75,7 @@ class RIR(OrganizationalModel):
return reverse('ipam:rir', args=[self.pk])
-class Aggregate(GetAvailablePrefixesMixin, PrimaryModel):
+class Aggregate(ContactsMixin, GetAvailablePrefixesMixin, PrimaryModel):
"""
An aggregate exists at the root level of the IP address space hierarchy in NetBox. Aggregates are used to organize
the hierarchy and track the overall utilization of available address space. Each Aggregate is assigned to a RIR.
@@ -206,7 +207,7 @@ class Role(OrganizationalModel):
return reverse('ipam:role', args=[self.pk])
-class Prefix(GetAvailablePrefixesMixin, PrimaryModel):
+class Prefix(ContactsMixin, GetAvailablePrefixesMixin, PrimaryModel):
"""
A Prefix represents an IPv4 or IPv6 network, including mask length. Prefixes can optionally be assigned to Sites and
VRFs. A Prefix must be assigned a status and may optionally be assigned a user-defined Role. A Prefix can also be
@@ -486,7 +487,7 @@ class Prefix(GetAvailablePrefixesMixin, PrimaryModel):
return min(utilization, 100)
-class IPRange(PrimaryModel):
+class IPRange(ContactsMixin, PrimaryModel):
"""
A range of IP addresses, defined by start and end addresses.
"""
@@ -574,7 +575,7 @@ class IPRange(PrimaryModel):
if not self.end_address > self.start_address:
raise ValidationError({
'end_address': _(
- "Ending address must be lower than the starting address ({start_address})"
+ "Ending address must be greater than the starting address ({start_address})"
).format(start_address=self.start_address)
})
@@ -692,10 +693,10 @@ class IPRange(PrimaryModel):
ip.address.ip for ip in self.get_child_ips()
]).size
- return int(float(child_count) / self.size * 100)
+ return min(float(child_count) / self.size * 100, 100)
-class IPAddress(PrimaryModel):
+class IPAddress(ContactsMixin, PrimaryModel):
"""
An IPAddress represents an individual IPv4 or IPv6 address and its mask. The mask length should match what is
configured in the real world. (Typically, only loopback interfaces are configured with /32 or /128 masks.) Like
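The IPRange utilization change caps the reported percentage at 100. A toy illustration of the clamped calculation with made-up numbers:

```python
def utilization(child_count: int, range_size: int) -> float:
    # Mirrors the clamped calculation above; inputs are invented for illustration.
    return min(float(child_count) / range_size * 100, 100)

print(utilization(50, 100))   # 50.0
print(utilization(120, 100))  # 100 -- previously this could exceed 100%
```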
diff --git a/netbox/ipam/models/services.py b/netbox/ipam/models/services.py
index 37b559801..71f34c66c 100644
--- a/netbox/ipam/models/services.py
+++ b/netbox/ipam/models/services.py
@@ -8,6 +8,7 @@ from django.utils.translation import gettext_lazy as _
from ipam.choices import *
from ipam.constants import *
from netbox.models import PrimaryModel
+from netbox.models.features import ContactsMixin
from utilities.data import array_to_string
__all__ = (
@@ -62,7 +63,7 @@ class ServiceTemplate(ServiceBase, PrimaryModel):
return reverse('ipam:servicetemplate', args=[self.pk])
-class Service(ServiceBase, PrimaryModel):
+class Service(ContactsMixin, ServiceBase, PrimaryModel):
"""
A Service represents a layer-four service (e.g. HTTP or SSH) running on a Device or VirtualMachine. A Service may
optionally be tied to one or more specific IPAddresses belonging to its parent.
diff --git a/netbox/ipam/tables/ip.py b/netbox/ipam/tables/ip.py
index 9e940ae9e..10dea3a92 100644
--- a/netbox/ipam/tables/ip.py
+++ b/netbox/ipam/tables/ip.py
@@ -378,7 +378,7 @@ class IPAddressTable(TenancyColumnsMixin, NetBoxTable):
orderable=False,
verbose_name=_('NAT (Inside)')
)
- nat_outside = tables.ManyToManyColumn(
+ nat_outside = columns.ManyToManyColumn(
linkify_item=True,
orderable=False,
verbose_name=_('NAT (Outside)')
diff --git a/netbox/ipam/tests/test_api.py b/netbox/ipam/tests/test_api.py
index 16ee7bbf0..2cf7a2f1c 100644
--- a/netbox/ipam/tests/test_api.py
+++ b/netbox/ipam/tests/test_api.py
@@ -648,6 +648,9 @@ class IPAddressTest(APIViewTestCases.APIViewTestCase):
bulk_update_data = {
'description': 'New description',
}
+ graphql_filter = {
+ 'address': {'lookup': 'i_exact', 'value': '192.168.0.1/24'},
+ }
@classmethod
def setUpTestData(cls):
diff --git a/netbox/ipam/tests/test_filtersets.py b/netbox/ipam/tests/test_filtersets.py
index 3a46423a5..8f07a241a 100644
--- a/netbox/ipam/tests/test_filtersets.py
+++ b/netbox/ipam/tests/test_filtersets.py
@@ -1525,8 +1525,8 @@ class VLANGroupTestCase(TestCase, ChangeLoggedFilterSetTests):
params = {'region': Region.objects.first().pk}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
- def test_sitegroup(self):
- params = {'sitegroup': SiteGroup.objects.first().pk}
+ def test_site_group(self):
+ params = {'site_group': SiteGroup.objects.first().pk}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_site(self):
@@ -1541,8 +1541,8 @@ class VLANGroupTestCase(TestCase, ChangeLoggedFilterSetTests):
params = {'rack': Rack.objects.first().pk}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
- def test_clustergroup(self):
- params = {'clustergroup': ClusterGroup.objects.first().pk}
+ def test_cluster_group(self):
+ params = {'cluster_group': ClusterGroup.objects.first().pk}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_cluster(self):
diff --git a/netbox/ipam/views.py b/netbox/ipam/views.py
index 24d82d186..f94c3c6d7 100644
--- a/netbox/ipam/views.py
+++ b/netbox/ipam/views.py
@@ -9,6 +9,7 @@ from circuits.models import Provider
from dcim.filtersets import InterfaceFilterSet
from dcim.models import Interface, Site
from netbox.views import generic
+from tenancy.views import ObjectContactsView
from utilities.query import count_related
from utilities.tables import get_table_ordering
from utilities.views import ViewTab, register_model_view
@@ -214,7 +215,6 @@ class ASNRangeASNsView(generic.ObjectChildrenView):
child_model = ASN
table = tables.ASNTable
filterset = filtersets.ASNFilterSet
- template_name = 'generic/object_children.html'
tab = ViewTab(
label=_('ASNs'),
badge=lambda x: x.get_child_asns().count(),
@@ -406,6 +406,11 @@ class AggregateBulkDeleteView(generic.BulkDeleteView):
table = tables.AggregateTable
+@register_model_view(Aggregate, 'contacts')
+class AggregateContactsView(ObjectContactsView):
+ queryset = Aggregate.objects.all()
+
+
#
# Prefix/VLAN roles
#
@@ -644,6 +649,11 @@ class PrefixBulkDeleteView(generic.BulkDeleteView):
table = tables.PrefixTable
+@register_model_view(Prefix, 'contacts')
+class PrefixContactsView(ObjectContactsView):
+ queryset = Prefix.objects.all()
+
+
#
# IP Ranges
#
@@ -727,6 +737,11 @@ class IPRangeBulkDeleteView(generic.BulkDeleteView):
table = tables.IPRangeTable
+@register_model_view(IPRange, 'contacts')
+class IPRangeContactsView(ObjectContactsView):
+ queryset = IPRange.objects.all()
+
+
#
# IP addresses
#
@@ -781,6 +796,7 @@ class IPAddressView(generic.ObjectView):
class IPAddressEditView(generic.ObjectEditView):
queryset = IPAddress.objects.all()
form = forms.IPAddressForm
+ template_name = 'ipam/ipaddress_edit.html'
def alter_object(self, obj, request, url_args, url_kwargs):
@@ -882,7 +898,6 @@ class IPAddressRelatedIPsView(generic.ObjectChildrenView):
child_model = IPAddress
table = tables.IPAddressTable
filterset = filtersets.IPAddressFilterSet
- template_name = 'generic/object_children.html'
tab = ViewTab(
label=_('Related IPs'),
badge=lambda x: x.get_related_ips().count(),
@@ -894,6 +909,11 @@ class IPAddressRelatedIPsView(generic.ObjectChildrenView):
return parent.get_related_ips().restrict(request.user, 'view')
+@register_model_view(IPAddress, 'contacts')
+class IPAddressContactsView(ObjectContactsView):
+ queryset = IPAddress.objects.all()
+
+
#
# VLAN groups
#
@@ -954,7 +974,6 @@ class VLANGroupVLANsView(generic.ObjectChildrenView):
child_model = VLAN
table = tables.VLANTable
filterset = filtersets.VLANFilterSet
- template_name = 'generic/object_children.html'
tab = ViewTab(
label=_('VLANs'),
badge=lambda x: x.get_child_vlans().count(),
@@ -1110,7 +1129,6 @@ class VLANInterfacesView(generic.ObjectChildrenView):
child_model = Interface
table = tables.VLANDevicesTable
filterset = InterfaceFilterSet
- template_name = 'generic/object_children.html'
tab = ViewTab(
label=_('Device Interfaces'),
badge=lambda x: x.get_interfaces().count(),
@@ -1128,7 +1146,6 @@ class VLANVMInterfacesView(generic.ObjectChildrenView):
child_model = VMInterface
table = tables.VLANVirtualMachinesTable
filterset = VMInterfaceFilterSet
- template_name = 'generic/object_children.html'
tab = ViewTab(
label=_('VM Interfaces'),
badge=lambda x: x.get_vminterfaces().count(),
@@ -1263,3 +1280,8 @@ class ServiceBulkDeleteView(generic.BulkDeleteView):
queryset = Service.objects.prefetch_related('device', 'virtual_machine')
filterset = filtersets.ServiceFilterSet
table = tables.ServiceTable
+
+
+@register_model_view(Service, 'contacts')
+class ServiceContactsView(ObjectContactsView):
+ queryset = Service.objects.all()
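Because ContactsMixin is now applied to these IPAM models, each registered contacts view exposes a Contacts tab, and assignments are reachable through the usual generic relation. A sketch for an nbshell session, assuming at least one Prefix already exists:

```python
from ipam.models import Prefix

prefix = Prefix.objects.first()
print(prefix.contacts.count())   # ContactAssignments linked to this prefix
for assignment in prefix.contacts.all():
    print(assignment.contact, assignment.role, assignment.priority)
```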
diff --git a/netbox/netbox/api/views.py b/netbox/netbox/api/views.py
index cfbe82f14..d58d1affe 100644
--- a/netbox/netbox/api/views.py
+++ b/netbox/netbox/api/views.py
@@ -66,7 +66,7 @@ class StatusView(APIView):
return Response({
'django-version': DJANGO_VERSION,
'installed-apps': installed_apps,
- 'netbox-version': settings.VERSION,
+ 'netbox-version': settings.RELEASE.full_version,
'plugins': get_installed_plugins(),
'python-version': platform.python_version(),
'rq-workers-running': Worker.count(get_connection('default')),
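The status endpoint now reports the full release string derived from `settings.RELEASE`. A quick check from a client, with a placeholder URL and token:

```python
import requests

resp = requests.get(
    "https://netbox.example.com/api/status/",               # placeholder URL
    headers={"Authorization": "Token 0123456789abcdef"},    # placeholder token
)
resp.raise_for_status()
print(resp.json()["netbox-version"])
```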
diff --git a/netbox/netbox/authentication.py b/netbox/netbox/authentication/__init__.py
similarity index 98%
rename from netbox/netbox/authentication.py
rename to netbox/netbox/authentication/__init__.py
index 2b66639c8..55fd91d4d 100644
--- a/netbox/netbox/authentication.py
+++ b/netbox/netbox/authentication/__init__.py
@@ -14,6 +14,7 @@ from users.models import Group, ObjectPermission
from utilities.permissions import (
permission_is_exempt, qs_filter_from_constraints, resolve_permission, resolve_permission_type,
)
+from .misc import _mirror_groups
UserModel = get_user_model()
@@ -313,7 +314,7 @@ class RemoteUserBackend(_RemoteUserBackend):
# Create a new instance of django-auth-ldap's LDAPBackend with our own ObjectPermissions
try:
- from django_auth_ldap.backend import LDAPBackend as LDAPBackend_
+ from django_auth_ldap.backend import _LDAPUser, LDAPBackend as LDAPBackend_
class NBLDAPBackend(ObjectPermissionMixin, LDAPBackend_):
def get_permission_filter(self, user_obj):
@@ -323,6 +324,10 @@ try:
hasattr(user_obj.ldap_user, "group_names")):
permission_filter = permission_filter | Q(groups__name__in=user_obj.ldap_user.group_names)
return permission_filter
+
+ # Patch with our modified _mirror_groups() method to support our custom Group model
+ _LDAPUser._mirror_groups = _mirror_groups
+
except ModuleNotFoundError:
pass
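The patched `_mirror_groups()` only comes into play when group mirroring is enabled in the LDAP configuration (with `REMOTE_AUTH_BACKEND` pointed at `netbox.authentication.LDAPBackend` in `configuration.py`). A hedged excerpt of what that looks like in `ldap_config.py`; the server URI, DNs, and group names are placeholders:

```python
import ldap
from django_auth_ldap.config import GroupOfNamesType, LDAPSearch

AUTH_LDAP_SERVER_URI = "ldaps://ldap.example.com"  # placeholder
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
    "ou=groups,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(objectClass=groupOfNames)"
)
AUTH_LDAP_GROUP_TYPE = GroupOfNamesType()

# Mirror the user's LDAP groups into NetBox's Group model at login
AUTH_LDAP_MIRROR_GROUPS = True
# Or mirror everything except a protected set of local groups:
# AUTH_LDAP_MIRROR_GROUPS_EXCEPT = {"netbox-admins"}
```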
diff --git a/netbox/netbox/authentication/misc.py b/netbox/netbox/authentication/misc.py
new file mode 100644
index 000000000..fe89b8e39
--- /dev/null
+++ b/netbox/netbox/authentication/misc.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2009, Peter Sagerson
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# - Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# - Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from users.models import Group
+
+
+# Copied from django_auth_ldap.backend._LDAPUser and modified to support our
+# custom Group model.
+def _mirror_groups(self):
+ """
+ Mirrors the user's LDAP groups in the Django database and updates the
+ user's membership.
+ """
+ target_group_names = frozenset(self._get_groups().get_group_names())
+ current_group_names = frozenset(
+ self._user.groups.values_list("name", flat=True).iterator()
+ )
+
+ # These were normalized to sets above.
+ MIRROR_GROUPS_EXCEPT = self.settings.MIRROR_GROUPS_EXCEPT
+ MIRROR_GROUPS = self.settings.MIRROR_GROUPS
+
+ # If the settings are white- or black-listing groups, we'll update
+ # target_group_names such that we won't modify the membership of groups
+ # beyond our purview.
+ if isinstance(MIRROR_GROUPS_EXCEPT, (set, frozenset)):
+ target_group_names = (target_group_names - MIRROR_GROUPS_EXCEPT) | (
+ current_group_names & MIRROR_GROUPS_EXCEPT
+ )
+ elif isinstance(MIRROR_GROUPS, (set, frozenset)):
+ target_group_names = (target_group_names & MIRROR_GROUPS) | (
+ current_group_names - MIRROR_GROUPS
+ )
+
+ if target_group_names != current_group_names:
+ existing_groups = list(
+ Group.objects.filter(name__in=target_group_names).iterator()
+ )
+ existing_group_names = frozenset(group.name for group in existing_groups)
+
+ new_groups = [
+ Group.objects.get_or_create(name=name)[0]
+ for name in target_group_names
+ if name not in existing_group_names
+ ]
+
+ self._user.groups.set(existing_groups + new_groups)
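The whitelist/blacklist handling above is plain set arithmetic. A standalone illustration with invented group names, showing that excepted groups keep whatever membership the user already has locally:

```python
target = frozenset({"netops", "noc", "ldap-temp"})           # groups from LDAP
current = frozenset({"noc", "netbox-admins"})                # current Django groups
MIRROR_GROUPS_EXCEPT = frozenset({"netbox-admins", "ldap-temp"})

effective = (target - MIRROR_GROUPS_EXCEPT) | (current & MIRROR_GROUPS_EXCEPT)
print(sorted(effective))  # ['netbox-admins', 'netops', 'noc']
```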
diff --git a/netbox/netbox/configuration_example.py b/netbox/netbox/configuration_example.py
index f415ca42f..84ead5339 100644
--- a/netbox/netbox/configuration_example.py
+++ b/netbox/netbox/configuration_example.py
@@ -131,9 +131,6 @@ EMAIL = {
'FROM_EMAIL': '',
}
-# Localization
-ENABLE_LOCALIZATION = False
-
# Exempt certain models from the enforcement of view permissions. Models listed here will be viewable by all users and
# by anonymous users. List models in the form `<app>.<model>`.
- *   (`<br />`). This is needed only for full CommonMark compatibility. In real
- * world you will need HTML output.
- * - __breaks__ - `false`. Set `true` to convert `\n` in paragraphs into `<br>`.
- * - __langPrefix__ - `language-`. CSS language class prefix for fenced blocks.
- * Can be useful for external highlighters.
- * - __linkify__ - `false`. Set `true` to autoconvert URL-like text to links.
- * - __typographer__ - `false`. Set `true` to enable [some language-neutral
- * replacement](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/replacements.js) +
- * quotes beautification (smartquotes).
- * - __quotes__ - `“”‘’`, String or Array. Double + single quotes replacement
- * pairs, when typographer enabled and smartquotes on. For example, you can
- * use `'«»„“'` for Russian, `'„“‚‘'` for German, and
- * `['«\xA0', '\xA0»', '‹\xA0', '\xA0›']` for French (including nbsp).
- * - __highlight__ - `null`. Highlighter function for fenced code blocks.
- * Highlighter `function (str, lang)` should return escaped HTML. It can also
- * return empty string if the source was not changed and should be escaped
- * externaly. If result starts with <pre... internal wrapper is skipped.
- *
- * ##### Syntax highlighting
- *
- * ```js
- * var hljs = require('highlight.js') // https://highlightjs.org/
- *
- * var md = require('markdown-it')({
- *   highlight: function (str, lang) {
- *     if (lang && hljs.getLanguage(lang)) {
- *       try {
- *         return '<pre class="hljs"><code>' +
- *                hljs.highlight(str, { language: lang, ignoreIllegals: true }).value +
- *                '</code></pre>';
- *       } catch (__) {}
- *     }
- *
- *     return '<pre class="hljs"><code>' + md.utils.escapeHtml(str) + '</code></pre>';
- *   }
- * });
- * ```
- *
- **/
-function MarkdownIt(presetName, options) {
- if (!(this instanceof MarkdownIt)) {
- return new MarkdownIt(presetName, options);
- }
- if (!options) {
- if (!utils.isString(presetName)) {
- options = presetName || {};
- presetName = 'default';
- }
- }
-
- /**
- * MarkdownIt#inline -> ParserInline
- *
- * Instance of [[ParserInline]]. You may need it to add new rules when
- * writing plugins. For simple rules control use [[MarkdownIt.disable]] and
- * [[MarkdownIt.enable]].
- **/
- this.inline = new ParserInline();
-
- /**
- * MarkdownIt#block -> ParserBlock
- *
- * Instance of [[ParserBlock]]. You may need it to add new rules when
- * writing plugins. For simple rules control use [[MarkdownIt.disable]] and
- * [[MarkdownIt.enable]].
- **/
- this.block = new ParserBlock();
-
- /**
- * MarkdownIt#core -> Core
- *
- * Instance of [[Core]] chain executor. You may need it to add new rules when
- * writing plugins. For simple rules control use [[MarkdownIt.disable]] and
- * [[MarkdownIt.enable]].
- **/
- this.core = new ParserCore();
-
- /**
- * MarkdownIt#renderer -> Renderer
- *
- * Instance of [[Renderer]]. Use it to modify output look. Or to add rendering
- * rules for new token types, generated by plugins.
- *
- * ##### Example
- *
- * ```javascript
- * var md = require('markdown-it')();
- *
- * function myToken(tokens, idx, options, env, self) {
- * //...
- * return result;
- * };
- *
- * md.renderer.rules['my_token'] = myToken
- * ```
- *
- * See [[Renderer]] docs and [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js).
- **/
- this.renderer = new Renderer();
-
- /**
- * MarkdownIt#linkify -> LinkifyIt
- *
- * [linkify-it](https://github.com/markdown-it/linkify-it) instance.
- * Used by [linkify](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/linkify.js)
- * rule.
- **/
- this.linkify = new LinkifyIt();
-
- /**
- * MarkdownIt#validateLink(url) -> Boolean
- *
- * Link validation function. CommonMark allows too much in links. By default
- * we disable `javascript:`, `vbscript:`, `file:` schemas, and almost all `data:...` schemas
- * except some embedded image types.
- *
- * You can change this behaviour:
- *
- * ```javascript
- * var md = require('markdown-it')();
- * // enable everything
- * md.validateLink = function () { return true; }
- * ```
- **/
- this.validateLink = validateLink;
-
- /**
- * MarkdownIt#normalizeLink(url) -> String
- *
- * Function used to encode link url to a machine-readable format,
- * which includes url-encoding, punycode, etc.
- **/
- this.normalizeLink = normalizeLink;
-
- /**
- * MarkdownIt#normalizeLinkText(url) -> String
- *
- * Function used to decode link url to a human-readable format`
- **/
- this.normalizeLinkText = normalizeLinkText;
-
- // Expose utils & helpers for easy acces from plugins
-
- /**
- * MarkdownIt#utils -> utils
- *
- * Assorted utility functions, useful to write plugins. See details
- * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/common/utils.js).
- **/
- this.utils = utils;
-
- /**
- * MarkdownIt#helpers -> helpers
- *
- * Link components parser functions, useful to write plugins. See details
- * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/helpers).
- **/
- this.helpers = utils.assign({}, helpers);
- this.options = {};
- this.configure(presetName);
- if (options) {
- this.set(options);
- }
-}
-
-/** chainable
- * MarkdownIt.set(options)
- *
- * Set parser options (in the same format as in constructor). Probably, you
- * will never need it, but you can change options after constructor call.
- *
- * ##### Example
- *
- * ```javascript
- * var md = require('markdown-it')()
- * .set({ html: true, breaks: true })
- * .set({ typographer, true });
- * ```
- *
- * __Note:__ To achieve the best possible performance, don't modify a
- * `markdown-it` instance options on the fly. If you need multiple configurations
- * it's best to create multiple instances and initialize each with separate
- * config.
- **/
-MarkdownIt.prototype.set = function (options) {
- utils.assign(this.options, options);
- return this;
-};
-
-/** chainable, internal
- * MarkdownIt.configure(presets)
- *
- * Batch load of all options and compenent settings. This is internal method,
- * and you probably will not need it. But if you will - see available presets
- * and data structure [here](https://github.com/markdown-it/markdown-it/tree/master/lib/presets)
- *
- * We strongly recommend to use presets instead of direct config loads. That
- * will give better compatibility with next versions.
- **/
-MarkdownIt.prototype.configure = function (presets) {
- var self = this,
- presetName;
- if (utils.isString(presets)) {
- presetName = presets;
- presets = config[presetName];
- if (!presets) {
- throw new Error('Wrong `markdown-it` preset "' + presetName + '", check name');
- }
- }
- if (!presets) {
- throw new Error('Wrong `markdown-it` preset, can\'t be empty');
- }
- if (presets.options) {
- self.set(presets.options);
- }
- if (presets.components) {
- Object.keys(presets.components).forEach(function (name) {
- if (presets.components[name].rules) {
- self[name].ruler.enableOnly(presets.components[name].rules);
- }
- if (presets.components[name].rules2) {
- self[name].ruler2.enableOnly(presets.components[name].rules2);
- }
- });
- }
- return this;
-};
-
-/** chainable
- * MarkdownIt.enable(list, ignoreInvalid)
- * - list (String|Array): rule name or list of rule names to enable
- * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
- *
- * Enable list or rules. It will automatically find appropriate components,
- * containing rules with given names. If rule not found, and `ignoreInvalid`
- * not set - throws exception.
- *
- * ##### Example
- *
- * ```javascript
- * var md = require('markdown-it')()
- * .enable(['sub', 'sup'])
- * .disable('smartquotes');
- * ```
- **/
-MarkdownIt.prototype.enable = function (list, ignoreInvalid) {
- var result = [];
- if (!Array.isArray(list)) {
- list = [list];
- }
- ['core', 'block', 'inline'].forEach(function (chain) {
- result = result.concat(this[chain].ruler.enable(list, true));
- }, this);
- result = result.concat(this.inline.ruler2.enable(list, true));
- var missed = list.filter(function (name) {
- return result.indexOf(name) < 0;
- });
- if (missed.length && !ignoreInvalid) {
- throw new Error('MarkdownIt. Failed to enable unknown rule(s): ' + missed);
- }
- return this;
-};
-
-/** chainable
- * MarkdownIt.disable(list, ignoreInvalid)
- * - list (String|Array): rule name or list of rule names to disable.
- * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
- *
- * The same as [[MarkdownIt.enable]], but turn specified rules off.
- **/
-MarkdownIt.prototype.disable = function (list, ignoreInvalid) {
- var result = [];
- if (!Array.isArray(list)) {
- list = [list];
- }
- ['core', 'block', 'inline'].forEach(function (chain) {
- result = result.concat(this[chain].ruler.disable(list, true));
- }, this);
- result = result.concat(this.inline.ruler2.disable(list, true));
- var missed = list.filter(function (name) {
- return result.indexOf(name) < 0;
- });
- if (missed.length && !ignoreInvalid) {
- throw new Error('MarkdownIt. Failed to disable unknown rule(s): ' + missed);
- }
- return this;
-};
-
-/** chainable
- * MarkdownIt.use(plugin, params)
- *
- * Load specified plugin with given params into current parser instance.
- * It's just a sugar to call `plugin(md, params)` with curring.
- *
- * ##### Example
- *
- * ```javascript
- * var iterator = require('markdown-it-for-inline');
- * var md = require('markdown-it')()
- * .use(iterator, 'foo_replace', 'text', function (tokens, idx) {
- * tokens[idx].content = tokens[idx].content.replace(/foo/g, 'bar');
- * });
- * ```
- **/
-MarkdownIt.prototype.use = function (plugin /*, params, ... */) {
- var args = [this].concat(Array.prototype.slice.call(arguments, 1));
- plugin.apply(plugin, args);
- return this;
-};
-
-/** internal
- * MarkdownIt.parse(src, env) -> Array
- * - src (String): source string
- * - env (Object): environment sandbox
- *
- * Parse input string and return list of block tokens (special token type
- * "inline" will contain list of inline tokens). You should not call this
- * method directly, until you write custom renderer (for example, to produce
- * AST).
- *
- * `env` is used to pass data between "distributed" rules and return additional
- * metadata like reference info, needed for the renderer. It also can be used to
- * inject data in specific cases. Usually, you will be ok to pass `{}`,
- * and then pass updated object to renderer.
- **/
-MarkdownIt.prototype.parse = function (src, env) {
- if (typeof src !== 'string') {
- throw new Error('Input data should be a String');
- }
- var state = new this.core.State(src, this, env);
- this.core.process(state);
- return state.tokens;
-};
-
-/**
- * MarkdownIt.render(src [, env]) -> String
- * - src (String): source string
- * - env (Object): environment sandbox
- *
- * Render markdown string into html. It does all magic for you :).
- *
- * `env` can be used to inject additional metadata (`{}` by default).
- * But you will not need it with high probability. See also comment
- * in [[MarkdownIt.parse]].
- **/
-MarkdownIt.prototype.render = function (src, env) {
- env = env || {};
- return this.renderer.render(this.parse(src, env), this.options, env);
-};
-
-/** internal
- * MarkdownIt.parseInline(src, env) -> Array
- * - src (String): source string
- * - env (Object): environment sandbox
- *
- * The same as [[MarkdownIt.parse]] but skip all block rules. It returns the
- * block tokens list with the single `inline` element, containing parsed inline
- * tokens in `children` property. Also updates `env` object.
- **/
-MarkdownIt.prototype.parseInline = function (src, env) {
- var state = new this.core.State(src, this, env);
- state.inlineMode = true;
- this.core.process(state);
- return state.tokens;
-};
-
-/**
- * MarkdownIt.renderInline(src [, env]) -> String
- * - src (String): source string
- * - env (Object): environment sandbox
- *
- * Similar to [[MarkdownIt.render]] but for single paragraph content. Result
- * will NOT be wrapped into `<p>` tags.
- **/
- breaks: false,
- // Convert '\n' in paragraphs into <br>
- langPrefix: 'language-',
- // CSS language prefix for fenced blocks
- linkify: false,
- // autoconvert URL-like texts to links
-
- // Enable some language-neutral replacements + quotes beautification
- typographer: false,
- // Double + single quotes replacement pairs, when typographer enabled,
- // and smartquotes on. Could be either a String or an Array.
- //
- // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
- // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
- quotes: '\u201c\u201d\u2018\u2019',
- /* “”‘’ */
-
- // Highlighter function. Should return escaped HTML,
- // or '' if the source string is not changed and should be escaped externaly.
- // If result starts with <pre... internal wrapper is skipped.
- langPrefix: 'language-',
- // CSS language prefix for fenced blocks
- linkify: false,
- // autoconvert URL-like texts to links
-
- // Enable some language-neutral replacements + quotes beautification
- typographer: false,
- // Double + single quotes replacement pairs, when typographer enabled,
- // and smartquotes on. Could be either a String or an Array.
- //
- // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
- // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
- quotes: '\u201c\u201d\u2018\u2019',
- /* “”‘’ */
-
- // Highlighter function. Should return escaped HTML,
- // or '' if the source string is not changed and should be escaped externaly.
- // If result starts with <pre... internal wrapper is skipped.
- langPrefix: 'language-',
- // CSS language prefix for fenced blocks
- linkify: false,
- // autoconvert URL-like texts to links
-
- // Enable some language-neutral replacements + quotes beautification
- typographer: false,
- // Double + single quotes replacement pairs, when typographer enabled,
- // and smartquotes on. Could be either a String or an Array.
- //
- // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
- // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
- quotes: '\u201c\u201d\u2018\u2019',
- /* “”‘’ */
-
- // Highlighter function. Should return escaped HTML,
- // or '' if the source string is not changed and should be escaped externaly.
- // If result starts with <pre... internal wrapper is skipped.
-default_rules.code_block = function (tokens, idx, options, env, slf) {
- var token = tokens[idx];
- return '<pre' + slf.renderAttrs(token) + '><code>' +
-   escapeHtml(tokens[idx].content) +
-   '</code></pre>\n';
-};
-default_rules.fence = function (tokens, idx, options, env, slf) {
- var token = tokens[idx],
- info = token.info ? unescapeAll(token.info).trim() : '',
- langName = '',
- langAttrs = '',
- highlighted,
- i,
- arr,
- tmpAttrs,
- tmpToken;
- if (info) {
- arr = info.split(/(\s+)/g);
- langName = arr[0];
- langAttrs = arr.slice(2).join('');
- }
- if (options.highlight) {
- highlighted = options.highlight(token.content, langName, langAttrs) || escapeHtml(token.content);
- } else {
- highlighted = escapeHtml(token.content);
- }
- if (highlighted.indexOf('<pre') === 0) {
-   return highlighted + '\n';
- }
-
- return '<pre><code' + slf.renderAttrs(token) + '>' + highlighted + '</code></pre>\n';
-};
-default_rules.image = function (tokens, idx, options, env, slf) {
- var token = tokens[idx];
-
- // "alt" attr MUST be set, even if empty. Because it's mandatory and
- // should be placed on proper position for tests.
- //
- // Replace content with actual value
-
- token.attrs[token.attrIndex('alt')][1] = slf.renderInlineAsText(token.children, options, env);
- return slf.renderToken(tokens, idx, options);
-};
-default_rules.hardbreak = function (tokens, idx, options /*, env */) {
- return options.xhtmlOut ? '<br />\n' : '<br>\n';
-};
-default_rules.softbreak = function (tokens, idx, options /*, env */) {
- return options.breaks ? (options.xhtmlOut ? '<br />\n' : '<br>\n') : '\n';
-};
-default_rules.text = function (tokens, idx /*, options, env */) {
- return escapeHtml(tokens[idx].content);
-};
-default_rules.html_block = function (tokens, idx /*, options, env */) {
- return tokens[idx].content;
-};
-default_rules.html_inline = function (tokens, idx /*, options, env */) {
- return tokens[idx].content;
-};
-
-/**
- * new Renderer()
- *
- * Creates new [[Renderer]] instance and fill [[Renderer#rules]] with defaults.
- **/
-function Renderer() {
- /**
- * Renderer#rules -> Object
- *
- * Contains render rules for tokens. Can be updated and extended.
- *
- * ##### Example
- *
- * ```javascript
- * var md = require('markdown-it')();
- *
- * md.renderer.rules.strong_open = function () { return ''; };
- * md.renderer.rules.strong_close = function () { return ''; };
- *
- * var result = md.renderInline(...);
- * ```
- *
- * Each rule is called as independent static function with fixed signature:
- *
- * ```javascript
- * function my_token_render(tokens, idx, options, env, renderer) {
- * // ...
- * return renderedHTML;
- * }
- * ```
- *
- * See [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js)
- * for more details and examples.
- **/
- this.rules = assign({}, default_rules);
-}
-
-/**
- * Renderer.renderAttrs(token) -> String
- *
- * Render token attributes to string.
- **/
-Renderer.prototype.renderAttrs = function renderAttrs(token) {
- var i, l, result;
- if (!token.attrs) {
- return '';
- }
- result = '';
- for (i = 0, l = token.attrs.length; i < l; i++) {
- result += ' ' + escapeHtml(token.attrs[i][0]) + '="' + escapeHtml(token.attrs[i][1]) + '"';
- }
- return result;
-};
-
-/**
- * Renderer.renderToken(tokens, idx, options) -> String
- * - tokens (Array): list of tokens
- * - idx (Numbed): token index to render
- * - options (Object): params of parser instance
- *
- * Default token renderer. Can be overriden by custom function
- * in [[Renderer#rules]].
- **/
-Renderer.prototype.renderToken = function renderToken(tokens, idx, options) {
- var nextToken,
- result = '',
- needLf = false,
- token = tokens[idx];
-
- // Tight list paragraphs
- if (token.hidden) {
- return '';
- }
-
- // Insert a newline between hidden paragraph and subsequent opening
- // block-level tag.
- //
- // For example, here we should insert a newline before blockquote:
- // - a
- // >
- //
- if (token.block && token.nesting !== -1 && idx && tokens[idx - 1].hidden) {
- result += '\n';
- }
-
- // Add token name, e.g. `<img`.
- //
- needLf = false;
- }
- }
- }
- }
- result += needLf ? '>\n' : '>';
- return result;
-};
-
-/**
- * Renderer.renderInline(tokens, options, env) -> String
- * - tokens (Array): list on block tokens to renter
- * - options (Object): params of parser instance
- * - env (Object): additional data from parsed input (references, for example)
- *
- * The same as [[Renderer.render]], but for single token of `inline` type.
- **/
-Renderer.prototype.renderInline = function (tokens, options, env) {
- var type,
- result = '',
- rules = this.rules;
- for (var i = 0, len = tokens.length; i < len; i++) {
- type = tokens[i].type;
- if (typeof rules[type] !== 'undefined') {
- result += rules[type](tokens, i, options, env, this);
- } else {
- result += this.renderToken(tokens, i, options);
- }
- }
- return result;
-};
-
-/** internal
- * Renderer.renderInlineAsText(tokens, options, env) -> String
- * - tokens (Array): list on block tokens to renter
- * - options (Object): params of parser instance
- * - env (Object): additional data from parsed input (references, for example)
- *
- * Special kludge for image `alt` attributes to conform CommonMark spec.
- * Don't try to use it! Spec requires to show `alt` content with stripped markup,
- * instead of simple escaping.
- **/
-Renderer.prototype.renderInlineAsText = function (tokens, options, env) {
- var result = '';
- for (var i = 0, len = tokens.length; i < len; i++) {
- if (tokens[i].type === 'text') {
- result += tokens[i].content;
- } else if (tokens[i].type === 'image') {
- result += this.renderInlineAsText(tokens[i].children, options, env);
- } else if (tokens[i].type === 'softbreak') {
- result += '\n';
- }
- }
- return result;
-};
-
-/**
- * Renderer.render(tokens, options, env) -> String
- * - tokens (Array): list on block tokens to renter
- * - options (Object): params of parser instance
- * - env (Object): additional data from parsed input (references, for example)
- *
- * Takes token stream and generates HTML. Probably, you will never need to call
- * this method directly.
- **/
-Renderer.prototype.render = function (tokens, options, env) {
- var i,
- len,
- type,
- result = '',
- rules = this.rules;
- for (i = 0, len = tokens.length; i < len; i++) {
- type = tokens[i].type;
- if (type === 'inline') {
- result += this.renderInline(tokens[i].children, options, env);
- } else if (typeof rules[type] !== 'undefined') {
- result += rules[tokens[i].type](tokens, i, options, env, this);
- } else {
- result += this.renderToken(tokens, i, options, env);
- }
- }
- return result;
-};
-module.exports = Renderer;
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/ruler.js":
-/*!******************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/ruler.js ***!
- \******************************************************/
-/***/ (function(module) {
-
-/**
- * class Ruler
- *
- * Helper class, used by [[MarkdownIt#core]], [[MarkdownIt#block]] and
- * [[MarkdownIt#inline]] to manage sequences of functions (rules):
- *
- * - keep rules in defined order
- * - assign the name to each rule
- * - enable/disable rules
- * - add/replace rules
- * - allow assign rules to additional named chains (in the same)
- * - cacheing lists of active rules
- *
- * You will not need use this class directly until write plugins. For simple
- * rules control use [[MarkdownIt.disable]], [[MarkdownIt.enable]] and
- * [[MarkdownIt.use]].
- **/
-
-
-/**
- * new Ruler()
- **/
-function Ruler() {
- // List of added rules. Each element is:
- //
- // {
- // name: XXX,
- // enabled: Boolean,
- // fn: Function(),
- // alt: [ name2, name3 ]
- // }
- //
- this.__rules__ = [];
-
- // Cached rule chains.
- //
- // First level - chain name, '' for default.
- // Second level - diginal anchor for fast filtering by charcodes.
- //
- this.__cache__ = null;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Helper methods, should not be used directly
-
-// Find rule index by name
-//
-Ruler.prototype.__find__ = function (name) {
- for (var i = 0; i < this.__rules__.length; i++) {
- if (this.__rules__[i].name === name) {
- return i;
- }
- }
- return -1;
-};
-
-// Build rules lookup cache
-//
-Ruler.prototype.__compile__ = function () {
- var self = this;
- var chains = [''];
-
- // collect unique names
- self.__rules__.forEach(function (rule) {
- if (!rule.enabled) {
- return;
- }
- rule.alt.forEach(function (altName) {
- if (chains.indexOf(altName) < 0) {
- chains.push(altName);
- }
- });
- });
- self.__cache__ = {};
- chains.forEach(function (chain) {
- self.__cache__[chain] = [];
- self.__rules__.forEach(function (rule) {
- if (!rule.enabled) {
- return;
- }
- if (chain && rule.alt.indexOf(chain) < 0) {
- return;
- }
- self.__cache__[chain].push(rule.fn);
- });
- });
-};
-
-/**
- * Ruler.at(name, fn [, options])
- * - name (String): rule name to replace.
- * - fn (Function): new rule function.
- * - options (Object): new rule options (not mandatory).
- *
- * Replace rule by name with new function & options. Throws error if name not
- * found.
- *
- * ##### Options:
- *
- * - __alt__ - array with names of "alternate" chains.
- *
- * ##### Example
- *
- * Replace existing typographer replacement rule with new one:
- *
- * ```javascript
- * var md = require('markdown-it')();
- *
- * md.core.ruler.at('replacements', function replace(state) {
- * //...
- * });
- * ```
- **/
-Ruler.prototype.at = function (name, fn, options) {
- var index = this.__find__(name);
- var opt = options || {};
- if (index === -1) {
- throw new Error('Parser rule not found: ' + name);
- }
- this.__rules__[index].fn = fn;
- this.__rules__[index].alt = opt.alt || [];
- this.__cache__ = null;
-};
-
-/**
- * Ruler.before(beforeName, ruleName, fn [, options])
- * - beforeName (String): new rule will be added before this one.
- * - ruleName (String): name of added rule.
- * - fn (Function): rule function.
- * - options (Object): rule options (not mandatory).
- *
- * Add new rule to chain before one with given name. See also
- * [[Ruler.after]], [[Ruler.push]].
- *
- * ##### Options:
- *
- * - __alt__ - array with names of "alternate" chains.
- *
- * ##### Example
- *
- * ```javascript
- * var md = require('markdown-it')();
- *
- * md.block.ruler.before('paragraph', 'my_rule', function replace(state) {
- * //...
- * });
- * ```
- **/
-Ruler.prototype.before = function (beforeName, ruleName, fn, options) {
- var index = this.__find__(beforeName);
- var opt = options || {};
- if (index === -1) {
- throw new Error('Parser rule not found: ' + beforeName);
- }
- this.__rules__.splice(index, 0, {
- name: ruleName,
- enabled: true,
- fn: fn,
- alt: opt.alt || []
- });
- this.__cache__ = null;
-};
-
-/**
- * Ruler.after(afterName, ruleName, fn [, options])
- * - afterName (String): new rule will be added after this one.
- * - ruleName (String): name of added rule.
- * - fn (Function): rule function.
- * - options (Object): rule options (not mandatory).
- *
- * Add new rule to chain after one with given name. See also
- * [[Ruler.before]], [[Ruler.push]].
- *
- * ##### Options:
- *
- * - __alt__ - array with names of "alternate" chains.
- *
- * ##### Example
- *
- * ```javascript
- * var md = require('markdown-it')();
- *
- * md.inline.ruler.after('text', 'my_rule', function replace(state) {
- * //...
- * });
- * ```
- **/
-Ruler.prototype.after = function (afterName, ruleName, fn, options) {
- var index = this.__find__(afterName);
- var opt = options || {};
- if (index === -1) {
- throw new Error('Parser rule not found: ' + afterName);
- }
- this.__rules__.splice(index + 1, 0, {
- name: ruleName,
- enabled: true,
- fn: fn,
- alt: opt.alt || []
- });
- this.__cache__ = null;
-};
-
-/**
- * Ruler.push(ruleName, fn [, options])
- * - ruleName (String): name of added rule.
- * - fn (Function): rule function.
- * - options (Object): rule options (not mandatory).
- *
- * Push new rule to the end of chain. See also
- * [[Ruler.before]], [[Ruler.after]].
- *
- * ##### Options:
- *
- * - __alt__ - array with names of "alternate" chains.
- *
- * ##### Example
- *
- * ```javascript
- * var md = require('markdown-it')();
- *
- * md.core.ruler.push('my_rule', function replace(state) {
- * //...
- * });
- * ```
- **/
-Ruler.prototype.push = function (ruleName, fn, options) {
- var opt = options || {};
- this.__rules__.push({
- name: ruleName,
- enabled: true,
- fn: fn,
- alt: opt.alt || []
- });
- this.__cache__ = null;
-};
-
-/**
- * Ruler.enable(list [, ignoreInvalid]) -> Array
- * - list (String|Array): list of rule names to enable.
- * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
- *
- * Enable rules with given names. If any rule name not found - throw Error.
- * Errors can be disabled by second param.
- *
- * Returns list of found rule names (if no exception happened).
- *
- * See also [[Ruler.disable]], [[Ruler.enableOnly]].
- **/
-Ruler.prototype.enable = function (list, ignoreInvalid) {
- if (!Array.isArray(list)) {
- list = [list];
- }
- var result = [];
-
- // Search by name and enable
- list.forEach(function (name) {
- var idx = this.__find__(name);
- if (idx < 0) {
- if (ignoreInvalid) {
- return;
- }
- throw new Error('Rules manager: invalid rule name ' + name);
- }
- this.__rules__[idx].enabled = true;
- result.push(name);
- }, this);
- this.__cache__ = null;
- return result;
-};
-
-/**
- * Ruler.enableOnly(list [, ignoreInvalid])
- * - list (String|Array): list of rule names to enable (whitelist).
- * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
- *
- * Enable rules with given names, and disable everything else. If any rule name
- * not found - throw Error. Errors can be disabled by second param.
- *
- * See also [[Ruler.disable]], [[Ruler.enable]].
- **/
-Ruler.prototype.enableOnly = function (list, ignoreInvalid) {
- if (!Array.isArray(list)) {
- list = [list];
- }
- this.__rules__.forEach(function (rule) {
- rule.enabled = false;
- });
- this.enable(list, ignoreInvalid);
-};
-
-/**
- * Ruler.disable(list [, ignoreInvalid]) -> Array
- * - list (String|Array): list of rule names to disable.
- * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
- *
- * Disable rules with given names. If any rule name not found - throw Error.
- * Errors can be disabled by second param.
- *
- * Returns list of found rule names (if no exception happened).
- *
- * See also [[Ruler.enable]], [[Ruler.enableOnly]].
- **/
-Ruler.prototype.disable = function (list, ignoreInvalid) {
- if (!Array.isArray(list)) {
- list = [list];
- }
- var result = [];
-
- // Search by name and disable
- list.forEach(function (name) {
- var idx = this.__find__(name);
- if (idx < 0) {
- if (ignoreInvalid) {
- return;
- }
- throw new Error('Rules manager: invalid rule name ' + name);
- }
- this.__rules__[idx].enabled = false;
- result.push(name);
- }, this);
- this.__cache__ = null;
- return result;
-};
-
-/**
- * Ruler.getRules(chainName) -> Array
- *
- * Return array of active functions (rules) for given chain name. It analyzes
- * rules configuration, compiles caches if not exists and returns result.
- *
- * Default chain name is `''` (empty string). It can't be skipped. That's
- * done intentionally, to keep signature monomorphic for high speed.
- **/
-Ruler.prototype.getRules = function (chainName) {
- if (this.__cache__ === null) {
- this.__compile__();
- }
-
- // Chain can be empty, if rules disabled. But we still have to return Array.
- return this.__cache__[chainName] || [];
-};
-module.exports = Ruler;
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/blockquote.js":
-/*!***********************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/blockquote.js ***!
- \***********************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-// Block quotes
-
-
-
-var isSpace = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").isSpace);
-module.exports = function blockquote(state, startLine, endLine, silent) {
- var adjustTab,
- ch,
- i,
- initial,
- l,
- lastLineEmpty,
- lines,
- nextLine,
- offset,
- oldBMarks,
- oldBSCount,
- oldIndent,
- oldParentType,
- oldSCount,
- oldTShift,
- spaceAfterMarker,
- terminate,
- terminatorRules,
- token,
- isOutdented,
- oldLineMax = state.lineMax,
- pos = state.bMarks[startLine] + state.tShift[startLine],
- max = state.eMarks[startLine];
-
- // if it's indented more than 3 spaces, it should be a code block
- if (state.sCount[startLine] - state.blkIndent >= 4) {
- return false;
- }
-
- // check the block quote marker
- if (state.src.charCodeAt(pos++) !== 0x3E /* > */) {
- return false;
- }
-
- // we know that it's going to be a valid blockquote,
- // so no point trying to find the end of it in silent mode
- if (silent) {
- return true;
- }
-
- // set offset past spaces and ">"
- initial = offset = state.sCount[startLine] + 1;
-
- // skip one optional space after '>'
- if (state.src.charCodeAt(pos) === 0x20 /* space */) {
- // ' > test '
- // ^ -- position start of line here:
- pos++;
- initial++;
- offset++;
- adjustTab = false;
- spaceAfterMarker = true;
- } else if (state.src.charCodeAt(pos) === 0x09 /* tab */) {
- spaceAfterMarker = true;
- if ((state.bsCount[startLine] + offset) % 4 === 3) {
- // ' >\t test '
- // ^ -- position start of line here (tab has width===1)
- pos++;
- initial++;
- offset++;
- adjustTab = false;
- } else {
- // ' >\t test '
- // ^ -- position start of line here + shift bsCount slightly
- // to make extra space appear
- adjustTab = true;
- }
- } else {
- spaceAfterMarker = false;
- }
- oldBMarks = [state.bMarks[startLine]];
- state.bMarks[startLine] = pos;
- while (pos < max) {
- ch = state.src.charCodeAt(pos);
- if (isSpace(ch)) {
- if (ch === 0x09) {
- offset += 4 - (offset + state.bsCount[startLine] + (adjustTab ? 1 : 0)) % 4;
- } else {
- offset++;
- }
- } else {
- break;
- }
- pos++;
- }
- oldBSCount = [state.bsCount[startLine]];
- state.bsCount[startLine] = state.sCount[startLine] + 1 + (spaceAfterMarker ? 1 : 0);
- lastLineEmpty = pos >= max;
- oldSCount = [state.sCount[startLine]];
- state.sCount[startLine] = offset - initial;
- oldTShift = [state.tShift[startLine]];
- state.tShift[startLine] = pos - state.bMarks[startLine];
- terminatorRules = state.md.block.ruler.getRules('blockquote');
- oldParentType = state.parentType;
- state.parentType = 'blockquote';
-
- // Search the end of the block
- //
- // Block ends with either:
- // 1. an empty line outside:
- // ```
- // > test
- //
- // ```
- // 2. an empty line inside:
- // ```
- // >
- // test
- // ```
- // 3. another tag:
- // ```
- // > test
- // - - -
- // ```
- for (nextLine = startLine + 1; nextLine < endLine; nextLine++) {
- // check if it's outdented, i.e. it's inside list item and indented
- // less than said list item:
- //
- // ```
- // 1. anything
- // > current blockquote
- // 2. checking this line
- // ```
- isOutdented = state.sCount[nextLine] < state.blkIndent;
- pos = state.bMarks[nextLine] + state.tShift[nextLine];
- max = state.eMarks[nextLine];
- if (pos >= max) {
- // Case 1: line is not inside the blockquote, and this line is empty.
- break;
- }
- if (state.src.charCodeAt(pos++) === 0x3E /* > */ && !isOutdented) {
- // This line is inside the blockquote.
-
- // set offset past spaces and ">"
- initial = offset = state.sCount[nextLine] + 1;
-
- // skip one optional space after '>'
- if (state.src.charCodeAt(pos) === 0x20 /* space */) {
- // ' > test '
- // ^ -- position start of line here:
- pos++;
- initial++;
- offset++;
- adjustTab = false;
- spaceAfterMarker = true;
- } else if (state.src.charCodeAt(pos) === 0x09 /* tab */) {
- spaceAfterMarker = true;
- if ((state.bsCount[nextLine] + offset) % 4 === 3) {
- // ' >\t test '
- // ^ -- position start of line here (tab has width===1)
- pos++;
- initial++;
- offset++;
- adjustTab = false;
- } else {
- // ' >\t test '
- // ^ -- position start of line here + shift bsCount slightly
- // to make extra space appear
- adjustTab = true;
- }
- } else {
- spaceAfterMarker = false;
- }
- oldBMarks.push(state.bMarks[nextLine]);
- state.bMarks[nextLine] = pos;
- while (pos < max) {
- ch = state.src.charCodeAt(pos);
- if (isSpace(ch)) {
- if (ch === 0x09) {
- offset += 4 - (offset + state.bsCount[nextLine] + (adjustTab ? 1 : 0)) % 4;
- } else {
- offset++;
- }
- } else {
- break;
- }
- pos++;
- }
- lastLineEmpty = pos >= max;
- oldBSCount.push(state.bsCount[nextLine]);
- state.bsCount[nextLine] = state.sCount[nextLine] + 1 + (spaceAfterMarker ? 1 : 0);
- oldSCount.push(state.sCount[nextLine]);
- state.sCount[nextLine] = offset - initial;
- oldTShift.push(state.tShift[nextLine]);
- state.tShift[nextLine] = pos - state.bMarks[nextLine];
- continue;
- }
-
- // Case 2: line is not inside the blockquote, and the last line was empty.
- if (lastLineEmpty) {
- break;
- }
-
- // Case 3: another tag found.
- terminate = false;
- for (i = 0, l = terminatorRules.length; i < l; i++) {
- if (terminatorRules[i](state, nextLine, endLine, true)) {
- terminate = true;
- break;
- }
- }
- if (terminate) {
- // Quirk to enforce "hard termination mode" for paragraphs;
- // normally if you call `tokenize(state, startLine, nextLine)`,
- // paragraphs will look below nextLine for paragraph continuation,
- // but if blockquote is terminated by another tag, they shouldn't
- state.lineMax = nextLine;
- if (state.blkIndent !== 0) {
- // state.blkIndent was non-zero, we now set it to zero,
- // so we need to re-calculate all offsets to appear as
- // if indent wasn't changed
- oldBMarks.push(state.bMarks[nextLine]);
- oldBSCount.push(state.bsCount[nextLine]);
- oldTShift.push(state.tShift[nextLine]);
- oldSCount.push(state.sCount[nextLine]);
- state.sCount[nextLine] -= state.blkIndent;
- }
- break;
- }
- oldBMarks.push(state.bMarks[nextLine]);
- oldBSCount.push(state.bsCount[nextLine]);
- oldTShift.push(state.tShift[nextLine]);
- oldSCount.push(state.sCount[nextLine]);
-
- // A negative indentation means that this is a paragraph continuation
- //
- state.sCount[nextLine] = -1;
- }
- oldIndent = state.blkIndent;
- state.blkIndent = 0;
- token = state.push('blockquote_open', 'blockquote', 1);
- token.markup = '>';
- token.map = lines = [startLine, 0];
- state.md.block.tokenize(state, startLine, nextLine);
- token = state.push('blockquote_close', 'blockquote', -1);
- token.markup = '>';
- state.lineMax = oldLineMax;
- state.parentType = oldParentType;
- lines[1] = state.line;
-
- // Restore original tShift; this might not be necessary since the parser
- // has already been here, but just to make sure we can do that.
- for (i = 0; i < oldTShift.length; i++) {
- state.bMarks[i + startLine] = oldBMarks[i];
- state.tShift[i + startLine] = oldTShift[i];
- state.sCount[i + startLine] = oldSCount[i];
- state.bsCount[i + startLine] = oldBSCount[i];
- }
- state.blkIndent = oldIndent;
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/code.js":
-/*!*****************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/code.js ***!
- \*****************************************************************/
-/***/ (function(module) {
-
-// Code block (4 spaces padded)
-
-
-
-module.exports = function code(state, startLine, endLine /*, silent*/) {
- var nextLine, last, token;
- if (state.sCount[startLine] - state.blkIndent < 4) {
- return false;
- }
- last = nextLine = startLine + 1;
- while (nextLine < endLine) {
- if (state.isEmpty(nextLine)) {
- nextLine++;
- continue;
- }
- if (state.sCount[nextLine] - state.blkIndent >= 4) {
- nextLine++;
- last = nextLine;
- continue;
- }
- break;
- }
- state.line = last;
- token = state.push('code_block', 'code', 0);
- token.content = state.getLines(startLine, last, 4 + state.blkIndent, false) + '\n';
- token.map = [startLine, state.line];
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/fence.js":
-/*!******************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/fence.js ***!
- \******************************************************************/
-/***/ (function(module) {
-
-// fences (``` lang, ~~~ lang)
-
-
-
-module.exports = function fence(state, startLine, endLine, silent) {
- var marker,
- len,
- params,
- nextLine,
- mem,
- token,
- markup,
- haveEndMarker = false,
- pos = state.bMarks[startLine] + state.tShift[startLine],
- max = state.eMarks[startLine];
-
- // if it's indented more than 3 spaces, it should be a code block
- if (state.sCount[startLine] - state.blkIndent >= 4) {
- return false;
- }
- if (pos + 3 > max) {
- return false;
- }
- marker = state.src.charCodeAt(pos);
- if (marker !== 0x7E /* ~ */ && marker !== 0x60 /* ` */) {
- return false;
- }
-
- // scan marker length
- mem = pos;
- pos = state.skipChars(pos, marker);
- len = pos - mem;
- if (len < 3) {
- return false;
- }
- markup = state.src.slice(mem, pos);
- params = state.src.slice(pos, max);
- if (marker === 0x60 /* ` */) {
- if (params.indexOf(String.fromCharCode(marker)) >= 0) {
- return false;
- }
- }
-
- // Since start is found, we can report success here in validation mode
- if (silent) {
- return true;
- }
-
- // search end of block
- nextLine = startLine;
- for (;;) {
- nextLine++;
- if (nextLine >= endLine) {
- // unclosed block should be autoclosed by end of document.
- // also block seems to be autoclosed by end of parent
- break;
- }
- pos = mem = state.bMarks[nextLine] + state.tShift[nextLine];
- max = state.eMarks[nextLine];
- if (pos < max && state.sCount[nextLine] < state.blkIndent) {
- // non-empty line with negative indent should stop the list:
- // - ```
- // test
- break;
- }
- if (state.src.charCodeAt(pos) !== marker) {
- continue;
- }
- if (state.sCount[nextLine] - state.blkIndent >= 4) {
- // closing fence should be indented less than 4 spaces
- continue;
- }
- pos = state.skipChars(pos, marker);
-
- // closing code fence must be at least as long as the opening one
- if (pos - mem < len) {
- continue;
- }
-
- // make sure tail has spaces only
- pos = state.skipSpaces(pos);
- if (pos < max) {
- continue;
- }
- haveEndMarker = true;
- // found!
- break;
- }
-
- // If a fence has heading spaces, they should be removed from its inner block
- len = state.sCount[startLine];
- state.line = nextLine + (haveEndMarker ? 1 : 0);
- token = state.push('fence', 'code', 0);
- token.info = params;
- token.content = state.getLines(startLine + 1, nextLine, len, true);
- token.markup = markup;
- token.map = [startLine, state.line];
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/heading.js":
-/*!********************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/heading.js ***!
- \********************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-// heading (#, ##, ...)
-
-
-
-var isSpace = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").isSpace);
-module.exports = function heading(state, startLine, endLine, silent) {
- var ch,
- level,
- tmp,
- token,
- pos = state.bMarks[startLine] + state.tShift[startLine],
- max = state.eMarks[startLine];
-
- // if it's indented more than 3 spaces, it should be a code block
- if (state.sCount[startLine] - state.blkIndent >= 4) {
- return false;
- }
- ch = state.src.charCodeAt(pos);
- if (ch !== 0x23 /* # */ || pos >= max) {
- return false;
- }
-
- // count heading level
- level = 1;
- ch = state.src.charCodeAt(++pos);
- while (ch === 0x23 /* # */ && pos < max && level <= 6) {
- level++;
- ch = state.src.charCodeAt(++pos);
- }
- if (level > 6 || pos < max && !isSpace(ch)) {
- return false;
- }
- if (silent) {
- return true;
- }
-
- // Let's cut tails like ' ### ' from the end of string
-
- max = state.skipSpacesBack(max, pos);
- tmp = state.skipCharsBack(max, 0x23, pos); // #
- if (tmp > pos && isSpace(state.src.charCodeAt(tmp - 1))) {
- max = tmp;
- }
- state.line = startLine + 1;
- token = state.push('heading_open', 'h' + String(level), 1);
- token.markup = '########'.slice(0, level);
- token.map = [startLine, state.line];
- token = state.push('inline', '', 0);
- token.content = state.src.slice(pos, max).trim();
- token.map = [startLine, state.line];
- token.children = [];
- token = state.push('heading_close', 'h' + String(level), -1);
- token.markup = '########'.slice(0, level);
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/hr.js":
-/*!***************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/hr.js ***!
- \***************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-// Horizontal rule
-
-
-
-var isSpace = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").isSpace);
-module.exports = function hr(state, startLine, endLine, silent) {
- var marker,
- cnt,
- ch,
- token,
- pos = state.bMarks[startLine] + state.tShift[startLine],
- max = state.eMarks[startLine];
-
- // if it's indented more than 3 spaces, it should be a code block
- if (state.sCount[startLine] - state.blkIndent >= 4) {
- return false;
- }
- marker = state.src.charCodeAt(pos++);
-
- // Check hr marker
- if (marker !== 0x2A /* * */ && marker !== 0x2D /* - */ && marker !== 0x5F /* _ */) {
- return false;
- }
-
- // markers can be mixed with spaces, but there should be at least 3 of them
-
- cnt = 1;
- while (pos < max) {
- ch = state.src.charCodeAt(pos++);
- if (ch !== marker && !isSpace(ch)) {
- return false;
- }
- if (ch === marker) {
- cnt++;
- }
- }
- if (cnt < 3) {
- return false;
- }
- if (silent) {
- return true;
- }
- state.line = startLine + 1;
- token = state.push('hr', 'hr', 0);
- token.map = [startLine, state.line];
- token.markup = Array(cnt + 1).join(String.fromCharCode(marker));
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/html_block.js":
-/*!***********************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/html_block.js ***!
- \***********************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-// HTML block
-
-
-
-var block_names = __webpack_require__(/*! ../common/html_blocks */ "../../../node_modules/markdown-it/lib/common/html_blocks.js");
-var HTML_OPEN_CLOSE_TAG_RE = (__webpack_require__(/*! ../common/html_re */ "../../../node_modules/markdown-it/lib/common/html_re.js").HTML_OPEN_CLOSE_TAG_RE);
-
-// An array of opening and corresponding closing sequences for html tags,
-// last argument defines whether it can terminate a paragraph or not
-//
-var HTML_SEQUENCES = [[/^<(script|pre|style|textarea)(?=(\s|>|$))/i, /<\/(script|pre|style|textarea)>/i, true], [/^<!--/, /-->/, true], [/^<\?/, /\?>/, true], [/^<![A-Z]/, />/, true], [/^<!\[CDATA\[/, /\]\]>/, true], [new RegExp('^</?(' + block_names.join('|') + ')(?=(\\s|/?>|$))', 'i'), /^$/, true], [new RegExp(HTML_OPEN_CLOSE_TAG_RE.source + '\\s*$'), /^$/, false]];
-module.exports = function html_block(state, startLine, endLine, silent) {
- var i,
- nextLine,
- token,
- lineText,
- pos = state.bMarks[startLine] + state.tShift[startLine],
- max = state.eMarks[startLine];
-
- // if it's indented more than 3 spaces, it should be a code block
- if (state.sCount[startLine] - state.blkIndent >= 4) {
- return false;
- }
- if (!state.md.options.html) {
- return false;
- }
- if (state.src.charCodeAt(pos) !== 0x3C /* < */) {
- return false;
- }
- lineText = state.src.slice(pos, max);
- for (i = 0; i < HTML_SEQUENCES.length; i++) {
- if (HTML_SEQUENCES[i][0].test(lineText)) {
- break;
- }
- }
- if (i === HTML_SEQUENCES.length) {
- return false;
- }
- if (silent) {
- // true if this sequence can be a terminator, false otherwise
- return HTML_SEQUENCES[i][2];
- }
- nextLine = startLine + 1;
-
- // If we are here - we detected HTML block.
- // Let's roll down till block end.
- if (!HTML_SEQUENCES[i][1].test(lineText)) {
- for (; nextLine < endLine; nextLine++) {
- if (state.sCount[nextLine] < state.blkIndent) {
- break;
- }
- pos = state.bMarks[nextLine] + state.tShift[nextLine];
- max = state.eMarks[nextLine];
- lineText = state.src.slice(pos, max);
- if (HTML_SEQUENCES[i][1].test(lineText)) {
- if (lineText.length !== 0) {
- nextLine++;
- }
- break;
- }
- }
- }
- state.line = nextLine;
- token = state.push('html_block', '', 0);
- token.map = [startLine, nextLine];
- token.content = state.getLines(startLine, nextLine, state.blkIndent, true);
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/lheading.js":
-/*!*********************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/lheading.js ***!
- \*********************************************************************/
-/***/ (function(module) {
-
-// lheading (---, ===)
-
-
-
-module.exports = function lheading(state, startLine, endLine /*, silent*/) {
- var content,
- terminate,
- i,
- l,
- token,
- pos,
- max,
- level,
- marker,
- nextLine = startLine + 1,
- oldParentType,
- terminatorRules = state.md.block.ruler.getRules('paragraph');
-
- // if it's indented more than 3 spaces, it should be a code block
- if (state.sCount[startLine] - state.blkIndent >= 4) {
- return false;
- }
- oldParentType = state.parentType;
- state.parentType = 'paragraph'; // use paragraph to match terminatorRules
-
- // jump line-by-line until empty one or EOF
- for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {
- // this would be a code block normally, but after paragraph
- // it's considered a lazy continuation regardless of what's there
- if (state.sCount[nextLine] - state.blkIndent > 3) {
- continue;
- }
-
- //
- // Check for underline in setext header
- //
- if (state.sCount[nextLine] >= state.blkIndent) {
- pos = state.bMarks[nextLine] + state.tShift[nextLine];
- max = state.eMarks[nextLine];
- if (pos < max) {
- marker = state.src.charCodeAt(pos);
- if (marker === 0x2D /* - */ || marker === 0x3D /* = */) {
- pos = state.skipChars(pos, marker);
- pos = state.skipSpaces(pos);
- if (pos >= max) {
- level = marker === 0x3D /* = */ ? 1 : 2;
- break;
- }
- }
- }
- }
-
- // quirk for blockquotes, this line should already be checked by that rule
- if (state.sCount[nextLine] < 0) {
- continue;
- }
-
- // Some tags can terminate paragraph without empty line.
- terminate = false;
- for (i = 0, l = terminatorRules.length; i < l; i++) {
- if (terminatorRules[i](state, nextLine, endLine, true)) {
- terminate = true;
- break;
- }
- }
- if (terminate) {
- break;
- }
- }
- if (!level) {
- // Didn't find valid underline
- return false;
- }
- content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();
- state.line = nextLine + 1;
- token = state.push('heading_open', 'h' + String(level), 1);
- token.markup = String.fromCharCode(marker);
- token.map = [startLine, state.line];
- token = state.push('inline', '', 0);
- token.content = content;
- token.map = [startLine, state.line - 1];
- token.children = [];
- token = state.push('heading_close', 'h' + String(level), -1);
- token.markup = String.fromCharCode(marker);
- state.parentType = oldParentType;
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/list.js":
-/*!*****************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/list.js ***!
- \*****************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-// Lists
-
-
-
-var isSpace = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").isSpace);
-
-// Search `[-+*][\n ]`, returns next pos after marker on success
-// or -1 on fail.
-function skipBulletListMarker(state, startLine) {
- var marker, pos, max, ch;
- pos = state.bMarks[startLine] + state.tShift[startLine];
- max = state.eMarks[startLine];
- marker = state.src.charCodeAt(pos++);
- // Check bullet
- if (marker !== 0x2A /* * */ && marker !== 0x2D /* - */ && marker !== 0x2B /* + */) {
- return -1;
- }
- if (pos < max) {
- ch = state.src.charCodeAt(pos);
- if (!isSpace(ch)) {
- // " -test " - is not a list item
- return -1;
- }
- }
- return pos;
-}
-
-// Search `\d+[.)][\n ]`, returns next pos after marker on success
-// or -1 on fail.
-function skipOrderedListMarker(state, startLine) {
- var ch,
- start = state.bMarks[startLine] + state.tShift[startLine],
- pos = start,
- max = state.eMarks[startLine];
-
- // List marker should have at least 2 chars (digit + dot)
- if (pos + 1 >= max) {
- return -1;
- }
- ch = state.src.charCodeAt(pos++);
- if (ch < 0x30 /* 0 */ || ch > 0x39 /* 9 */) {
- return -1;
- }
- for (;;) {
- // EOL -> fail
- if (pos >= max) {
- return -1;
- }
- ch = state.src.charCodeAt(pos++);
- if (ch >= 0x30 /* 0 */ && ch <= 0x39 /* 9 */) {
- // List marker should have no more than 9 digits
- // (prevents integer overflow in browsers)
- if (pos - start >= 10) {
- return -1;
- }
- continue;
- }
-
- // found valid marker
- if (ch === 0x29 /* ) */ || ch === 0x2e /* . */) {
- break;
- }
- return -1;
- }
- if (pos < max) {
- ch = state.src.charCodeAt(pos);
- if (!isSpace(ch)) {
- // " 1.test " - is not a list item
- return -1;
- }
- }
- return pos;
-}
-function markTightParagraphs(state, idx) {
- var i,
- l,
- level = state.level + 2;
- for (i = idx + 2, l = state.tokens.length - 2; i < l; i++) {
- if (state.tokens[i].level === level && state.tokens[i].type === 'paragraph_open') {
- state.tokens[i + 2].hidden = true;
- state.tokens[i].hidden = true;
- i += 2;
- }
- }
-}
-module.exports = function list(state, startLine, endLine, silent) {
- var ch,
- contentStart,
- i,
- indent,
- indentAfterMarker,
- initial,
- isOrdered,
- itemLines,
- l,
- listLines,
- listTokIdx,
- markerCharCode,
- markerValue,
- max,
- nextLine,
- offset,
- oldListIndent,
- oldParentType,
- oldSCount,
- oldTShift,
- oldTight,
- pos,
- posAfterMarker,
- prevEmptyEnd,
- start,
- terminate,
- terminatorRules,
- token,
- isTerminatingParagraph = false,
- tight = true;
-
- // if it's indented more than 3 spaces, it should be a code block
- if (state.sCount[startLine] - state.blkIndent >= 4) {
- return false;
- }
-
- // Special case:
- // - item 1
- // - item 2
- // - item 3
- // - item 4
- // - this one is a paragraph continuation
- if (state.listIndent >= 0 && state.sCount[startLine] - state.listIndent >= 4 && state.sCount[startLine] < state.blkIndent) {
- return false;
- }
-
- // limit conditions when list can interrupt
- // a paragraph (validation mode only)
- if (silent && state.parentType === 'paragraph') {
- // Next list item should still terminate previous list item;
- //
- // This code can fail if plugins use blkIndent as well as lists,
- // but I hope the spec gets fixed long before that happens.
- //
- if (state.tShift[startLine] >= state.blkIndent) {
- isTerminatingParagraph = true;
- }
- }
-
- // Detect list type and position after marker
- if ((posAfterMarker = skipOrderedListMarker(state, startLine)) >= 0) {
- isOrdered = true;
- start = state.bMarks[startLine] + state.tShift[startLine];
- markerValue = Number(state.src.slice(start, posAfterMarker - 1));
-
- // If we're starting a new ordered list right after
- // a paragraph, it should start with 1.
- if (isTerminatingParagraph && markerValue !== 1) return false;
- } else if ((posAfterMarker = skipBulletListMarker(state, startLine)) >= 0) {
- isOrdered = false;
- } else {
- return false;
- }
-
- // If we're starting a new unordered list right after
- // a paragraph, first line should not be empty.
- if (isTerminatingParagraph) {
- if (state.skipSpaces(posAfterMarker) >= state.eMarks[startLine]) return false;
- }
-
- // We should terminate list on style change. Remember first one to compare.
- markerCharCode = state.src.charCodeAt(posAfterMarker - 1);
-
- // For validation mode we can terminate immediately
- if (silent) {
- return true;
- }
-
- // Start list
- listTokIdx = state.tokens.length;
- if (isOrdered) {
- token = state.push('ordered_list_open', 'ol', 1);
- if (markerValue !== 1) {
- token.attrs = [['start', markerValue]];
- }
- } else {
- token = state.push('bullet_list_open', 'ul', 1);
- }
- token.map = listLines = [startLine, 0];
- token.markup = String.fromCharCode(markerCharCode);
-
- //
- // Iterate list items
- //
-
- nextLine = startLine;
- prevEmptyEnd = false;
- terminatorRules = state.md.block.ruler.getRules('list');
- oldParentType = state.parentType;
- state.parentType = 'list';
- while (nextLine < endLine) {
- pos = posAfterMarker;
- max = state.eMarks[nextLine];
- initial = offset = state.sCount[nextLine] + posAfterMarker - (state.bMarks[startLine] + state.tShift[startLine]);
- while (pos < max) {
- ch = state.src.charCodeAt(pos);
- if (ch === 0x09) {
- offset += 4 - (offset + state.bsCount[nextLine]) % 4;
- } else if (ch === 0x20) {
- offset++;
- } else {
- break;
- }
- pos++;
- }
- contentStart = pos;
- if (contentStart >= max) {
- // trimming space in "- \n 3" case, indent is 1 here
- indentAfterMarker = 1;
- } else {
- indentAfterMarker = offset - initial;
- }
-
- // If we have more than 4 spaces, the indent is 1
- // (the rest is just indented code block)
- if (indentAfterMarker > 4) {
- indentAfterMarker = 1;
- }
-
- // " - test"
- // ^^^^^ - calculating total length of this thing
- indent = initial + indentAfterMarker;
-
- // Run subparser & write tokens
- token = state.push('list_item_open', 'li', 1);
- token.markup = String.fromCharCode(markerCharCode);
- token.map = itemLines = [startLine, 0];
- if (isOrdered) {
- token.info = state.src.slice(start, posAfterMarker - 1);
- }
-
- // change current state, then restore it after parser subcall
- oldTight = state.tight;
- oldTShift = state.tShift[startLine];
- oldSCount = state.sCount[startLine];
-
- // - example list
- // ^ listIndent position will be here
- // ^ blkIndent position will be here
- //
- oldListIndent = state.listIndent;
- state.listIndent = state.blkIndent;
- state.blkIndent = indent;
- state.tight = true;
- state.tShift[startLine] = contentStart - state.bMarks[startLine];
- state.sCount[startLine] = offset;
- if (contentStart >= max && state.isEmpty(startLine + 1)) {
- // workaround for this case
- // (list item is empty, list terminates before "foo"):
- // ~~~~~~~~
- // -
- //
- // foo
- // ~~~~~~~~
- state.line = Math.min(state.line + 2, endLine);
- } else {
- state.md.block.tokenize(state, startLine, endLine, true);
- }
-
- // If any of list item is tight, mark list as tight
- if (!state.tight || prevEmptyEnd) {
- tight = false;
- }
- // An item becomes loose if it ends with an empty line,
- // but we should filter out the last element, because it means the list has finished
- prevEmptyEnd = state.line - startLine > 1 && state.isEmpty(state.line - 1);
- state.blkIndent = state.listIndent;
- state.listIndent = oldListIndent;
- state.tShift[startLine] = oldTShift;
- state.sCount[startLine] = oldSCount;
- state.tight = oldTight;
- token = state.push('list_item_close', 'li', -1);
- token.markup = String.fromCharCode(markerCharCode);
- nextLine = startLine = state.line;
- itemLines[1] = nextLine;
- contentStart = state.bMarks[startLine];
- if (nextLine >= endLine) {
- break;
- }
-
- //
- // Try to check if list is terminated or continued.
- //
- if (state.sCount[nextLine] < state.blkIndent) {
- break;
- }
-
- // if it's indented more than 3 spaces, it should be a code block
- if (state.sCount[startLine] - state.blkIndent >= 4) {
- break;
- }
-
- // fail if terminating block found
- terminate = false;
- for (i = 0, l = terminatorRules.length; i < l; i++) {
- if (terminatorRules[i](state, nextLine, endLine, true)) {
- terminate = true;
- break;
- }
- }
- if (terminate) {
- break;
- }
-
- // fail if list has another type
- if (isOrdered) {
- posAfterMarker = skipOrderedListMarker(state, nextLine);
- if (posAfterMarker < 0) {
- break;
- }
- start = state.bMarks[nextLine] + state.tShift[nextLine];
- } else {
- posAfterMarker = skipBulletListMarker(state, nextLine);
- if (posAfterMarker < 0) {
- break;
- }
- }
- if (markerCharCode !== state.src.charCodeAt(posAfterMarker - 1)) {
- break;
- }
- }
-
- // Finalize list
- if (isOrdered) {
- token = state.push('ordered_list_close', 'ol', -1);
- } else {
- token = state.push('bullet_list_close', 'ul', -1);
- }
- token.markup = String.fromCharCode(markerCharCode);
- listLines[1] = nextLine;
- state.line = nextLine;
- state.parentType = oldParentType;
-
- // mark paragraphs tight if needed
- if (tight) {
- markTightParagraphs(state, listTokIdx);
- }
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/paragraph.js":
-/*!**********************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/paragraph.js ***!
- \**********************************************************************/
-/***/ (function(module) {
-
-// Paragraph
-
-
-
-module.exports = function paragraph(state, startLine /*, endLine*/) {
- var content,
- terminate,
- i,
- l,
- token,
- oldParentType,
- nextLine = startLine + 1,
- terminatorRules = state.md.block.ruler.getRules('paragraph'),
- endLine = state.lineMax;
- oldParentType = state.parentType;
- state.parentType = 'paragraph';
-
- // jump line-by-line until empty one or EOF
- for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {
- // this would be a code block normally, but after paragraph
- // it's considered a lazy continuation regardless of what's there
- if (state.sCount[nextLine] - state.blkIndent > 3) {
- continue;
- }
-
- // quirk for blockquotes, this line should already be checked by that rule
- if (state.sCount[nextLine] < 0) {
- continue;
- }
-
- // Some tags can terminate paragraph without empty line.
- terminate = false;
- for (i = 0, l = terminatorRules.length; i < l; i++) {
- if (terminatorRules[i](state, nextLine, endLine, true)) {
- terminate = true;
- break;
- }
- }
- if (terminate) {
- break;
- }
- }
- content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();
- state.line = nextLine;
- token = state.push('paragraph_open', 'p', 1);
- token.map = [startLine, state.line];
- token = state.push('inline', '', 0);
- token.content = content;
- token.map = [startLine, state.line];
- token.children = [];
- token = state.push('paragraph_close', 'p', -1);
- state.parentType = oldParentType;
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/reference.js":
-/*!**********************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/reference.js ***!
- \**********************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-
-
-var normalizeReference = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").normalizeReference);
-var isSpace = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").isSpace);
-module.exports = function reference(state, startLine, _endLine, silent) {
- var ch,
- destEndPos,
- destEndLineNo,
- endLine,
- href,
- i,
- l,
- label,
- labelEnd,
- oldParentType,
- res,
- start,
- str,
- terminate,
- terminatorRules,
- title,
- lines = 0,
- pos = state.bMarks[startLine] + state.tShift[startLine],
- max = state.eMarks[startLine],
- nextLine = startLine + 1;
-
- // if it's indented more than 3 spaces, it should be a code block
- if (state.sCount[startLine] - state.blkIndent >= 4) {
- return false;
- }
- if (state.src.charCodeAt(pos) !== 0x5B /* [ */) {
- return false;
- }
-
- // Simple check to quickly interrupt scan on [link](url) at the start of line.
- // Can be useful on practice: https://github.com/markdown-it/markdown-it/issues/54
- while (++pos < max) {
- if (state.src.charCodeAt(pos) === 0x5D /* ] */ && state.src.charCodeAt(pos - 1) !== 0x5C /* \ */) {
- if (pos + 1 === max) {
- return false;
- }
- if (state.src.charCodeAt(pos + 1) !== 0x3A /* : */) {
- return false;
- }
- break;
- }
- }
- endLine = state.lineMax;
-
- // jump line-by-line until empty one or EOF
- terminatorRules = state.md.block.ruler.getRules('reference');
- oldParentType = state.parentType;
- state.parentType = 'reference';
- for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {
- // this would be a code block normally, but after paragraph
- // it's considered a lazy continuation regardless of what's there
- if (state.sCount[nextLine] - state.blkIndent > 3) {
- continue;
- }
-
- // quirk for blockquotes, this line should already be checked by that rule
- if (state.sCount[nextLine] < 0) {
- continue;
- }
-
- // Some tags can terminate paragraph without empty line.
- terminate = false;
- for (i = 0, l = terminatorRules.length; i < l; i++) {
- if (terminatorRules[i](state, nextLine, endLine, true)) {
- terminate = true;
- break;
- }
- }
- if (terminate) {
- break;
- }
- }
- str = state.getLines(startLine, nextLine, state.blkIndent, false).trim();
- max = str.length;
- for (pos = 1; pos < max; pos++) {
- ch = str.charCodeAt(pos);
- if (ch === 0x5B /* [ */) {
- return false;
- } else if (ch === 0x5D /* ] */) {
- labelEnd = pos;
- break;
- } else if (ch === 0x0A /* \n */) {
- lines++;
- } else if (ch === 0x5C /* \ */) {
- pos++;
- if (pos < max && str.charCodeAt(pos) === 0x0A) {
- lines++;
- }
- }
- }
- if (labelEnd < 0 || str.charCodeAt(labelEnd + 1) !== 0x3A /* : */) {
- return false;
- }
-
- // [label]: destination 'title'
- // ^^^ skip optional whitespace here
- for (pos = labelEnd + 2; pos < max; pos++) {
- ch = str.charCodeAt(pos);
- if (ch === 0x0A) {
- lines++;
- } else if (isSpace(ch)) {
- /*eslint no-empty:0*/
- } else {
- break;
- }
- }
-
- // [label]: destination 'title'
- // ^^^^^^^^^^^ parse this
- res = state.md.helpers.parseLinkDestination(str, pos, max);
- if (!res.ok) {
- return false;
- }
- href = state.md.normalizeLink(res.str);
- if (!state.md.validateLink(href)) {
- return false;
- }
- pos = res.pos;
- lines += res.lines;
-
- // save cursor state, we could require to rollback later
- destEndPos = pos;
- destEndLineNo = lines;
-
- // [label]: destination 'title'
- // ^^^ skipping those spaces
- start = pos;
- for (; pos < max; pos++) {
- ch = str.charCodeAt(pos);
- if (ch === 0x0A) {
- lines++;
- } else if (isSpace(ch)) {
- /*eslint no-empty:0*/
- } else {
- break;
- }
- }
-
- // [label]: destination 'title'
- // ^^^^^^^ parse this
- res = state.md.helpers.parseLinkTitle(str, pos, max);
- if (pos < max && start !== pos && res.ok) {
- title = res.str;
- pos = res.pos;
- lines += res.lines;
- } else {
- title = '';
- pos = destEndPos;
- lines = destEndLineNo;
- }
-
- // skip trailing spaces until the rest of the line
- while (pos < max) {
- ch = str.charCodeAt(pos);
- if (!isSpace(ch)) {
- break;
- }
- pos++;
- }
- if (pos < max && str.charCodeAt(pos) !== 0x0A) {
- if (title) {
- // garbage at the end of the line after title,
- // but it could still be a valid reference if we roll back
- title = '';
- pos = destEndPos;
- lines = destEndLineNo;
- while (pos < max) {
- ch = str.charCodeAt(pos);
- if (!isSpace(ch)) {
- break;
- }
- pos++;
- }
- }
- }
- if (pos < max && str.charCodeAt(pos) !== 0x0A) {
- // garbage at the end of the line
- return false;
- }
- label = normalizeReference(str.slice(1, labelEnd));
- if (!label) {
- // CommonMark 0.20 disallows empty labels
- return false;
- }
-
- // Reference can not terminate anything. This check is for safety only.
- /*istanbul ignore if*/
- if (silent) {
- return true;
- }
- if (typeof state.env.references === 'undefined') {
- state.env.references = {};
- }
- if (typeof state.env.references[label] === 'undefined') {
- state.env.references[label] = {
- title: title,
- href: href
- };
- }
- state.parentType = oldParentType;
- state.line = startLine + lines + 1;
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/state_block.js":
-/*!************************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/state_block.js ***!
- \************************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-// Parser state class
-
-
-
-var Token = __webpack_require__(/*! ../token */ "../../../node_modules/markdown-it/lib/token.js");
-var isSpace = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").isSpace);
-function StateBlock(src, md, env, tokens) {
- var ch, s, start, pos, len, indent, offset, indent_found;
- this.src = src;
-
- // link to parser instance
- this.md = md;
- this.env = env;
-
- //
- // Internal state variables
- //
-
- this.tokens = tokens;
- this.bMarks = []; // line begin offsets for fast jumps
- this.eMarks = []; // line end offsets for fast jumps
- this.tShift = []; // offsets of the first non-space characters (tabs not expanded)
- this.sCount = []; // indents for each line (tabs expanded)
-
- // An amount of virtual spaces (tabs expanded) between beginning
- // of each line (bMarks) and real beginning of that line.
- //
- // It exists only as a hack because blockquotes override bMarks
- // losing information in the process.
- //
- // It's used only when expanding tabs, you can think about it as
- // an initial tab length, e.g. bsCount=21 applied to string `\t123`
- // means first tab should be expanded to 4-21%4 === 3 spaces.
- //
- this.bsCount = [];
-
- // block parser variables
- this.blkIndent = 0; // required block content indent (for example, if we are
- // inside a list, it would be positioned after list marker)
- this.line = 0; // line index in src
- this.lineMax = 0; // lines count
- this.tight = false; // loose/tight mode for lists
- this.ddIndent = -1; // indent of the current dd block (-1 if there isn't any)
- this.listIndent = -1; // indent of the current list block (-1 if there isn't any)
-
- // can be 'blockquote', 'list', 'root', 'paragraph' or 'reference'
- // used in lists to determine if they interrupt a paragraph
- this.parentType = 'root';
- this.level = 0;
-
- // renderer
- this.result = '';
-
- // Create caches
- // Generate markers.
- s = this.src;
- indent_found = false;
- for (start = pos = indent = offset = 0, len = s.length; pos < len; pos++) {
- ch = s.charCodeAt(pos);
- if (!indent_found) {
- if (isSpace(ch)) {
- indent++;
- if (ch === 0x09) {
- offset += 4 - offset % 4;
- } else {
- offset++;
- }
- continue;
- } else {
- indent_found = true;
- }
- }
- if (ch === 0x0A || pos === len - 1) {
- if (ch !== 0x0A) {
- pos++;
- }
- this.bMarks.push(start);
- this.eMarks.push(pos);
- this.tShift.push(indent);
- this.sCount.push(offset);
- this.bsCount.push(0);
- indent_found = false;
- indent = 0;
- offset = 0;
- start = pos + 1;
- }
- }
-
- // Push fake entry to simplify cache bounds checks
- this.bMarks.push(s.length);
- this.eMarks.push(s.length);
- this.tShift.push(0);
- this.sCount.push(0);
- this.bsCount.push(0);
- this.lineMax = this.bMarks.length - 1; // don't count last fake line
-}
-
-// Push new token to "stream".
-//
-StateBlock.prototype.push = function (type, tag, nesting) {
- var token = new Token(type, tag, nesting);
- token.block = true;
- if (nesting < 0) this.level--; // closing tag
- token.level = this.level;
- if (nesting > 0) this.level++; // opening tag
-
- this.tokens.push(token);
- return token;
-};
-StateBlock.prototype.isEmpty = function isEmpty(line) {
- return this.bMarks[line] + this.tShift[line] >= this.eMarks[line];
-};
-StateBlock.prototype.skipEmptyLines = function skipEmptyLines(from) {
- for (var max = this.lineMax; from < max; from++) {
- if (this.bMarks[from] + this.tShift[from] < this.eMarks[from]) {
- break;
- }
- }
- return from;
-};
-
-// Skip spaces from given position.
-StateBlock.prototype.skipSpaces = function skipSpaces(pos) {
- var ch;
- for (var max = this.src.length; pos < max; pos++) {
- ch = this.src.charCodeAt(pos);
- if (!isSpace(ch)) {
- break;
- }
- }
- return pos;
-};
-
-// Skip spaces from given position in reverse.
-StateBlock.prototype.skipSpacesBack = function skipSpacesBack(pos, min) {
- if (pos <= min) {
- return pos;
- }
- while (pos > min) {
- if (!isSpace(this.src.charCodeAt(--pos))) {
- return pos + 1;
- }
- }
- return pos;
-};
-
-// Skip char codes from given position
-StateBlock.prototype.skipChars = function skipChars(pos, code) {
- for (var max = this.src.length; pos < max; pos++) {
- if (this.src.charCodeAt(pos) !== code) {
- break;
- }
- }
- return pos;
-};
-
-// Skip char codes reverse from given position - 1
-StateBlock.prototype.skipCharsBack = function skipCharsBack(pos, code, min) {
- if (pos <= min) {
- return pos;
- }
- while (pos > min) {
- if (code !== this.src.charCodeAt(--pos)) {
- return pos + 1;
- }
- }
- return pos;
-};
-
-// cut lines range from source.
-StateBlock.prototype.getLines = function getLines(begin, end, indent, keepLastLF) {
- var i,
- lineIndent,
- ch,
- first,
- last,
- queue,
- lineStart,
- line = begin;
- if (begin >= end) {
- return '';
- }
- queue = new Array(end - begin);
- for (i = 0; line < end; line++, i++) {
- lineIndent = 0;
- lineStart = first = this.bMarks[line];
- if (line + 1 < end || keepLastLF) {
- // No need for bounds check because we have fake entry on tail.
- last = this.eMarks[line] + 1;
- } else {
- last = this.eMarks[line];
- }
- while (first < last && lineIndent < indent) {
- ch = this.src.charCodeAt(first);
- if (isSpace(ch)) {
- if (ch === 0x09) {
- lineIndent += 4 - (lineIndent + this.bsCount[line]) % 4;
- } else {
- lineIndent++;
- }
- } else if (first - lineStart < this.tShift[line]) {
- // patched tShift masked characters to look like spaces (blockquotes, list markers)
- lineIndent++;
- } else {
- break;
- }
- first++;
- }
- if (lineIndent > indent) {
- // partially expanding tabs in code blocks, e.g '\t\tfoobar'
- // with indent=2 becomes ' \tfoobar'
- queue[i] = new Array(lineIndent - indent + 1).join(' ') + this.src.slice(first, last);
- } else {
- queue[i] = this.src.slice(first, last);
- }
- }
- return queue.join('');
-};
-
-// re-export Token class to use in block rules
-StateBlock.prototype.Token = Token;
-module.exports = StateBlock;
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_block/table.js":
-/*!******************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_block/table.js ***!
- \******************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-// GFM table, https://github.github.com/gfm/#tables-extension-
-
-
-
-var isSpace = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").isSpace);
-function getLine(state, line) {
- var pos = state.bMarks[line] + state.tShift[line],
- max = state.eMarks[line];
- return state.src.substr(pos, max - pos);
-}
-function escapedSplit(str) {
- var result = [],
- pos = 0,
- max = str.length,
- ch,
- isEscaped = false,
- lastPos = 0,
- current = '';
- ch = str.charCodeAt(pos);
- while (pos < max) {
- if (ch === 0x7c /* | */) {
- if (!isEscaped) {
- // pipe separating cells, '|'
- result.push(current + str.substring(lastPos, pos));
- current = '';
- lastPos = pos + 1;
- } else {
- // escaped pipe, '\|'
- current += str.substring(lastPos, pos - 1);
- lastPos = pos;
- }
- }
- isEscaped = ch === 0x5c /* \ */;
- pos++;
- ch = str.charCodeAt(pos);
- }
- result.push(current + str.substring(lastPos));
- return result;
-}
-module.exports = function table(state, startLine, endLine, silent) {
- var ch, lineText, pos, i, l, nextLine, columns, columnCount, token, aligns, t, tableLines, tbodyLines, oldParentType, terminate, terminatorRules, firstCh, secondCh;
-
- // should have at least two lines
- if (startLine + 2 > endLine) {
- return false;
- }
- nextLine = startLine + 1;
- if (state.sCount[nextLine] < state.blkIndent) {
- return false;
- }
-
- // if it's indented more than 3 spaces, it should be a code block
- if (state.sCount[nextLine] - state.blkIndent >= 4) {
- return false;
- }
-
- // first character of the second line should be '|', '-', ':',
- // and no other characters are allowed but spaces;
- // basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp
-
- pos = state.bMarks[nextLine] + state.tShift[nextLine];
- if (pos >= state.eMarks[nextLine]) {
- return false;
- }
- firstCh = state.src.charCodeAt(pos++);
- if (firstCh !== 0x7C /* | */ && firstCh !== 0x2D /* - */ && firstCh !== 0x3A /* : */) {
- return false;
- }
- if (pos >= state.eMarks[nextLine]) {
- return false;
- }
- secondCh = state.src.charCodeAt(pos++);
- if (secondCh !== 0x7C /* | */ && secondCh !== 0x2D /* - */ && secondCh !== 0x3A /* : */ && !isSpace(secondCh)) {
- return false;
- }
-
- // if first character is '-', then second character must not be a space
- // (due to parsing ambiguity with list)
- if (firstCh === 0x2D /* - */ && isSpace(secondCh)) {
- return false;
- }
- while (pos < state.eMarks[nextLine]) {
- ch = state.src.charCodeAt(pos);
- if (ch !== 0x7C /* | */ && ch !== 0x2D /* - */ && ch !== 0x3A /* : */ && !isSpace(ch)) {
- return false;
- }
- pos++;
- }
- lineText = getLine(state, startLine + 1);
- columns = lineText.split('|');
- aligns = [];
- for (i = 0; i < columns.length; i++) {
- t = columns[i].trim();
- if (!t) {
- // allow empty columns before and after table, but not in between columns;
- // e.g. allow ` |---| `, disallow ` ---||--- `
- if (i === 0 || i === columns.length - 1) {
- continue;
- } else {
- return false;
- }
- }
- if (!/^:?-+:?$/.test(t)) {
- return false;
- }
- if (t.charCodeAt(t.length - 1) === 0x3A /* : */) {
- aligns.push(t.charCodeAt(0) === 0x3A /* : */ ? 'center' : 'right');
- } else if (t.charCodeAt(0) === 0x3A /* : */) {
- aligns.push('left');
- } else {
- aligns.push('');
- }
- }
- lineText = getLine(state, startLine).trim();
- if (lineText.indexOf('|') === -1) {
- return false;
- }
- if (state.sCount[startLine] - state.blkIndent >= 4) {
- return false;
- }
- columns = escapedSplit(lineText);
- if (columns.length && columns[0] === '') columns.shift();
- if (columns.length && columns[columns.length - 1] === '') columns.pop();
-
- // header row will define an amount of columns in the entire table,
- // and align row should be exactly the same (the rest of the rows can differ)
- columnCount = columns.length;
- if (columnCount === 0 || columnCount !== aligns.length) {
- return false;
- }
- if (silent) {
- return true;
- }
- oldParentType = state.parentType;
- state.parentType = 'table';
-
- // use 'blockquote' lists for termination because it's
- // the most similar to tables
- terminatorRules = state.md.block.ruler.getRules('blockquote');
- token = state.push('table_open', 'table', 1);
- token.map = tableLines = [startLine, 0];
- token = state.push('thead_open', 'thead', 1);
- token.map = [startLine, startLine + 1];
- token = state.push('tr_open', 'tr', 1);
- token.map = [startLine, startLine + 1];
- for (i = 0; i < columns.length; i++) {
- token = state.push('th_open', 'th', 1);
- if (aligns[i]) {
- token.attrs = [['style', 'text-align:' + aligns[i]]];
- }
- token = state.push('inline', '', 0);
- token.content = columns[i].trim();
- token.children = [];
- token = state.push('th_close', 'th', -1);
- }
- token = state.push('tr_close', 'tr', -1);
- token = state.push('thead_close', 'thead', -1);
- for (nextLine = startLine + 2; nextLine < endLine; nextLine++) {
- if (state.sCount[nextLine] < state.blkIndent) {
- break;
- }
- terminate = false;
- for (i = 0, l = terminatorRules.length; i < l; i++) {
- if (terminatorRules[i](state, nextLine, endLine, true)) {
- terminate = true;
- break;
- }
- }
- if (terminate) {
- break;
- }
- lineText = getLine(state, nextLine).trim();
- if (!lineText) {
- break;
- }
- if (state.sCount[nextLine] - state.blkIndent >= 4) {
- break;
- }
- columns = escapedSplit(lineText);
- if (columns.length && columns[0] === '') columns.shift();
- if (columns.length && columns[columns.length - 1] === '') columns.pop();
- if (nextLine === startLine + 2) {
- token = state.push('tbody_open', 'tbody', 1);
- token.map = tbodyLines = [startLine + 2, 0];
- }
- token = state.push('tr_open', 'tr', 1);
- token.map = [nextLine, nextLine + 1];
- for (i = 0; i < columnCount; i++) {
- token = state.push('td_open', 'td', 1);
- if (aligns[i]) {
- token.attrs = [['style', 'text-align:' + aligns[i]]];
- }
- token = state.push('inline', '', 0);
- token.content = columns[i] ? columns[i].trim() : '';
- token.children = [];
- token = state.push('td_close', 'td', -1);
- }
- token = state.push('tr_close', 'tr', -1);
- }
- if (tbodyLines) {
- token = state.push('tbody_close', 'tbody', -1);
- tbodyLines[1] = nextLine;
- }
- token = state.push('table_close', 'table', -1);
- tableLines[1] = nextLine;
- state.parentType = oldParentType;
- state.line = nextLine;
- return true;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_core/block.js":
-/*!*****************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_core/block.js ***!
- \*****************************************************************/
-/***/ (function(module) {
-
-
-
-module.exports = function block(state) {
- var token;
- if (state.inlineMode) {
- token = new state.Token('inline', '', 0);
- token.content = state.src;
- token.map = [0, 1];
- token.children = [];
- state.tokens.push(token);
- } else {
- state.md.block.parse(state.src, state.md, state.env, state.tokens);
- }
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_core/inline.js":
-/*!******************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_core/inline.js ***!
- \******************************************************************/
-/***/ (function(module) {
-
-
-
-module.exports = function inline(state) {
- var tokens = state.tokens,
- tok,
- i,
- l;
-
- // Parse inlines
- for (i = 0, l = tokens.length; i < l; i++) {
- tok = tokens[i];
- if (tok.type === 'inline') {
- state.md.inline.parse(tok.content, state.md, state.env, tok.children);
- }
- }
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_core/linkify.js":
-/*!*******************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_core/linkify.js ***!
- \*******************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-// Replace link-like texts with link nodes.
-//
-// Currently restricted by `md.validateLink()` to http/https/ftp
-//
-
-
-var arrayReplaceAt = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").arrayReplaceAt);
-function isLinkOpen(str) {
- return /^<a[>\s]/i.test(str);
-}
-function isLinkClose(str) {
- return /^<\/a\s*>/i.test(str);
-}
-module.exports = function linkify(state) {
- var i,
- j,
- l,
- tokens,
- token,
- currentToken,
- nodes,
- ln,
- text,
- pos,
- lastPos,
- level,
- htmlLinkLevel,
- url,
- fullUrl,
- urlText,
- blockTokens = state.tokens,
- links;
- if (!state.md.options.linkify) {
- return;
- }
- for (j = 0, l = blockTokens.length; j < l; j++) {
- if (blockTokens[j].type !== 'inline' || !state.md.linkify.pretest(blockTokens[j].content)) {
- continue;
- }
- tokens = blockTokens[j].children;
- htmlLinkLevel = 0;
-
- // We scan from the end, to keep position when new tags added.
- // Use reversed logic in links start/end match
- for (i = tokens.length - 1; i >= 0; i--) {
- currentToken = tokens[i];
-
- // Skip content of markdown links
- if (currentToken.type === 'link_close') {
- i--;
- while (tokens[i].level !== currentToken.level && tokens[i].type !== 'link_open') {
- i--;
- }
- continue;
- }
-
- // Skip content of html tag links
- if (currentToken.type === 'html_inline') {
- if (isLinkOpen(currentToken.content) && htmlLinkLevel > 0) {
- htmlLinkLevel--;
- }
- if (isLinkClose(currentToken.content)) {
- htmlLinkLevel++;
- }
- }
- if (htmlLinkLevel > 0) {
- continue;
- }
- if (currentToken.type === 'text' && state.md.linkify.test(currentToken.content)) {
- text = currentToken.content;
- links = state.md.linkify.match(text);
-
- // Now split string to nodes
- nodes = [];
- level = currentToken.level;
- lastPos = 0;
- for (ln = 0; ln < links.length; ln++) {
- url = links[ln].url;
- fullUrl = state.md.normalizeLink(url);
- if (!state.md.validateLink(fullUrl)) {
- continue;
- }
- urlText = links[ln].text;
-
- // Linkifier might send raw hostnames like "example.com", where url
- // starts with domain name. So we prepend http:// in those cases,
- // and remove it afterwards.
- //
- if (!links[ln].schema) {
- urlText = state.md.normalizeLinkText('http://' + urlText).replace(/^http:\/\//, '');
- } else if (links[ln].schema === 'mailto:' && !/^mailto:/i.test(urlText)) {
- urlText = state.md.normalizeLinkText('mailto:' + urlText).replace(/^mailto:/, '');
- } else {
- urlText = state.md.normalizeLinkText(urlText);
- }
- pos = links[ln].index;
- if (pos > lastPos) {
- token = new state.Token('text', '', 0);
- token.content = text.slice(lastPos, pos);
- token.level = level;
- nodes.push(token);
- }
- token = new state.Token('link_open', 'a', 1);
- token.attrs = [['href', fullUrl]];
- token.level = level++;
- token.markup = 'linkify';
- token.info = 'auto';
- nodes.push(token);
- token = new state.Token('text', '', 0);
- token.content = urlText;
- token.level = level;
- nodes.push(token);
- token = new state.Token('link_close', 'a', -1);
- token.level = --level;
- token.markup = 'linkify';
- token.info = 'auto';
- nodes.push(token);
- lastPos = links[ln].lastIndex;
- }
- if (lastPos < text.length) {
- token = new state.Token('text', '', 0);
- token.content = text.slice(lastPos);
- token.level = level;
- nodes.push(token);
- }
-
- // replace current node
- blockTokens[j].children = tokens = arrayReplaceAt(tokens, i, nodes);
- }
- }
- }
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_core/normalize.js":
-/*!*********************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_core/normalize.js ***!
- \*********************************************************************/
-/***/ (function(module) {
-
-// Normalize input string
-
-
-
-// https://spec.commonmark.org/0.29/#line-ending
-var NEWLINES_RE = /\r\n?|\n/g;
-var NULL_RE = /\0/g;
-module.exports = function normalize(state) {
- var str;
-
- // Normalize newlines
- str = state.src.replace(NEWLINES_RE, '\n');
-
- // Replace NULL characters
- str = str.replace(NULL_RE, '\uFFFD');
- state.src = str;
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_core/replacements.js":
-/*!************************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_core/replacements.js ***!
- \************************************************************************/
-/***/ (function(module) {
-
-// Simple typographic replacements
-//
-// (c) (C) → ©
-// (tm) (TM) → ™
-// (r) (R) → ®
-// +- → ±
-// (p) (P) -> §
-// ... → … (also ?.... → ?.., !.... → !..)
-// ???????? → ???, !!!!! → !!!, `,,` → `,`
-// -- → –, --- → —
-//
-
-
-// TODO:
-// - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾
- // - multiplication 2 x 4 -> 2 × 4
-var RARE_RE = /\+-|\.\.|\?\?\?\?|!!!!|,,|--/;
-
-// Workaround for phantomjs - need regex without /g flag,
-// or root check will fail every second time
-var SCOPED_ABBR_TEST_RE = /\((c|tm|r|p)\)/i;
-var SCOPED_ABBR_RE = /\((c|tm|r|p)\)/ig;
-var SCOPED_ABBR = {
- c: '©',
- r: '®',
- p: '§',
- tm: '™'
-};
-function replaceFn(match, name) {
- return SCOPED_ABBR[name.toLowerCase()];
-}
-function replace_scoped(inlineTokens) {
- var i,
- token,
- inside_autolink = 0;
- for (i = inlineTokens.length - 1; i >= 0; i--) {
- token = inlineTokens[i];
- if (token.type === 'text' && !inside_autolink) {
- token.content = token.content.replace(SCOPED_ABBR_RE, replaceFn);
- }
- if (token.type === 'link_open' && token.info === 'auto') {
- inside_autolink--;
- }
- if (token.type === 'link_close' && token.info === 'auto') {
- inside_autolink++;
- }
- }
-}
-function replace_rare(inlineTokens) {
- var i,
- token,
- inside_autolink = 0;
- for (i = inlineTokens.length - 1; i >= 0; i--) {
- token = inlineTokens[i];
- if (token.type === 'text' && !inside_autolink) {
- if (RARE_RE.test(token.content)) {
- token.content = token.content.replace(/\+-/g, '±')
- // .., ..., ....... -> …
- // but ?..... & !..... -> ?.. & !..
- .replace(/\.{2,}/g, '…').replace(/([?!])…/g, '$1..').replace(/([?!]){4,}/g, '$1$1$1').replace(/,{2,}/g, ',')
- // em-dash
- .replace(/(^|[^-])---(?=[^-]|$)/mg, '$1\u2014')
- // en-dash
- .replace(/(^|\s)--(?=\s|$)/mg, '$1\u2013').replace(/(^|[^-\s])--(?=[^-\s]|$)/mg, '$1\u2013');
- }
- }
- if (token.type === 'link_open' && token.info === 'auto') {
- inside_autolink--;
- }
- if (token.type === 'link_close' && token.info === 'auto') {
- inside_autolink++;
- }
- }
-}
-module.exports = function replace(state) {
- var blkIdx;
- if (!state.md.options.typographer) {
- return;
- }
- for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
- if (state.tokens[blkIdx].type !== 'inline') {
- continue;
- }
- if (SCOPED_ABBR_TEST_RE.test(state.tokens[blkIdx].content)) {
- replace_scoped(state.tokens[blkIdx].children);
- }
- if (RARE_RE.test(state.tokens[blkIdx].content)) {
- replace_rare(state.tokens[blkIdx].children);
- }
- }
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_core/smartquotes.js":
-/*!***********************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_core/smartquotes.js ***!
- \***********************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-// Convert straight quotation marks to typographic ones
-//
-
-
-var isWhiteSpace = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").isWhiteSpace);
-var isPunctChar = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").isPunctChar);
-var isMdAsciiPunct = (__webpack_require__(/*! ../common/utils */ "../../../node_modules/markdown-it/lib/common/utils.js").isMdAsciiPunct);
-var QUOTE_TEST_RE = /['"]/;
-var QUOTE_RE = /['"]/g;
-var APOSTROPHE = '\u2019'; /* ’ */
-
-function replaceAt(str, index, ch) {
- return str.substr(0, index) + ch + str.substr(index + 1);
-}
-function process_inlines(tokens, state) {
- var i, token, text, t, pos, max, thisLevel, item, lastChar, nextChar, isLastPunctChar, isNextPunctChar, isLastWhiteSpace, isNextWhiteSpace, canOpen, canClose, j, isSingle, stack, openQuote, closeQuote;
- stack = [];
- for (i = 0; i < tokens.length; i++) {
- token = tokens[i];
- thisLevel = tokens[i].level;
- for (j = stack.length - 1; j >= 0; j--) {
- if (stack[j].level <= thisLevel) {
- break;
- }
- }
- stack.length = j + 1;
- if (token.type !== 'text') {
- continue;
- }
- text = token.content;
- pos = 0;
- max = text.length;
-
- /*eslint no-labels:0,block-scoped-var:0*/
- OUTER: while (pos < max) {
- QUOTE_RE.lastIndex = pos;
- t = QUOTE_RE.exec(text);
- if (!t) {
- break;
- }
- canOpen = canClose = true;
- pos = t.index + 1;
- isSingle = t[0] === "'";
-
- // Find previous character,
- // default to space if it's the beginning of the line
- //
- lastChar = 0x20;
- if (t.index - 1 >= 0) {
- lastChar = text.charCodeAt(t.index - 1);
- } else {
- for (j = i - 1; j >= 0; j--) {
- if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // lastChar defaults to 0x20
- if (!tokens[j].content) continue; // should skip all tokens except 'text', 'html_inline' or 'code_inline'
-
- lastChar = tokens[j].content.charCodeAt(tokens[j].content.length - 1);
- break;
- }
- }
-
- // Find next character,
- // default to space if it's the end of the line
- //
- nextChar = 0x20;
- if (pos < max) {
- nextChar = text.charCodeAt(pos);
- } else {
- for (j = i + 1; j < tokens.length; j++) {
- if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // nextChar defaults to 0x20
- if (!tokens[j].content) continue; // should skip all tokens except 'text', 'html_inline' or 'code_inline'
-
- nextChar = tokens[j].content.charCodeAt(0);
- break;
- }
- }
- isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar));
- isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar));
- isLastWhiteSpace = isWhiteSpace(lastChar);
- isNextWhiteSpace = isWhiteSpace(nextChar);
- if (isNextWhiteSpace) {
- canOpen = false;
- } else if (isNextPunctChar) {
- if (!(isLastWhiteSpace || isLastPunctChar)) {
- canOpen = false;
- }
- }
- if (isLastWhiteSpace) {
- canClose = false;
- } else if (isLastPunctChar) {
- if (!(isNextWhiteSpace || isNextPunctChar)) {
- canClose = false;
- }
- }
- if (nextChar === 0x22 /* " */ && t[0] === '"') {
- if (lastChar >= 0x30 /* 0 */ && lastChar <= 0x39 /* 9 */) {
- // special case: 1"" - count first quote as an inch
- canClose = canOpen = false;
- }
- }
- if (canOpen && canClose) {
- // Replace quotes in the middle of punctuation sequence, but not
- // in the middle of the words, i.e.:
- //
- // 1. foo " bar " baz - not replaced
- // 2. foo-"-bar-"-baz - replaced
- // 3. foo"bar"baz - not replaced
- //
- canOpen = isLastPunctChar;
- canClose = isNextPunctChar;
- }
- if (!canOpen && !canClose) {
- // middle of word
- if (isSingle) {
- token.content = replaceAt(token.content, t.index, APOSTROPHE);
- }
- continue;
- }
- if (canClose) {
- // this could be a closing quote, rewind the stack to get a match
- for (j = stack.length - 1; j >= 0; j--) {
- item = stack[j];
- if (stack[j].level < thisLevel) {
- break;
- }
- if (item.single === isSingle && stack[j].level === thisLevel) {
- item = stack[j];
- if (isSingle) {
- openQuote = state.md.options.quotes[2];
- closeQuote = state.md.options.quotes[3];
- } else {
- openQuote = state.md.options.quotes[0];
- closeQuote = state.md.options.quotes[1];
- }
-
- // replace token.content *before* tokens[item.token].content,
- // because, if they are pointing at the same token, replaceAt
- // could mess up indices when quote length != 1
- token.content = replaceAt(token.content, t.index, closeQuote);
- tokens[item.token].content = replaceAt(tokens[item.token].content, item.pos, openQuote);
- pos += closeQuote.length - 1;
- if (item.token === i) {
- pos += openQuote.length - 1;
- }
- text = token.content;
- max = text.length;
- stack.length = j;
- continue OUTER;
- }
- }
- }
- if (canOpen) {
- stack.push({
- token: i,
- pos: t.index,
- single: isSingle,
- level: thisLevel
- });
- } else if (canClose && isSingle) {
- token.content = replaceAt(token.content, t.index, APOSTROPHE);
- }
- }
- }
-}
-module.exports = function smartquotes(state) {
- /*eslint max-depth:0*/
- var blkIdx;
- if (!state.md.options.typographer) {
- return;
- }
- for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
- if (state.tokens[blkIdx].type !== 'inline' || !QUOTE_TEST_RE.test(state.tokens[blkIdx].content)) {
- continue;
- }
- process_inlines(state.tokens[blkIdx].children, state);
- }
-};
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_core/state_core.js":
-/*!**********************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_core/state_core.js ***!
- \**********************************************************************/
-/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
-
-// Core state object
-//
-
-
-var Token = __webpack_require__(/*! ../token */ "../../../node_modules/markdown-it/lib/token.js");
-function StateCore(src, md, env) {
- this.src = src;
- this.env = env;
- this.tokens = [];
- this.inlineMode = false;
- this.md = md; // link to parser instance
-}
-
-// re-export Token class to use in core rules
-StateCore.prototype.Token = Token;
-module.exports = StateCore;
-
-/***/ }),
-
-/***/ "../../../node_modules/markdown-it/lib/rules_inline/autolink.js":
-/*!**********************************************************************!*\
- !*** ../../../node_modules/markdown-it/lib/rules_inline/autolink.js ***!
- \**********************************************************************/
-/***/ (function(module) {
-
-// Process autolinks '<protocol:...>'
+  return '<code' + slf.renderAttrs(token) + '>' + escapeHtml(token.content) + '</code>';
+};
+default_rules.code_block = function (tokens, idx, options, env, slf) {
+  const token = tokens[idx];
+  return '<pre' + slf.renderAttrs(token) + '><code>' + escapeHtml(token.content) + '</code></pre>\n';
+};
+default_rules.fence = function (tokens, idx, options, env, slf) {
+ const token = tokens[idx];
+ const info = token.info ? unescapeAll(token.info).trim() : '';
+ let langName = '';
+ let langAttrs = '';
+ if (info) {
+ const arr = info.split(/(\s+)/g);
+ langName = arr[0];
+ langAttrs = arr.slice(2).join('');
+ }
+ let highlighted;
+ if (options.highlight) {
+ highlighted = options.highlight(token.content, langName, langAttrs) || escapeHtml(token.content);
+ } else {
+ highlighted = escapeHtml(token.content);
+ }
+  if (highlighted.indexOf('<pre') === 0) {
+    return highlighted + '\n';
+  }
+  return `<pre><code${slf.renderAttrs(token)}>${highlighted}</code></pre>\n`;
+};
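As an aside (not part of the bundled file): a minimal sketch of how the `highlight` option feeds the fence renderer above. The `shout` language name and the `shout` CSS class are made up for illustration.

```javascript
const MarkdownIt = require('markdown-it');

// If options.highlight returns markup that already starts with '<pre',
// the fence rule uses it verbatim; otherwise the escaped result is
// wrapped in <pre><code> by the default rule above.
const md = new MarkdownIt({
  highlight(code, lang) {
    if (lang === 'shout') {
      return '<pre class="shout"><code>' + md.utils.escapeHtml(code.toUpperCase()) + '</code></pre>';
    }
    return ''; // fall back to default escaping and wrapping
  }
});

const src = ['```shout', 'hello', '```'].join('\n');
console.log(md.render(src));
```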
+default_rules.image = function (tokens, idx, options, env, slf) {
+ const token = tokens[idx];
+
+ // "alt" attr MUST be set, even if empty. Because it's mandatory and
+ // should be placed on proper position for tests.
+ //
+ // Replace content with actual value
+
+ token.attrs[token.attrIndex('alt')][1] = slf.renderInlineAsText(token.children, options, env);
+ return slf.renderToken(tokens, idx, options);
+};
+default_rules.hardbreak = function (tokens, idx, options /*, env */) {
+  return options.xhtmlOut ? '<br />\n' : '<br>\n';
+};
+default_rules.softbreak = function (tokens, idx, options /*, env */) {
+  return options.breaks ? (options.xhtmlOut ? '<br />\n' : '<br>\n') : '\n';
+};
+default_rules.text = function (tokens, idx /*, options, env */) {
+ return escapeHtml(tokens[idx].content);
+};
+default_rules.html_block = function (tokens, idx /*, options, env */) {
+ return tokens[idx].content;
+};
+default_rules.html_inline = function (tokens, idx /*, options, env */) {
+ return tokens[idx].content;
+};
+
+/**
+ * new Renderer()
+ *
+ * Creates a new [[Renderer]] instance and fills [[Renderer#rules]] with defaults.
+ **/
+function Renderer() {
+ /**
+ * Renderer#rules -> Object
+ *
+ * Contains render rules for tokens. Can be updated and extended.
+ *
+ * ##### Example
+ *
+ * ```javascript
+ * var md = require('markdown-it')();
+ *
+ *  md.renderer.rules.strong_open  = function () { return '<b>'; };
+ *  md.renderer.rules.strong_close = function () { return '</b>'; };
+ *
+ * var result = md.renderInline(...);
+ * ```
+ *
+ * Each rule is called as independent static function with fixed signature:
+ *
+ * ```javascript
+ * function my_token_render(tokens, idx, options, env, renderer) {
+ * // ...
+ * return renderedHTML;
+ * }
+ * ```
+ *
+ * See [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.mjs)
+ * for more details and examples.
+ **/
+ this.rules = assign({}, default_rules);
+}
+
+/**
+ * Renderer.renderAttrs(token) -> String
+ *
+ * Render token attributes to string.
+ **/
+Renderer.prototype.renderAttrs = function renderAttrs(token) {
+ let i, l, result;
+ if (!token.attrs) {
+ return '';
+ }
+ result = '';
+ for (i = 0, l = token.attrs.length; i < l; i++) {
+ result += ' ' + escapeHtml(token.attrs[i][0]) + '="' + escapeHtml(token.attrs[i][1]) + '"';
+ }
+ return result;
+};
+
+/**
+ * Renderer.renderToken(tokens, idx, options) -> String
+ * - tokens (Array): list of tokens
+ * - idx (Number): token index to render
+ * - options (Object): params of parser instance
+ *
+ * Default token renderer. Can be overridden by custom function
+ * in [[Renderer#rules]].
+ **/
+Renderer.prototype.renderToken = function renderToken(tokens, idx, options) {
+ const token = tokens[idx];
+ let result = '';
+
+ // Tight list paragraphs
+ if (token.hidden) {
+ return '';
+ }
+
+ // Insert a newline between hidden paragraph and subsequent opening
+ // block-level tag.
+ //
+ // For example, here we should insert a newline before blockquote:
+ // - a
+ // >
+ //
+ if (token.block && token.nesting !== -1 && idx && tokens[idx - 1].hidden) {
+ result += '\n';
+ }
+
+  // Add token name, e.g. `<img`
+  result += (token.nesting === -1 ? '</' : '<') + token.tag;
+
+  // Encode attributes, e.g. `<img src=" alt="`
+  result += this.renderAttrs(token);
+
+  // Add a slash for self-closing tags, e.g. `<img src="foo" /`
+  if (token.nesting === 0 && options.xhtmlOut) {
+    result += ' /';
+  }
+
+  // Check if we need to add a newline after this tag
+  let needLf = false;
+  if (token.block) {
+    needLf = true;
+    if (token.nesting === 1 && idx + 1 < tokens.length) {
+      const nextToken = tokens[idx + 1];
+      if (nextToken.type === 'inline' || nextToken.hidden || (nextToken.nesting === -1 && nextToken.tag === token.tag)) {
+        // Block-level tag containing an inline tag, or an opening tag followed
+        // immediately by its own closing tag (e.g. `<li></li>`): no newline.
+        needLf = false;
+      }
+    }
+  }
+ result += needLf ? '>\n' : '>';
+ return result;
+};
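For orientation only (not part of the vendored bundle), a small sketch of what the default token renderer produces, assuming the markdown-it package is importable as `markdown-it`.

```javascript
const MarkdownIt = require('markdown-it');
const md = new MarkdownIt();

const tokens = md.parse('hello', {});
// tokens[0] is paragraph_open; the next token is of type 'inline',
// so needLf stays false and no newline follows the opening tag.
console.log(md.renderer.renderToken(tokens, 0, md.options)); // '<p>'
```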
+
+/**
+ * Renderer.renderInline(tokens, options, env) -> String
+ * - tokens (Array): list of block tokens to render
+ * - options (Object): params of parser instance
+ * - env (Object): additional data from parsed input (references, for example)
+ *
+ * The same as [[Renderer.render]], but for single token of `inline` type.
+ **/
+Renderer.prototype.renderInline = function (tokens, options, env) {
+ let result = '';
+ const rules = this.rules;
+ for (let i = 0, len = tokens.length; i < len; i++) {
+ const type = tokens[i].type;
+ if (typeof rules[type] !== 'undefined') {
+ result += rules[type](tokens, i, options, env, this);
+ } else {
+ result += this.renderToken(tokens, i, options);
+ }
+ }
+ return result;
+};
+
+/** internal
+ * Renderer.renderInlineAsText(tokens, options, env) -> String
+ * - tokens (Array): list on block tokens to render
+ * - options (Object): params of parser instance
+ * - env (Object): additional data from parsed input (references, for example)
+ *
+ * Special kludge for image `alt` attributes to conform to the CommonMark spec.
+ * Don't try to use it! The spec requires `alt` content to be shown with markup
+ * stripped, rather than simply escaped.
+ **/
+Renderer.prototype.renderInlineAsText = function (tokens, options, env) {
+ let result = '';
+ for (let i = 0, len = tokens.length; i < len; i++) {
+ switch (tokens[i].type) {
+ case 'text':
+ result += tokens[i].content;
+ break;
+ case 'image':
+ result += this.renderInlineAsText(tokens[i].children, options, env);
+ break;
+ case 'html_inline':
+ case 'html_block':
+ result += tokens[i].content;
+ break;
+ case 'softbreak':
+ case 'hardbreak':
+ result += '\n';
+ break;
+ // all other tokens are skipped
+ }
+ }
+
+ return result;
+};
+
+/**
+ * Renderer.render(tokens, options, env) -> String
+ * - tokens (Array): list of block tokens to render
+ * - options (Object): params of parser instance
+ * - env (Object): additional data from parsed input (references, for example)
+ *
+ * Takes token stream and generates HTML. Probably, you will never need to call
+ * this method directly.
+ **/
+Renderer.prototype.render = function (tokens, options, env) {
+ let result = '';
+ const rules = this.rules;
+ for (let i = 0, len = tokens.length; i < len; i++) {
+ const type = tokens[i].type;
+ if (type === 'inline') {
+ result += this.renderInline(tokens[i].children, options, env);
+ } else if (typeof rules[type] !== 'undefined') {
+ result += rules[type](tokens, i, options, env, this);
+ } else {
+ result += this.renderToken(tokens, i, options, env);
+ }
+ }
+ return result;
+};
+
+/**
+ * class Ruler
+ *
+ * Helper class, used by [[MarkdownIt#core]], [[MarkdownIt#block]] and
+ * [[MarkdownIt#inline]] to manage sequences of functions (rules):
+ *
+ * - keep rules in defined order
+ * - assign the name to each rule
+ * - enable/disable rules
+ * - add/replace rules
+ * - allow assigning rules to additional named chains (within the same ruler)
+ * - caching lists of active rules
+ *
+ * You will not need to use this class directly until you write plugins. For simple
+ * rule control, use [[MarkdownIt.disable]], [[MarkdownIt.enable]] and
+ * [[MarkdownIt.use]].
+ **/
+
+/**
+ * new Ruler()
+ **/
+function Ruler() {
+ // List of added rules. Each element is:
+ //
+ // {
+ // name: XXX,
+ // enabled: Boolean,
+ // fn: Function(),
+ // alt: [ name2, name3 ]
+ // }
+ //
+ this.__rules__ = [];
+
+ // Cached rule chains.
+ //
+ // First level - chain name, '' for default.
+  // Second level - digital anchor for fast filtering by charcodes.
+ //
+ this.__cache__ = null;
+}
+
+// Helper methods, should not be used directly
+
+// Find rule index by name
+//
+Ruler.prototype.__find__ = function (name) {
+ for (let i = 0; i < this.__rules__.length; i++) {
+ if (this.__rules__[i].name === name) {
+ return i;
+ }
+ }
+ return -1;
+};
+
+// Build rules lookup cache
+//
+Ruler.prototype.__compile__ = function () {
+ const self = this;
+ const chains = [''];
+
+ // collect unique names
+ self.__rules__.forEach(function (rule) {
+ if (!rule.enabled) {
+ return;
+ }
+ rule.alt.forEach(function (altName) {
+ if (chains.indexOf(altName) < 0) {
+ chains.push(altName);
+ }
+ });
+ });
+ self.__cache__ = {};
+ chains.forEach(function (chain) {
+ self.__cache__[chain] = [];
+ self.__rules__.forEach(function (rule) {
+ if (!rule.enabled) {
+ return;
+ }
+ if (chain && rule.alt.indexOf(chain) < 0) {
+ return;
+ }
+ self.__cache__[chain].push(rule.fn);
+ });
+ });
+};
+
+/**
+ * Ruler.at(name, fn [, options])
+ * - name (String): rule name to replace.
+ * - fn (Function): new rule function.
+ * - options (Object): new rule options (not mandatory).
+ *
+ * Replace rule by name with new function & options. Throws error if name not
+ * found.
+ *
+ * ##### Options:
+ *
+ * - __alt__ - array with names of "alternate" chains.
+ *
+ * ##### Example
+ *
+ * Replace existing typographer replacement rule with new one:
+ *
+ * ```javascript
+ * var md = require('markdown-it')();
+ *
+ * md.core.ruler.at('replacements', function replace(state) {
+ * //...
+ * });
+ * ```
+ **/
+Ruler.prototype.at = function (name, fn, options) {
+ const index = this.__find__(name);
+ const opt = options || {};
+ if (index === -1) {
+ throw new Error('Parser rule not found: ' + name);
+ }
+ this.__rules__[index].fn = fn;
+ this.__rules__[index].alt = opt.alt || [];
+ this.__cache__ = null;
+};
+
+/**
+ * Ruler.before(beforeName, ruleName, fn [, options])
+ * - beforeName (String): new rule will be added before this one.
+ * - ruleName (String): name of added rule.
+ * - fn (Function): rule function.
+ * - options (Object): rule options (not mandatory).
+ *
+ * Add new rule to chain before one with given name. See also
+ * [[Ruler.after]], [[Ruler.push]].
+ *
+ * ##### Options:
+ *
+ * - __alt__ - array with names of "alternate" chains.
+ *
+ * ##### Example
+ *
+ * ```javascript
+ * var md = require('markdown-it')();
+ *
+ * md.block.ruler.before('paragraph', 'my_rule', function replace(state) {
+ * //...
+ * });
+ * ```
+ **/
+Ruler.prototype.before = function (beforeName, ruleName, fn, options) {
+ const index = this.__find__(beforeName);
+ const opt = options || {};
+ if (index === -1) {
+ throw new Error('Parser rule not found: ' + beforeName);
+ }
+ this.__rules__.splice(index, 0, {
+ name: ruleName,
+ enabled: true,
+ fn,
+ alt: opt.alt || []
+ });
+ this.__cache__ = null;
+};
+
+/**
+ * Ruler.after(afterName, ruleName, fn [, options])
+ * - afterName (String): new rule will be added after this one.
+ * - ruleName (String): name of added rule.
+ * - fn (Function): rule function.
+ * - options (Object): rule options (not mandatory).
+ *
+ * Add new rule to chain after one with given name. See also
+ * [[Ruler.before]], [[Ruler.push]].
+ *
+ * ##### Options:
+ *
+ * - __alt__ - array with names of "alternate" chains.
+ *
+ * ##### Example
+ *
+ * ```javascript
+ * var md = require('markdown-it')();
+ *
+ * md.inline.ruler.after('text', 'my_rule', function replace(state) {
+ * //...
+ * });
+ * ```
+ **/
+Ruler.prototype.after = function (afterName, ruleName, fn, options) {
+ const index = this.__find__(afterName);
+ const opt = options || {};
+ if (index === -1) {
+ throw new Error('Parser rule not found: ' + afterName);
+ }
+ this.__rules__.splice(index + 1, 0, {
+ name: ruleName,
+ enabled: true,
+ fn,
+ alt: opt.alt || []
+ });
+ this.__cache__ = null;
+};
+
+/**
+ * Ruler.push(ruleName, fn [, options])
+ * - ruleName (String): name of added rule.
+ * - fn (Function): rule function.
+ * - options (Object): rule options (not mandatory).
+ *
+ * Push new rule to the end of chain. See also
+ * [[Ruler.before]], [[Ruler.after]].
+ *
+ * ##### Options:
+ *
+ * - __alt__ - array with names of "alternate" chains.
+ *
+ * ##### Example
+ *
+ * ```javascript
+ * var md = require('markdown-it')();
+ *
+ * md.core.ruler.push('my_rule', function replace(state) {
+ * //...
+ * });
+ * ```
+ **/
+Ruler.prototype.push = function (ruleName, fn, options) {
+ const opt = options || {};
+ this.__rules__.push({
+ name: ruleName,
+ enabled: true,
+ fn,
+ alt: opt.alt || []
+ });
+ this.__cache__ = null;
+};
+
+/**
+ * Ruler.enable(list [, ignoreInvalid]) -> Array
+ * - list (String|Array): list of rule names to enable.
+ * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
+ *
+ * Enable rules with given names. If any rule name is not found, an Error is
+ * thrown; this can be suppressed with the second parameter.
+ *
+ * Returns list of found rule names (if no exception happened).
+ *
+ * See also [[Ruler.disable]], [[Ruler.enableOnly]].
+ **/
+Ruler.prototype.enable = function (list, ignoreInvalid) {
+ if (!Array.isArray(list)) {
+ list = [list];
+ }
+ const result = [];
+
+ // Search by name and enable
+ list.forEach(function (name) {
+ const idx = this.__find__(name);
+ if (idx < 0) {
+ if (ignoreInvalid) {
+ return;
+ }
+ throw new Error('Rules manager: invalid rule name ' + name);
+ }
+ this.__rules__[idx].enabled = true;
+ result.push(name);
+ }, this);
+ this.__cache__ = null;
+ return result;
+};
+
+/**
+ * Ruler.enableOnly(list [, ignoreInvalid])
+ * - list (String|Array): list of rule names to enable (whitelist).
+ * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
+ *
+ * Enable rules with given names, and disable everything else. If any rule name
+ * is not found, an Error is thrown; this can be suppressed with the second parameter.
+ *
+ * See also [[Ruler.disable]], [[Ruler.enable]].
+ **/
+Ruler.prototype.enableOnly = function (list, ignoreInvalid) {
+ if (!Array.isArray(list)) {
+ list = [list];
+ }
+ this.__rules__.forEach(function (rule) {
+ rule.enabled = false;
+ });
+ this.enable(list, ignoreInvalid);
+};
+
+/**
+ * Ruler.disable(list [, ignoreInvalid]) -> Array
+ * - list (String|Array): list of rule names to disable.
+ * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
+ *
+ * Disable rules with given names. If any rule name is not found, an Error is
+ * thrown; this can be suppressed with the second parameter.
+ *
+ * Returns list of found rule names (if no exception happened).
+ *
+ * See also [[Ruler.enable]], [[Ruler.enableOnly]].
+ **/
+Ruler.prototype.disable = function (list, ignoreInvalid) {
+ if (!Array.isArray(list)) {
+ list = [list];
+ }
+ const result = [];
+
+ // Search by name and disable
+ list.forEach(function (name) {
+ const idx = this.__find__(name);
+ if (idx < 0) {
+ if (ignoreInvalid) {
+ return;
+ }
+ throw new Error('Rules manager: invalid rule name ' + name);
+ }
+ this.__rules__[idx].enabled = false;
+ result.push(name);
+ }, this);
+ this.__cache__ = null;
+ return result;
+};
+
+/**
+ * Ruler.getRules(chainName) -> Array
+ *
+ * Return array of active functions (rules) for given chain name. It analyzes
+ * the rule configuration, compiles the cache if it does not exist, and returns the result.
+ *
+ * Default chain name is `''` (empty string). It can't be skipped. That's
+ * done intentionally, to keep signature monomorphic for high speed.
+ **/
+Ruler.prototype.getRules = function (chainName) {
+ if (this.__cache__ === null) {
+ this.__compile__();
+ }
+
+ // Chain can be empty, if rules disabled. But we still have to return Array.
+ return this.__cache__[chainName] || [];
+};
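A short, illustrative sketch (not part of the diff) of the Ruler enable/disable/getRules API described above, using the core ruler of a markdown-it instance.

```javascript
const MarkdownIt = require('markdown-it');
const md = new MarkdownIt();

// Disable two core rules, then inspect the recompiled default ('') chain.
md.core.ruler.disable(['replacements', 'smartquotes']);
console.log(md.core.ruler.getRules('').length);

// Re-enable them; the `true` flag ignores names that do not exist.
md.core.ruler.enable(['replacements', 'smartquotes', 'no_such_rule'], true);
```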
+
+// Token class
+
+/**
+ * class Token
+ **/
+
+/**
+ * new Token(type, tag, nesting)
+ *
+ * Create new token and fill passed properties.
+ **/
+function Token(type, tag, nesting) {
+ /**
+ * Token#type -> String
+ *
+ * Type of the token (string, e.g. "paragraph_open")
+ **/
+ this.type = type;
+
+ /**
+ * Token#tag -> String
+ *
+ * html tag name, e.g. "p"
+ **/
+ this.tag = tag;
+
+ /**
+ * Token#attrs -> Array
+ *
+ * Html attributes. Format: `[ [ name1, value1 ], [ name2, value2 ] ]`
+ **/
+ this.attrs = null;
+
+ /**
+ * Token#map -> Array
+ *
+ * Source map info. Format: `[ line_begin, line_end ]`
+ **/
+ this.map = null;
+
+ /**
+ * Token#nesting -> Number
+ *
+ * Level change (number in {-1, 0, 1} set), where:
+ *
+ * - `1` means the tag is opening
+ * - `0` means the tag is self-closing
+ * - `-1` means the tag is closing
+ **/
+ this.nesting = nesting;
+
+ /**
+ * Token#level -> Number
+ *
+ * nesting level, the same as `state.level`
+ **/
+ this.level = 0;
+
+ /**
+ * Token#children -> Array
+ *
+ * An array of child nodes (inline and img tokens)
+ **/
+ this.children = null;
+
+ /**
+ * Token#content -> String
+ *
+ * In a case of self-closing tag (code, html, fence, etc.),
+ * it has contents of this tag.
+ **/
+ this.content = '';
+
+ /**
+ * Token#markup -> String
+ *
+ * '*' or '_' for emphasis, fence string for fence, etc.
+ **/
+ this.markup = '';
+
+ /**
+ * Token#info -> String
+ *
+ * Additional information:
+ *
+ * - Info string for "fence" tokens
+ * - The value "auto" for autolink "link_open" and "link_close" tokens
+ * - The string value of the item marker for ordered-list "list_item_open" tokens
+ **/
+ this.info = '';
+
+ /**
+ * Token#meta -> Object
+ *
+ * A place for plugins to store arbitrary data
+ **/
+ this.meta = null;
+
+ /**
+ * Token#block -> Boolean
+ *
+ * True for block-level tokens, false for inline tokens.
+ * Used in renderer to calculate line breaks
+ **/
+ this.block = false;
+
+ /**
+ * Token#hidden -> Boolean
+ *
+ * If it's true, ignore this element when rendering. Used for tight lists
+ * to hide paragraphs.
+ **/
+ this.hidden = false;
+}
+
+/**
+ * Token.attrIndex(name) -> Number
+ *
+ * Search attribute index by name.
+ **/
+Token.prototype.attrIndex = function attrIndex(name) {
+ if (!this.attrs) {
+ return -1;
+ }
+ const attrs = this.attrs;
+ for (let i = 0, len = attrs.length; i < len; i++) {
+ if (attrs[i][0] === name) {
+ return i;
+ }
+ }
+ return -1;
+};
+
+/**
+ * Token.attrPush(attrData)
+ *
+ * Add `[ name, value ]` attribute to list. Init attrs if necessary
+ **/
+Token.prototype.attrPush = function attrPush(attrData) {
+ if (this.attrs) {
+ this.attrs.push(attrData);
+ } else {
+ this.attrs = [attrData];
+ }
+};
+
+/**
+ * Token.attrSet(name, value)
+ *
+ * Set `name` attribute to `value`. Override old value if exists.
+ **/
+Token.prototype.attrSet = function attrSet(name, value) {
+ const idx = this.attrIndex(name);
+ const attrData = [name, value];
+ if (idx < 0) {
+ this.attrPush(attrData);
+ } else {
+ this.attrs[idx] = attrData;
+ }
+};
+
+/**
+ * Token.attrGet(name)
+ *
+ * Get the value of attribute `name`, or null if it does not exist.
+ **/
+Token.prototype.attrGet = function attrGet(name) {
+ const idx = this.attrIndex(name);
+ let value = null;
+ if (idx >= 0) {
+ value = this.attrs[idx][1];
+ }
+ return value;
+};
+
+/**
+ * Token.attrJoin(name, value)
+ *
+ * Join a value to an existing attribute via a space, or create a new attribute
+ * if it does not exist. Useful for manipulating token classes.
+ **/
+Token.prototype.attrJoin = function attrJoin(name, value) {
+ const idx = this.attrIndex(name);
+ if (idx < 0) {
+ this.attrPush([name, value]);
+ } else {
+ this.attrs[idx][1] = this.attrs[idx][1] + ' ' + value;
+ }
+};
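To illustrate the Token attribute helpers above (again, not part of the bundle), a hypothetical core rule that tags every heading; the rule name `heading_class` and the `doc-heading` class are invented for this sketch.

```javascript
const MarkdownIt = require('markdown-it');
const md = new MarkdownIt();

md.core.ruler.push('heading_class', function (state) {
  for (const token of state.tokens) {
    if (token.type === 'heading_open') {
      token.attrJoin('class', 'doc-heading');
    }
  }
});

console.log(md.render('# Title')); // '<h1 class="doc-heading">Title</h1>\n'
```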
+
+// Core state object
+//
+
+function StateCore(src, md, env) {
+ this.src = src;
+ this.env = env;
+ this.tokens = [];
+ this.inlineMode = false;
+ this.md = md; // link to parser instance
+}
+
+// re-export Token class to use in core rules
+StateCore.prototype.Token = Token;
+
+// Normalize input string
+
+// https://spec.commonmark.org/0.29/#line-ending
+const NEWLINES_RE = /\r\n?|\n/g;
+const NULL_RE = /\0/g;
+function normalize(state) {
+ let str;
+
+ // Normalize newlines
+ str = state.src.replace(NEWLINES_RE, '\n');
+
+ // Replace NULL characters
+ str = str.replace(NULL_RE, '\uFFFD');
+ state.src = str;
+}
+function block(state) {
+ let token;
+ if (state.inlineMode) {
+ token = new state.Token('inline', '', 0);
+ token.content = state.src;
+ token.map = [0, 1];
+ token.children = [];
+ state.tokens.push(token);
+ } else {
+ state.md.block.parse(state.src, state.md, state.env, state.tokens);
+ }
+}
+function inline(state) {
+ const tokens = state.tokens;
+
+ // Parse inlines
+ for (let i = 0, l = tokens.length; i < l; i++) {
+ const tok = tokens[i];
+ if (tok.type === 'inline') {
+ state.md.inline.parse(tok.content, state.md, state.env, tok.children);
+ }
+ }
+}
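As a quick illustration of the block/inline hand-off above (not part of the bundled code):

```javascript
const MarkdownIt = require('markdown-it');
const md = new MarkdownIt();

// The 'block' rule produces block-level tokens; the 'inline' rule then parses
// the content of every token of type 'inline' into its children array.
const tokens = md.parse('*hi*', {});
console.log(tokens.map(t => t.type));             // [ 'paragraph_open', 'inline', 'paragraph_close' ]
console.log(tokens[1].children.map(t => t.type)); // [ 'em_open', 'text', 'em_close' ]
```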
+
+// Replace link-like texts with link nodes.
+//
+// Currently restricted by `md.validateLink()` to http/https/ftp
+//
+
+function isLinkOpen$1(str) {
+  return /^<a[>\s]/i.test(str);
+}
+function isLinkClose$1(str) {
+ return /^<\/a\s*>/i.test(str);
+}
+function linkify$1(state) {
+ const blockTokens = state.tokens;
+ if (!state.md.options.linkify) {
+ return;
+ }
+ for (let j = 0, l = blockTokens.length; j < l; j++) {
+ if (blockTokens[j].type !== 'inline' || !state.md.linkify.pretest(blockTokens[j].content)) {
+ continue;
+ }
+ let tokens = blockTokens[j].children;
+ let htmlLinkLevel = 0;
+
+ // We scan from the end, to keep position when new tags added.
+ // Use reversed logic in links start/end match
+ for (let i = tokens.length - 1; i >= 0; i--) {
+ const currentToken = tokens[i];
+
+ // Skip content of markdown links
+ if (currentToken.type === 'link_close') {
+ i--;
+ while (tokens[i].level !== currentToken.level && tokens[i].type !== 'link_open') {
+ i--;
+ }
+ continue;
+ }
+
+ // Skip content of html tag links
+ if (currentToken.type === 'html_inline') {
+ if (isLinkOpen$1(currentToken.content) && htmlLinkLevel > 0) {
+ htmlLinkLevel--;
+ }
+ if (isLinkClose$1(currentToken.content)) {
+ htmlLinkLevel++;
+ }
+ }
+ if (htmlLinkLevel > 0) {
+ continue;
+ }
+ if (currentToken.type === 'text' && state.md.linkify.test(currentToken.content)) {
+ const text = currentToken.content;
+ let links = state.md.linkify.match(text);
+
+ // Now split string to nodes
+ const nodes = [];
+ let level = currentToken.level;
+ let lastPos = 0;
+
+ // forbid escape sequence at the start of the string,
+ // this avoids http\://example.com/ from being linkified as
+ // http://example.com/
+ if (links.length > 0 && links[0].index === 0 && i > 0 && tokens[i - 1].type === 'text_special') {
+ links = links.slice(1);
+ }
+ for (let ln = 0; ln < links.length; ln++) {
+ const url = links[ln].url;
+ const fullUrl = state.md.normalizeLink(url);
+ if (!state.md.validateLink(fullUrl)) {
+ continue;
+ }
+ let urlText = links[ln].text;
+
+ // Linkifier might send raw hostnames like "example.com", where url
+ // starts with domain name. So we prepend http:// in those cases,
+ // and remove it afterwards.
+ //
+ if (!links[ln].schema) {
+ urlText = state.md.normalizeLinkText('http://' + urlText).replace(/^http:\/\//, '');
+ } else if (links[ln].schema === 'mailto:' && !/^mailto:/i.test(urlText)) {
+ urlText = state.md.normalizeLinkText('mailto:' + urlText).replace(/^mailto:/, '');
+ } else {
+ urlText = state.md.normalizeLinkText(urlText);
+ }
+ const pos = links[ln].index;
+ if (pos > lastPos) {
+ const token = new state.Token('text', '', 0);
+ token.content = text.slice(lastPos, pos);
+ token.level = level;
+ nodes.push(token);
+ }
+ const token_o = new state.Token('link_open', 'a', 1);
+ token_o.attrs = [['href', fullUrl]];
+ token_o.level = level++;
+ token_o.markup = 'linkify';
+ token_o.info = 'auto';
+ nodes.push(token_o);
+ const token_t = new state.Token('text', '', 0);
+ token_t.content = urlText;
+ token_t.level = level;
+ nodes.push(token_t);
+ const token_c = new state.Token('link_close', 'a', -1);
+ token_c.level = --level;
+ token_c.markup = 'linkify';
+ token_c.info = 'auto';
+ nodes.push(token_c);
+ lastPos = links[ln].lastIndex;
+ }
+ if (lastPos < text.length) {
+ const token = new state.Token('text', '', 0);
+ token.content = text.slice(lastPos);
+ token.level = level;
+ nodes.push(token);
+ }
+
+ // replace current node
+ blockTokens[j].children = tokens = arrayReplaceAt(tokens, i, nodes);
+ }
+ }
+ }
+}
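A minimal sketch (illustrative only) of the behaviour implemented by the linkify rule above, assuming the `linkify` option is enabled:

```javascript
const MarkdownIt = require('markdown-it');
const md = new MarkdownIt({ linkify: true });

// Bare hostnames get http:// prepended for the href, but the visible text
// keeps its original form, as described in the rule above.
console.log(md.renderInline('visit example.com today'));
// 'visit <a href="http://example.com">example.com</a> today'
```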
+
+// Simple typographic replacements
+//
+// (c) (C) → ©
+// (tm) (TM) → ™
+// (r) (R) → ®
+// +- → ±
+// ... → … (also ?.... → ?.., !.... → !..)
+// ???????? → ???, !!!!! → !!!, `,,` → `,`
+// -- → –, --- → —
+//
+
+// TODO:
+// - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾
+// - multiplications 2 x 4 -> 2 × 4
+
+const RARE_RE = /\+-|\.\.|\?\?\?\?|!!!!|,,|--/;
+
+// Workaround for phantomjs - need regex without /g flag,
+// or root check will fail every second time
+const SCOPED_ABBR_TEST_RE = /\((c|tm|r)\)/i;
+const SCOPED_ABBR_RE = /\((c|tm|r)\)/ig;
+const SCOPED_ABBR = {
+ c: '©',
+ r: '®',
+ tm: '™'
+};
+function replaceFn(match, name) {
+ return SCOPED_ABBR[name.toLowerCase()];
+}
+function replace_scoped(inlineTokens) {
+ let inside_autolink = 0;
+ for (let i = inlineTokens.length - 1; i >= 0; i--) {
+ const token = inlineTokens[i];
+ if (token.type === 'text' && !inside_autolink) {
+ token.content = token.content.replace(SCOPED_ABBR_RE, replaceFn);
+ }
+ if (token.type === 'link_open' && token.info === 'auto') {
+ inside_autolink--;
+ }
+ if (token.type === 'link_close' && token.info === 'auto') {
+ inside_autolink++;
+ }
+ }
+}
+function replace_rare(inlineTokens) {
+ let inside_autolink = 0;
+ for (let i = inlineTokens.length - 1; i >= 0; i--) {
+ const token = inlineTokens[i];
+ if (token.type === 'text' && !inside_autolink) {
+ if (RARE_RE.test(token.content)) {
+ token.content = token.content.replace(/\+-/g, '±')
+ // .., ..., ....... -> …
+ // but ?..... & !..... -> ?.. & !..
+ .replace(/\.{2,}/g, '…').replace(/([?!])…/g, '$1..').replace(/([?!]){4,}/g, '$1$1$1').replace(/,{2,}/g, ',')
+ // em-dash
+ .replace(/(^|[^-])---(?=[^-]|$)/mg, '$1\u2014')
+ // en-dash
+ .replace(/(^|\s)--(?=\s|$)/mg, '$1\u2013').replace(/(^|[^-\s])--(?=[^-\s]|$)/mg, '$1\u2013');
+ }
+ }
+ if (token.type === 'link_open' && token.info === 'auto') {
+ inside_autolink--;
+ }
+ if (token.type === 'link_close' && token.info === 'auto') {
+ inside_autolink++;
+ }
+ }
+}
+function replace(state) {
+ let blkIdx;
+ if (!state.md.options.typographer) {
+ return;
+ }
+ for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
+ if (state.tokens[blkIdx].type !== 'inline') {
+ continue;
+ }
+ if (SCOPED_ABBR_TEST_RE.test(state.tokens[blkIdx].content)) {
+ replace_scoped(state.tokens[blkIdx].children);
+ }
+ if (RARE_RE.test(state.tokens[blkIdx].content)) {
+ replace_rare(state.tokens[blkIdx].children);
+ }
+ }
+}
+
+// Convert straight quotation marks to typographic ones
+//
+
+const QUOTE_TEST_RE = /['"]/;
+const QUOTE_RE = /['"]/g;
+const APOSTROPHE = '\u2019'; /* ’ */
+
+function replaceAt(str, index, ch) {
+ return str.slice(0, index) + ch + str.slice(index + 1);
+}
+function process_inlines(tokens, state) {
+ let j;
+ const stack = [];
+ for (let i = 0; i < tokens.length; i++) {
+ const token = tokens[i];
+ const thisLevel = tokens[i].level;
+ for (j = stack.length - 1; j >= 0; j--) {
+ if (stack[j].level <= thisLevel) {
+ break;
+ }
+ }
+ stack.length = j + 1;
+ if (token.type !== 'text') {
+ continue;
+ }
+ let text = token.content;
+ let pos = 0;
+ let max = text.length;
+
+ /* eslint no-labels:0,block-scoped-var:0 */
+ OUTER: while (pos < max) {
+ QUOTE_RE.lastIndex = pos;
+ const t = QUOTE_RE.exec(text);
+ if (!t) {
+ break;
+ }
+ let canOpen = true;
+ let canClose = true;
+ pos = t.index + 1;
+ const isSingle = t[0] === "'";
+
+ // Find previous character,
+ // default to space if it's the beginning of the line
+ //
+ let lastChar = 0x20;
+ if (t.index - 1 >= 0) {
+ lastChar = text.charCodeAt(t.index - 1);
+ } else {
+ for (j = i - 1; j >= 0; j--) {
+ if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // lastChar defaults to 0x20
+ if (!tokens[j].content) continue; // should skip all tokens except 'text', 'html_inline' or 'code_inline'
+
+ lastChar = tokens[j].content.charCodeAt(tokens[j].content.length - 1);
+ break;
+ }
+ }
+
+ // Find next character,
+ // default to space if it's the end of the line
+ //
+ let nextChar = 0x20;
+ if (pos < max) {
+ nextChar = text.charCodeAt(pos);
+ } else {
+ for (j = i + 1; j < tokens.length; j++) {
+ if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // nextChar defaults to 0x20
+ if (!tokens[j].content) continue; // should skip all tokens except 'text', 'html_inline' or 'code_inline'
+
+ nextChar = tokens[j].content.charCodeAt(0);
+ break;
+ }
+ }
+ const isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar));
+ const isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar));
+ const isLastWhiteSpace = isWhiteSpace(lastChar);
+ const isNextWhiteSpace = isWhiteSpace(nextChar);
+ if (isNextWhiteSpace) {
+ canOpen = false;
+ } else if (isNextPunctChar) {
+ if (!(isLastWhiteSpace || isLastPunctChar)) {
+ canOpen = false;
+ }
+ }
+ if (isLastWhiteSpace) {
+ canClose = false;
+ } else if (isLastPunctChar) {
+ if (!(isNextWhiteSpace || isNextPunctChar)) {
+ canClose = false;
+ }
+ }
+ if (nextChar === 0x22 /* " */ && t[0] === '"') {
+ if (lastChar >= 0x30 /* 0 */ && lastChar <= 0x39 /* 9 */) {
+ // special case: 1"" - count first quote as an inch
+ canClose = canOpen = false;
+ }
+ }
+ if (canOpen && canClose) {
+ // Replace quotes in the middle of punctuation sequence, but not
+ // in the middle of the words, i.e.:
+ //
+ // 1. foo " bar " baz - not replaced
+ // 2. foo-"-bar-"-baz - replaced
+ // 3. foo"bar"baz - not replaced
+ //
+ canOpen = isLastPunctChar;
+ canClose = isNextPunctChar;
+ }
+ if (!canOpen && !canClose) {
+ // middle of word
+ if (isSingle) {
+ token.content = replaceAt(token.content, t.index, APOSTROPHE);
+ }
+ continue;
+ }
+ if (canClose) {
+ // this could be a closing quote, rewind the stack to get a match
+ for (j = stack.length - 1; j >= 0; j--) {
+ let item = stack[j];
+ if (stack[j].level < thisLevel) {
+ break;
+ }
+ if (item.single === isSingle && stack[j].level === thisLevel) {
+ item = stack[j];
+ let openQuote;
+ let closeQuote;
+ if (isSingle) {
+ openQuote = state.md.options.quotes[2];
+ closeQuote = state.md.options.quotes[3];
+ } else {
+ openQuote = state.md.options.quotes[0];
+ closeQuote = state.md.options.quotes[1];
+ }
+
+ // replace token.content *before* tokens[item.token].content,
+ // because, if they are pointing at the same token, replaceAt
+ // could mess up indices when quote length != 1
+ token.content = replaceAt(token.content, t.index, closeQuote);
+ tokens[item.token].content = replaceAt(tokens[item.token].content, item.pos, openQuote);
+ pos += closeQuote.length - 1;
+ if (item.token === i) {
+ pos += openQuote.length - 1;
+ }
+ text = token.content;
+ max = text.length;
+ stack.length = j;
+ continue OUTER;
+ }
+ }
+ }
+ if (canOpen) {
+ stack.push({
+ token: i,
+ pos: t.index,
+ single: isSingle,
+ level: thisLevel
+ });
+ } else if (canClose && isSingle) {
+ token.content = replaceAt(token.content, t.index, APOSTROPHE);
+ }
+ }
+ }
+}
+function smartquotes(state) {
+ /* eslint max-depth:0 */
+ if (!state.md.options.typographer) {
+ return;
+ }
+ for (let blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
+ if (state.tokens[blkIdx].type !== 'inline' || !QUOTE_TEST_RE.test(state.tokens[blkIdx].content)) {
+ continue;
+ }
+ process_inlines(state.tokens[blkIdx].children, state);
+ }
+}
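For reference (not part of the diff), the combined effect of the replacements and smartquotes rules when the typographer option is on:

```javascript
const MarkdownIt = require('markdown-it');
const md = new MarkdownIt({ typographer: true });

console.log(md.renderInline('"Copyright (c)" -- it\'s 1999...'));
// '“Copyright ©” – it’s 1999…'
```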
+
+// Join raw text tokens with the rest of the text
+//
+// This is set as a separate rule to provide an opportunity for plugins
+// to run text replacements after text join, but before escape join.
+//
+// For example, `\:)` shouldn't be replaced with an emoji.
+//
+
+function text_join(state) {
+ let curr, last;
+ const blockTokens = state.tokens;
+ const l = blockTokens.length;
+ for (let j = 0; j < l; j++) {
+ if (blockTokens[j].type !== 'inline') continue;
+ const tokens = blockTokens[j].children;
+ const max = tokens.length;
+ for (curr = 0; curr < max; curr++) {
+ if (tokens[curr].type === 'text_special') {
+ tokens[curr].type = 'text';
+ }
+ }
+ for (curr = last = 0; curr < max; curr++) {
+ if (tokens[curr].type === 'text' && curr + 1 < max && tokens[curr + 1].type === 'text') {
+ // collapse two adjacent text nodes
+ tokens[curr + 1].content = tokens[curr].content + tokens[curr + 1].content;
+ } else {
+ if (curr !== last) {
+ tokens[last] = tokens[curr];
+ }
+ last++;
+ }
+ }
+ if (curr !== last) {
+ tokens.length = last;
+ }
+ }
+}
+
+/** internal
+ * class Core
+ *
+ * Top-level rules executor. Glues block/inline parsers and does intermediate
+ * transformations.
+ **/
+
+const _rules$2 = [['normalize', normalize], ['block', block], ['inline', inline], ['linkify', linkify$1], ['replacements', replace], ['smartquotes', smartquotes],
+// `text_join` finds `text_special` tokens (for escape sequences)
+// and joins them with the rest of the text
+['text_join', text_join]];
+
+/**
+ * new Core()
+ **/
+function Core() {
+ /**
+ * Core#ruler -> Ruler
+ *
+ * [[Ruler]] instance. Keep configuration of core rules.
+ **/
+ this.ruler = new Ruler();
+ for (let i = 0; i < _rules$2.length; i++) {
+ this.ruler.push(_rules$2[i][0], _rules$2[i][1]);
+ }
+}
+
+/**
+ * Core.process(state)
+ *
+ * Executes core chain rules.
+ **/
+Core.prototype.process = function (state) {
+ const rules = this.ruler.getRules('');
+ for (let i = 0, l = rules.length; i < l; i++) {
+ rules[i](state);
+ }
+};
+Core.prototype.State = StateCore;
+
+// Parser state class
+
+function StateBlock(src, md, env, tokens) {
+ this.src = src;
+
+ // link to parser instance
+ this.md = md;
+ this.env = env;
+
+ //
+  // Internal state variables
+ //
+
+ this.tokens = tokens;
+ this.bMarks = []; // line begin offsets for fast jumps
+ this.eMarks = []; // line end offsets for fast jumps
+ this.tShift = []; // offsets of the first non-space characters (tabs not expanded)
+ this.sCount = []; // indents for each line (tabs expanded)
+
+ // An amount of virtual spaces (tabs expanded) between beginning
+ // of each line (bMarks) and real beginning of that line.
+ //
+ // It exists only as a hack because blockquotes override bMarks
+ // losing information in the process.
+ //
+ // It's used only when expanding tabs, you can think about it as
+ // an initial tab length, e.g. bsCount=21 applied to string `\t123`
+ // means first tab should be expanded to 4-21%4 === 3 spaces.
+ //
+ this.bsCount = [];
+
+ // block parser variables
+
+ // required block content indent (for example, if we are
+ // inside a list, it would be positioned after list marker)
+ this.blkIndent = 0;
+ this.line = 0; // line index in src
+ this.lineMax = 0; // lines count
+ this.tight = false; // loose/tight mode for lists
+ this.ddIndent = -1; // indent of the current dd block (-1 if there isn't any)
+ this.listIndent = -1; // indent of the current list block (-1 if there isn't any)
+
+ // can be 'blockquote', 'list', 'root', 'paragraph' or 'reference'
+ // used in lists to determine if they interrupt a paragraph
+ this.parentType = 'root';
+ this.level = 0;
+
+ // Create caches
+ // Generate markers.
+ const s = this.src;
+ for (let start = 0, pos = 0, indent = 0, offset = 0, len = s.length, indent_found = false; pos < len; pos++) {
+ const ch = s.charCodeAt(pos);
+ if (!indent_found) {
+ if (isSpace(ch)) {
+ indent++;
+ if (ch === 0x09) {
+ offset += 4 - offset % 4;
+ } else {
+ offset++;
+ }
+ continue;
+ } else {
+ indent_found = true;
+ }
+ }
+ if (ch === 0x0A || pos === len - 1) {
+ if (ch !== 0x0A) {
+ pos++;
+ }
+ this.bMarks.push(start);
+ this.eMarks.push(pos);
+ this.tShift.push(indent);
+ this.sCount.push(offset);
+ this.bsCount.push(0);
+ indent_found = false;
+ indent = 0;
+ offset = 0;
+ start = pos + 1;
+ }
+ }
+
+ // Push fake entry to simplify cache bounds checks
+ this.bMarks.push(s.length);
+ this.eMarks.push(s.length);
+ this.tShift.push(0);
+ this.sCount.push(0);
+ this.bsCount.push(0);
+ this.lineMax = this.bMarks.length - 1; // don't count last fake line
+}
+
+// Push new token to "stream".
+//
+StateBlock.prototype.push = function (type, tag, nesting) {
+ const token = new Token(type, tag, nesting);
+ token.block = true;
+ if (nesting < 0) this.level--; // closing tag
+ token.level = this.level;
+ if (nesting > 0) this.level++; // opening tag
+
+ this.tokens.push(token);
+ return token;
+};
+StateBlock.prototype.isEmpty = function isEmpty(line) {
+ return this.bMarks[line] + this.tShift[line] >= this.eMarks[line];
+};
+StateBlock.prototype.skipEmptyLines = function skipEmptyLines(from) {
+ for (let max = this.lineMax; from < max; from++) {
+ if (this.bMarks[from] + this.tShift[from] < this.eMarks[from]) {
+ break;
+ }
+ }
+ return from;
+};
+
+// Skip spaces from given position.
+StateBlock.prototype.skipSpaces = function skipSpaces(pos) {
+ for (let max = this.src.length; pos < max; pos++) {
+ const ch = this.src.charCodeAt(pos);
+ if (!isSpace(ch)) {
+ break;
+ }
+ }
+ return pos;
+};
+
+// Skip spaces from given position in reverse.
+StateBlock.prototype.skipSpacesBack = function skipSpacesBack(pos, min) {
+ if (pos <= min) {
+ return pos;
+ }
+ while (pos > min) {
+ if (!isSpace(this.src.charCodeAt(--pos))) {
+ return pos + 1;
+ }
+ }
+ return pos;
+};
+
+// Skip char codes from given position
+StateBlock.prototype.skipChars = function skipChars(pos, code) {
+ for (let max = this.src.length; pos < max; pos++) {
+ if (this.src.charCodeAt(pos) !== code) {
+ break;
+ }
+ }
+ return pos;
+};
+
+// Skip char codes reverse from given position - 1
+StateBlock.prototype.skipCharsBack = function skipCharsBack(pos, code, min) {
+ if (pos <= min) {
+ return pos;
+ }
+ while (pos > min) {
+ if (code !== this.src.charCodeAt(--pos)) {
+ return pos + 1;
+ }
+ }
+ return pos;
+};
+
+// Cut a range of lines from the source.
+StateBlock.prototype.getLines = function getLines(begin, end, indent, keepLastLF) {
+ if (begin >= end) {
+ return '';
+ }
+ const queue = new Array(end - begin);
+ for (let i = 0, line = begin; line < end; line++, i++) {
+ let lineIndent = 0;
+ const lineStart = this.bMarks[line];
+ let first = lineStart;
+ let last;
+ if (line + 1 < end || keepLastLF) {
+ // No need for bounds check because we have fake entry on tail.
+ last = this.eMarks[line] + 1;
+ } else {
+ last = this.eMarks[line];
+ }
+ while (first < last && lineIndent < indent) {
+ const ch = this.src.charCodeAt(first);
+ if (isSpace(ch)) {
+ if (ch === 0x09) {
+ lineIndent += 4 - (lineIndent + this.bsCount[line]) % 4;
+ } else {
+ lineIndent++;
+ }
+ } else if (first - lineStart < this.tShift[line]) {
+ // patched tShift masked characters to look like spaces (blockquotes, list markers)
+ lineIndent++;
+ } else {
+ break;
+ }
+ first++;
+ }
+ if (lineIndent > indent) {
+ // partially expanding tabs in code blocks, e.g '\t\tfoobar'
+ // with indent=2 becomes ' \tfoobar'
+ queue[i] = new Array(lineIndent - indent + 1).join(' ') + this.src.slice(first, last);
+ } else {
+ queue[i] = this.src.slice(first, last);
+ }
+ }
+ return queue.join('');
+};
+
+// re-export Token class to use in block rules
+StateBlock.prototype.Token = Token;
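To show how the StateBlock helpers above are typically used, here is a hypothetical plugin sketch (not part of this bundle): a block rule that turns lines starting with `!!` into a callout `<div>`. The rule name `callout` and its markup are invented for illustration.

```javascript
const MarkdownIt = require('markdown-it');
const md = new MarkdownIt();

md.block.ruler.before('paragraph', 'callout', function (state, startLine, endLine, silent) {
  const pos = state.bMarks[startLine] + state.tShift[startLine];
  const max = state.eMarks[startLine];

  if (state.src.charCodeAt(pos) !== 0x21 /* ! */ || state.src.charCodeAt(pos + 1) !== 0x21 /* ! */) {
    return false;
  }
  if (silent) return true; // validation mode: report a match without emitting tokens

  const tokenOpen = state.push('callout_open', 'div', 1);
  tokenOpen.attrSet('class', 'callout');

  const inline = state.push('inline', '', 0);
  inline.content = state.src.slice(pos + 2, max).trim();
  inline.children = [];

  state.push('callout_close', 'div', -1);
  state.line = startLine + 1;
  return true;
});

console.log(md.render('!! heads up'));
// '<div class="callout">heads up</div>\n'
```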
+
+// GFM table, https://github.github.com/gfm/#tables-extension-
+
+// Limit the amount of empty autocompleted cells in a table,
+// see https://github.com/markdown-it/markdown-it/issues/1000,
+//
+// Both pulldown-cmark and commonmark-hs limit the number of cells this way to ~200k.
+// We set it to 65k, which can expand user input by a factor of x370
+// (256x256 square is 1.8kB expanded into 650kB).
+const MAX_AUTOCOMPLETED_CELLS = 0x10000;
+function getLine(state, line) {
+ const pos = state.bMarks[line] + state.tShift[line];
+ const max = state.eMarks[line];
+ return state.src.slice(pos, max);
+}
+function escapedSplit(str) {
+ const result = [];
+ const max = str.length;
+ let pos = 0;
+ let ch = str.charCodeAt(pos);
+ let isEscaped = false;
+ let lastPos = 0;
+ let current = '';
+ while (pos < max) {
+ if (ch === 0x7c /* | */) {
+ if (!isEscaped) {
+ // pipe separating cells, '|'
+ result.push(current + str.substring(lastPos, pos));
+ current = '';
+ lastPos = pos + 1;
+ } else {
+ // escaped pipe, '\|'
+ current += str.substring(lastPos, pos - 1);
+ lastPos = pos;
+ }
+ }
+ isEscaped = ch === 0x5c /* \ */;
+ pos++;
+ ch = str.charCodeAt(pos);
+ }
+ result.push(current + str.substring(lastPos));
+ return result;
+}
+function table(state, startLine, endLine, silent) {
+ // should have at least two lines
+ if (startLine + 2 > endLine) {
+ return false;
+ }
+ let nextLine = startLine + 1;
+ if (state.sCount[nextLine] < state.blkIndent) {
+ return false;
+ }
+
+ // if it's indented more than 3 spaces, it should be a code block
+ if (state.sCount[nextLine] - state.blkIndent >= 4) {
+ return false;
+ }
+
+ // first character of the second line should be '|', '-', ':',
+ // and no other characters are allowed but spaces;
+ // basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp
+
+ let pos = state.bMarks[nextLine] + state.tShift[nextLine];
+ if (pos >= state.eMarks[nextLine]) {
+ return false;
+ }
+ const firstCh = state.src.charCodeAt(pos++);
+ if (firstCh !== 0x7C /* | */ && firstCh !== 0x2D /* - */ && firstCh !== 0x3A /* : */) {
+ return false;
+ }
+ if (pos >= state.eMarks[nextLine]) {
+ return false;
+ }
+ const secondCh = state.src.charCodeAt(pos++);
+ if (secondCh !== 0x7C /* | */ && secondCh !== 0x2D /* - */ && secondCh !== 0x3A /* : */ && !isSpace(secondCh)) {
+ return false;
+ }
+
+ // if first character is '-', then second character must not be a space
+ // (due to parsing ambiguity with list)
+ if (firstCh === 0x2D /* - */ && isSpace(secondCh)) {
+ return false;
+ }
+ while (pos < state.eMarks[nextLine]) {
+ const ch = state.src.charCodeAt(pos);
+ if (ch !== 0x7C /* | */ && ch !== 0x2D /* - */ && ch !== 0x3A /* : */ && !isSpace(ch)) {
+ return false;
+ }
+ pos++;
+ }
+ let lineText = getLine(state, startLine + 1);
+ let columns = lineText.split('|');
+ const aligns = [];
+ for (let i = 0; i < columns.length; i++) {
+ const t = columns[i].trim();
+ if (!t) {
+ // allow empty columns before and after table, but not in between columns;
+ // e.g. allow ` |---| `, disallow ` ---||--- `
+ if (i === 0 || i === columns.length - 1) {
+ continue;
+ } else {
+ return false;
+ }
+ }
+ if (!/^:?-+:?$/.test(t)) {
+ return false;
+ }
+ if (t.charCodeAt(t.length - 1) === 0x3A /* : */) {
+ aligns.push(t.charCodeAt(0) === 0x3A /* : */ ? 'center' : 'right');
+ } else if (t.charCodeAt(0) === 0x3A /* : */) {
+ aligns.push('left');
+ } else {
+ aligns.push('');
+ }
+ }
+ lineText = getLine(state, startLine).trim();
+ if (lineText.indexOf('|') === -1) {
+ return false;
+ }
+ if (state.sCount[startLine] - state.blkIndent >= 4) {
+ return false;
+ }
+ columns = escapedSplit(lineText);
+ if (columns.length && columns[0] === '') columns.shift();
+ if (columns.length && columns[columns.length - 1] === '') columns.pop();
+
+  // the header row defines the number of columns for the entire table,
+  // and the align row must match it exactly (the remaining rows can differ)
+ const columnCount = columns.length;
+ if (columnCount === 0 || columnCount !== aligns.length) {
+ return false;
+ }
+ if (silent) {
+ return true;
+ }
+ const oldParentType = state.parentType;
+ state.parentType = 'table';
+
+ // use 'blockquote' lists for termination because it's
+ // the most similar to tables
+ const terminatorRules = state.md.block.ruler.getRules('blockquote');
+ const token_to = state.push('table_open', 'table', 1);
+ const tableLines = [startLine, 0];
+ token_to.map = tableLines;
+ const token_tho = state.push('thead_open', 'thead', 1);
+ token_tho.map = [startLine, startLine + 1];
+ const token_htro = state.push('tr_open', 'tr', 1);
+ token_htro.map = [startLine, startLine + 1];
+ for (let i = 0; i < columns.length; i++) {
+ const token_ho = state.push('th_open', 'th', 1);
+ if (aligns[i]) {
+ token_ho.attrs = [['style', 'text-align:' + aligns[i]]];
+ }
+ const token_il = state.push('inline', '', 0);
+ token_il.content = columns[i].trim();
+ token_il.children = [];
+ state.push('th_close', 'th', -1);
+ }
+ state.push('tr_close', 'tr', -1);
+ state.push('thead_close', 'thead', -1);
+ let tbodyLines;
+ let autocompletedCells = 0;
+ for (nextLine = startLine + 2; nextLine < endLine; nextLine++) {
+ if (state.sCount[nextLine] < state.blkIndent) {
+ break;
+ }
+ let terminate = false;
+ for (let i = 0, l = terminatorRules.length; i < l; i++) {
+ if (terminatorRules[i](state, nextLine, endLine, true)) {
+ terminate = true;
+ break;
+ }
+ }
+ if (terminate) {
+ break;
+ }
+ lineText = getLine(state, nextLine).trim();
+ if (!lineText) {
+ break;
+ }
+ if (state.sCount[nextLine] - state.blkIndent >= 4) {
+ break;
+ }
+ columns = escapedSplit(lineText);
+ if (columns.length && columns[0] === '') columns.shift();
+ if (columns.length && columns[columns.length - 1] === '') columns.pop();
+
+ // note: autocomplete count can be negative if user specifies more columns than header,
+ // but that does not affect intended use (which is limiting expansion)
+ autocompletedCells += columnCount - columns.length;
+ if (autocompletedCells > MAX_AUTOCOMPLETED_CELLS) {
+ break;
+ }
+ if (nextLine === startLine + 2) {
+ const token_tbo = state.push('tbody_open', 'tbody', 1);
+ token_tbo.map = tbodyLines = [startLine + 2, 0];
+ }
+ const token_tro = state.push('tr_open', 'tr', 1);
+ token_tro.map = [nextLine, nextLine + 1];
+ for (let i = 0; i < columnCount; i++) {
+ const token_tdo = state.push('td_open', 'td', 1);
+ if (aligns[i]) {
+ token_tdo.attrs = [['style', 'text-align:' + aligns[i]]];
+ }
+ const token_il = state.push('inline', '', 0);
+ token_il.content = columns[i] ? columns[i].trim() : '';
+ token_il.children = [];
+ state.push('td_close', 'td', -1);
+ }
+ state.push('tr_close', 'tr', -1);
+ }
+ if (tbodyLines) {
+ state.push('tbody_close', 'tbody', -1);
+ tbodyLines[1] = nextLine;
+ }
+ state.push('table_close', 'table', -1);
+ tableLines[1] = nextLine;
+ state.parentType = oldParentType;
+ state.line = nextLine;
+ return true;
+}
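An illustrative round trip through the GFM table rule above (not part of the bundled file):

```javascript
const MarkdownIt = require('markdown-it');
const md = new MarkdownIt();

const src = [
  '| Name | Role  |',
  '| ---- | :---: |',
  '| Ada  | eng   |'
].join('\n');

// The delimiter row fixes the column count and alignment; the centre alignment
// is emitted as an inline text-align style on the matching <th>/<td> cells.
console.log(md.render(src));
```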
+
+// Code block (4 spaces padded)
+
+function code(state, startLine, endLine /*, silent */) {
+ if (state.sCount[startLine] - state.blkIndent < 4) {
+ return false;
+ }
+ let nextLine = startLine + 1;
+ let last = nextLine;
+ while (nextLine < endLine) {
+ if (state.isEmpty(nextLine)) {
+ nextLine++;
+ continue;
+ }
+ if (state.sCount[nextLine] - state.blkIndent >= 4) {
+ nextLine++;
+ last = nextLine;
+ continue;
+ }
+ break;
+ }
+ state.line = last;
+ const token = state.push('code_block', 'code', 0);
+ token.content = state.getLines(startLine, last, 4 + state.blkIndent, false) + '\n';
+ token.map = [startLine, state.line];
+ return true;
+}
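And a one-liner (illustrative only) showing the indented code block rule together with the code_block renderer from earlier in this bundle:

```javascript
const MarkdownIt = require('markdown-it');
const md = new MarkdownIt();

console.log(md.render('    indented line\n'));
// '<pre><code>indented line\n</code></pre>\n'
```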
+
+// fences (``` lang, ~~~ lang)
+
+function fence(state, startLine, endLine, silent) {
+ let pos = state.bMarks[startLine] + state.tShift[startLine];
+ let max = state.eMarks[startLine];
+
+ // if it's indented more than 3 spaces, it should be a code block
+ if (state.sCount[startLine] - state.blkIndent >= 4) {
+ return false;
+ }
+ if (pos + 3 > max) {
+ return false;
+ }
+ const marker = state.src.charCodeAt(pos);
+ if (marker !== 0x7E /* ~ */ && marker !== 0x60 /* ` */) {
+ return false;
+ }
+
+ // scan marker length
+ let mem = pos;
+ pos = state.skipChars(pos, marker);
+ let len = pos - mem;
+ if (len < 3) {
+ return false;
+ }
+ const markup = state.src.slice(mem, pos);
+ const params = state.src.slice(pos, max);
+ if (marker === 0x60 /* ` */) {
+ if (params.indexOf(String.fromCharCode(marker)) >= 0) {
+ return false;
+ }
+ }
+
+ // Since start is found, we can report success here in validation mode
+ if (silent) {
+ return true;
+ }
+
+ // search end of block
+ let nextLine = startLine;
+ let haveEndMarker = false;
+ for (;;) {
+ nextLine++;
+ if (nextLine >= endLine) {
+ // unclosed block should be autoclosed by end of document.
+ // also block seems to be autoclosed by end of parent
+ break;
+ }
+ pos = mem = state.bMarks[nextLine] + state.tShift[nextLine];
+ max = state.eMarks[nextLine];
+ if (pos < max && state.sCount[nextLine] < state.blkIndent) {
+ // non-empty line with negative indent should stop the list:
+ // - ```
+ // test
+ break;
+ }
+ if (state.src.charCodeAt(pos) !== marker) {
+ continue;
+ }
+ if (state.sCount[nextLine] - state.blkIndent >= 4) {
+ // closing fence should be indented less than 4 spaces
+ continue;
+ }
+ pos = state.skipChars(pos, marker);
+
+ // closing code fence must be at least as long as the opening one
+ if (pos - mem < len) {
+ continue;
+ }
+
+ // make sure tail has spaces only
+ pos = state.skipSpaces(pos);
+ if (pos < max) {
+ continue;
+ }
+ haveEndMarker = true;
+ // found!
+ break;
+ }
+
+ // If a fence has heading spaces, they should be removed from its inner block
+ len = state.sCount[startLine];
+ state.line = nextLine + (haveEndMarker ? 1 : 0);
+ const token = state.push('fence', 'code', 0);
+ token.info = params;
+ token.content = state.getLines(startLine + 1, nextLine, len, true);
+ token.markup = markup;
+ token.map = [startLine, state.line];
+ return true;
+}
+
+// Block quotes
+
+function blockquote(state, startLine, endLine, silent) {
+ let pos = state.bMarks[startLine] + state.tShift[startLine];
+ let max = state.eMarks[startLine];
+ const oldLineMax = state.lineMax;
+
+ // if it's indented more than 3 spaces, it should be a code block
+ if (state.sCount[startLine] - state.blkIndent >= 4) {
+ return false;
+ }
+
+ // check the block quote marker
+ if (state.src.charCodeAt(pos) !== 0x3E /* > */) {
+ return false;
+ }
+
+ // we know that it's going to be a valid blockquote,
+ // so no point trying to find the end of it in silent mode
+ if (silent) {
+ return true;
+ }
+ const oldBMarks = [];
+ const oldBSCount = [];
+ const oldSCount = [];
+ const oldTShift = [];
+ const terminatorRules = state.md.block.ruler.getRules('blockquote');
+ const oldParentType = state.parentType;
+ state.parentType = 'blockquote';
+ let lastLineEmpty = false;
+ let nextLine;
+
+ // Search the end of the block
+ //
+ // Block ends with either:
+ // 1. an empty line outside:
+ // ```
+ // > test
+ //
+ // ```
+ // 2. an empty line inside:
+ // ```
+ // >
+ // test
+ // ```
+ // 3. another tag:
+ // ```
+ // > test
+ // - - -
+ // ```
+ for (nextLine = startLine; nextLine < endLine; nextLine++) {
+ // check if it's outdented, i.e. it's inside list item and indented
+ // less than said list item:
+ //
+ // ```
+ // 1. anything
+ // > current blockquote
+ // 2. checking this line
+ // ```
+ const isOutdented = state.sCount[nextLine] < state.blkIndent;
+ pos = state.bMarks[nextLine] + state.tShift[nextLine];
+ max = state.eMarks[nextLine];
+ if (pos >= max) {
+ // Case 1: line is not inside the blockquote, and this line is empty.
+ break;
+ }
+ if (state.src.charCodeAt(pos++) === 0x3E /* > */ && !isOutdented) {
+ // This line is inside the blockquote.
+
+ // set offset past spaces and ">"
+ let initial = state.sCount[nextLine] + 1;
+ let spaceAfterMarker;
+ let adjustTab;
+
+ // skip one optional space after '>'
+ if (state.src.charCodeAt(pos) === 0x20 /* space */) {
+ // ' > test '
+ // ^ -- position start of line here:
+ pos++;
+ initial++;
+ adjustTab = false;
+ spaceAfterMarker = true;
+ } else if (state.src.charCodeAt(pos) === 0x09 /* tab */) {
+ spaceAfterMarker = true;
+ if ((state.bsCount[nextLine] + initial) % 4 === 3) {
+ // ' >\t test '
+ // ^ -- position start of line here (tab has width===1)
+ pos++;
+ initial++;
+ adjustTab = false;
+ } else {
+ // ' >\t test '
+ // ^ -- position start of line here + shift bsCount slightly
+ // to make extra space appear
+ adjustTab = true;
+ }
+ } else {
+ spaceAfterMarker = false;
+ }
+ let offset = initial;
+ oldBMarks.push(state.bMarks[nextLine]);
+ state.bMarks[nextLine] = pos;
+ while (pos < max) {
+ const ch = state.src.charCodeAt(pos);
+ if (isSpace(ch)) {
+ if (ch === 0x09) {
+ offset += 4 - (offset + state.bsCount[nextLine] + (adjustTab ? 1 : 0)) % 4;
+ } else {
+ offset++;
+ }
+ } else {
+ break;
+ }
+ pos++;
+ }
+ lastLineEmpty = pos >= max;
+ oldBSCount.push(state.bsCount[nextLine]);
+ state.bsCount[nextLine] = state.sCount[nextLine] + 1 + (spaceAfterMarker ? 1 : 0);
+ oldSCount.push(state.sCount[nextLine]);
+ state.sCount[nextLine] = offset - initial;
+ oldTShift.push(state.tShift[nextLine]);
+ state.tShift[nextLine] = pos - state.bMarks[nextLine];
+ continue;
+ }
+
+ // Case 2: line is not inside the blockquote, and the last line was empty.
+ if (lastLineEmpty) {
+ break;
+ }
+
+ // Case 3: another tag found.
+ let terminate = false;
+ for (let i = 0, l = terminatorRules.length; i < l; i++) {
+ if (terminatorRules[i](state, nextLine, endLine, true)) {
+ terminate = true;
+ break;
+ }
+ }
+ if (terminate) {
+ // Quirk to enforce "hard termination mode" for paragraphs;
+ // normally if you call `tokenize(state, startLine, nextLine)`,
+ // paragraphs will look below nextLine for paragraph continuation,
+ // but if blockquote is terminated by another tag, they shouldn't
+ state.lineMax = nextLine;
+ if (state.blkIndent !== 0) {
+ // state.blkIndent was non-zero, we now set it to zero,
+ // so we need to re-calculate all offsets to appear as
+ // if indent wasn't changed
+ oldBMarks.push(state.bMarks[nextLine]);
+ oldBSCount.push(state.bsCount[nextLine]);
+ oldTShift.push(state.tShift[nextLine]);
+ oldSCount.push(state.sCount[nextLine]);
+ state.sCount[nextLine] -= state.blkIndent;
+ }
+ break;
+ }
+ oldBMarks.push(state.bMarks[nextLine]);
+ oldBSCount.push(state.bsCount[nextLine]);
+ oldTShift.push(state.tShift[nextLine]);
+ oldSCount.push(state.sCount[nextLine]);
+
+ // A negative indentation means that this is a paragraph continuation
+ //
+ state.sCount[nextLine] = -1;
+ }
+ const oldIndent = state.blkIndent;
+ state.blkIndent = 0;
+ const token_o = state.push('blockquote_open', 'blockquote', 1);
+ token_o.markup = '>';
+ const lines = [startLine, 0];
+ token_o.map = lines;
+ state.md.block.tokenize(state, startLine, nextLine);
+ const token_c = state.push('blockquote_close', 'blockquote', -1);
+ token_c.markup = '>';
+ state.lineMax = oldLineMax;
+ state.parentType = oldParentType;
+ lines[1] = state.line;
+
+ // Restore original tShift; this might not be necessary since the parser
+ // has already been here, but just to make sure we can do that.
+ for (let i = 0; i < oldTShift.length; i++) {
+ state.bMarks[i + startLine] = oldBMarks[i];
+ state.tShift[i + startLine] = oldTShift[i];
+ state.sCount[i + startLine] = oldSCount[i];
+ state.bsCount[i + startLine] = oldBSCount[i];
+ }
+ state.blkIndent = oldIndent;
+ return true;
+}
+
+// Horizontal rule
+
+function hr(state, startLine, endLine, silent) {
+ const max = state.eMarks[startLine];
+ // if it's indented more than 3 spaces, it should be a code block
+ if (state.sCount[startLine] - state.blkIndent >= 4) {
+ return false;
+ }
+ let pos = state.bMarks[startLine] + state.tShift[startLine];
+ const marker = state.src.charCodeAt(pos++);
+
+ // Check hr marker
+ if (marker !== 0x2A /* * */ && marker !== 0x2D /* - */ && marker !== 0x5F /* _ */) {
+ return false;
+ }
+
+ // markers can be mixed with spaces, but there should be at least 3 of them
+
+ let cnt = 1;
+ while (pos < max) {
+ const ch = state.src.charCodeAt(pos++);
+ if (ch !== marker && !isSpace(ch)) {
+ return false;
+ }
+ if (ch === marker) {
+ cnt++;
+ }
+ }
+ if (cnt < 3) {
+ return false;
+ }
+ if (silent) {
+ return true;
+ }
+ state.line = startLine + 1;
+ const token = state.push('hr', 'hr', 0);
+ token.map = [startLine, state.line];
+ token.markup = Array(cnt + 1).join(String.fromCharCode(marker));
+ return true;
+}
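+// Illustrative examples (not part of the library): the rule above accepts any
+// line built from three or more identical '*', '-' or '_' markers, optionally
+// separated by spaces, e.g.
+//
+//     ***        ->  <hr>
+//     - - -      ->  <hr>
+//     _ _ _ _    ->  <hr>
+//
+// and pushes a single 'hr' token whose markup records the marker characters.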
+
+// Lists
+
+// Search `[-+*][\n ]`, returns next pos after marker on success
+// or -1 on fail.
+function skipBulletListMarker(state, startLine) {
+ const max = state.eMarks[startLine];
+ let pos = state.bMarks[startLine] + state.tShift[startLine];
+ const marker = state.src.charCodeAt(pos++);
+ // Check bullet
+ if (marker !== 0x2A /* * */ && marker !== 0x2D /* - */ && marker !== 0x2B /* + */) {
+ return -1;
+ }
+ if (pos < max) {
+ const ch = state.src.charCodeAt(pos);
+ if (!isSpace(ch)) {
+ // " -test " - is not a list item
+ return -1;
+ }
+ }
+ return pos;
+}
+
+// Search `\d+[.)][\n ]`, returns next pos after marker on success
+// or -1 on fail.
+function skipOrderedListMarker(state, startLine) {
+ const start = state.bMarks[startLine] + state.tShift[startLine];
+ const max = state.eMarks[startLine];
+ let pos = start;
+
+ // List marker should have at least 2 chars (digit + dot)
+ if (pos + 1 >= max) {
+ return -1;
+ }
+ let ch = state.src.charCodeAt(pos++);
+ if (ch < 0x30 /* 0 */ || ch > 0x39 /* 9 */) {
+ return -1;
+ }
+ for (;;) {
+ // EOL -> fail
+ if (pos >= max) {
+ return -1;
+ }
+ ch = state.src.charCodeAt(pos++);
+ if (ch >= 0x30 /* 0 */ && ch <= 0x39 /* 9 */) {
+ // List marker should have no more than 9 digits
+ // (prevents integer overflow in browsers)
+ if (pos - start >= 10) {
+ return -1;
+ }
+ continue;
+ }
+
+ // found valid marker
+ if (ch === 0x29 /* ) */ || ch === 0x2e /* . */) {
+ break;
+ }
+ return -1;
+ }
+ if (pos < max) {
+ ch = state.src.charCodeAt(pos);
+ if (!isSpace(ch)) {
+ // " 1.test " - is not a list item
+ return -1;
+ }
+ }
+ return pos;
+}
+function markTightParagraphs(state, idx) {
+ const level = state.level + 2;
+ for (let i = idx + 2, l = state.tokens.length - 2; i < l; i++) {
+ if (state.tokens[i].level === level && state.tokens[i].type === 'paragraph_open') {
+ state.tokens[i + 2].hidden = true;
+ state.tokens[i].hidden = true;
+ i += 2;
+ }
+ }
+}
+function list(state, startLine, endLine, silent) {
+ let max, pos, start, token;
+ let nextLine = startLine;
+ let tight = true;
+
+ // if it's indented more than 3 spaces, it should be a code block
+ if (state.sCount[nextLine] - state.blkIndent >= 4) {
+ return false;
+ }
+
+ // Special case:
+ // - item 1
+ // - item 2
+ // - item 3
+ // - item 4
+ // - this one is a paragraph continuation
+ if (state.listIndent >= 0 && state.sCount[nextLine] - state.listIndent >= 4 && state.sCount[nextLine] < state.blkIndent) {
+ return false;
+ }
+ let isTerminatingParagraph = false;
+
+ // limit conditions when list can interrupt
+ // a paragraph (validation mode only)
+ if (silent && state.parentType === 'paragraph') {
+ // Next list item should still terminate previous list item;
+ //
+ // This code can fail if plugins use blkIndent as well as lists,
+ // but I hope the spec gets fixed long before that happens.
+ //
+ if (state.sCount[nextLine] >= state.blkIndent) {
+ isTerminatingParagraph = true;
+ }
+ }
+
+ // Detect list type and position after marker
+ let isOrdered;
+ let markerValue;
+ let posAfterMarker;
+ if ((posAfterMarker = skipOrderedListMarker(state, nextLine)) >= 0) {
+ isOrdered = true;
+ start = state.bMarks[nextLine] + state.tShift[nextLine];
+ markerValue = Number(state.src.slice(start, posAfterMarker - 1));
+
+ // If we're starting a new ordered list right after
+ // a paragraph, it should start with 1.
+ if (isTerminatingParagraph && markerValue !== 1) return false;
+ } else if ((posAfterMarker = skipBulletListMarker(state, nextLine)) >= 0) {
+ isOrdered = false;
+ } else {
+ return false;
+ }
+
+ // If we're starting a new unordered list right after
+ // a paragraph, first line should not be empty.
+ if (isTerminatingParagraph) {
+ if (state.skipSpaces(posAfterMarker) >= state.eMarks[nextLine]) return false;
+ }
+
+ // For validation mode we can terminate immediately
+ if (silent) {
+ return true;
+ }
+
+ // We should terminate list on style change. Remember first one to compare.
+ const markerCharCode = state.src.charCodeAt(posAfterMarker - 1);
+
+ // Start list
+ const listTokIdx = state.tokens.length;
+ if (isOrdered) {
+ token = state.push('ordered_list_open', 'ol', 1);
+ if (markerValue !== 1) {
+ token.attrs = [['start', markerValue]];
+ }
+ } else {
+ token = state.push('bullet_list_open', 'ul', 1);
+ }
+ const listLines = [nextLine, 0];
+ token.map = listLines;
+ token.markup = String.fromCharCode(markerCharCode);
+
+ //
+ // Iterate list items
+ //
+
+ let prevEmptyEnd = false;
+ const terminatorRules = state.md.block.ruler.getRules('list');
+ const oldParentType = state.parentType;
+ state.parentType = 'list';
+ while (nextLine < endLine) {
+ pos = posAfterMarker;
+ max = state.eMarks[nextLine];
+ const initial = state.sCount[nextLine] + posAfterMarker - (state.bMarks[nextLine] + state.tShift[nextLine]);
+ let offset = initial;
+ while (pos < max) {
+ const ch = state.src.charCodeAt(pos);
+ if (ch === 0x09) {
+ offset += 4 - (offset + state.bsCount[nextLine]) % 4;
+ } else if (ch === 0x20) {
+ offset++;
+ } else {
+ break;
+ }
+ pos++;
+ }
+ const contentStart = pos;
+ let indentAfterMarker;
+ if (contentStart >= max) {
+ // trimming space in "- \n 3" case, indent is 1 here
+ indentAfterMarker = 1;
+ } else {
+ indentAfterMarker = offset - initial;
+ }
+
+ // If we have more than 4 spaces, the indent is 1
+ // (the rest is just indented code block)
+ if (indentAfterMarker > 4) {
+ indentAfterMarker = 1;
+ }
+
+ // " - test"
+ // ^^^^^ - calculating total length of this thing
+ const indent = initial + indentAfterMarker;
+
+ // Run subparser & write tokens
+ token = state.push('list_item_open', 'li', 1);
+ token.markup = String.fromCharCode(markerCharCode);
+ const itemLines = [nextLine, 0];
+ token.map = itemLines;
+ if (isOrdered) {
+ token.info = state.src.slice(start, posAfterMarker - 1);
+ }
+
+ // change current state, then restore it after parser subcall
+ const oldTight = state.tight;
+ const oldTShift = state.tShift[nextLine];
+ const oldSCount = state.sCount[nextLine];
+
+ // - example list
+ // ^ listIndent position will be here
+ // ^ blkIndent position will be here
+ //
+ const oldListIndent = state.listIndent;
+ state.listIndent = state.blkIndent;
+ state.blkIndent = indent;
+ state.tight = true;
+ state.tShift[nextLine] = contentStart - state.bMarks[nextLine];
+ state.sCount[nextLine] = offset;
+ if (contentStart >= max && state.isEmpty(nextLine + 1)) {
+ // workaround for this case
+ // (list item is empty, list terminates before "foo"):
+ // ~~~~~~~~
+ // -
+ //
+ // foo
+ // ~~~~~~~~
+ state.line = Math.min(state.line + 2, endLine);
+ } else {
+ state.md.block.tokenize(state, nextLine, endLine, true);
+ }
+
+ // If any of list item is tight, mark list as tight
+ if (!state.tight || prevEmptyEnd) {
+ tight = false;
+ }
+ // Item become loose if finish with empty line,
+ // but we should filter last element, because it means list finish
+ prevEmptyEnd = state.line - nextLine > 1 && state.isEmpty(state.line - 1);
+ state.blkIndent = state.listIndent;
+ state.listIndent = oldListIndent;
+ state.tShift[nextLine] = oldTShift;
+ state.sCount[nextLine] = oldSCount;
+ state.tight = oldTight;
+ token = state.push('list_item_close', 'li', -1);
+ token.markup = String.fromCharCode(markerCharCode);
+ nextLine = state.line;
+ itemLines[1] = nextLine;
+ if (nextLine >= endLine) {
+ break;
+ }
+
+ //
+ // Try to check if list is terminated or continued.
+ //
+ if (state.sCount[nextLine] < state.blkIndent) {
+ break;
+ }
+
+ // if it's indented more than 3 spaces, it should be a code block
+ if (state.sCount[nextLine] - state.blkIndent >= 4) {
+ break;
+ }
+
+ // fail if terminating block found
+ let terminate = false;
+ for (let i = 0, l = terminatorRules.length; i < l; i++) {
+ if (terminatorRules[i](state, nextLine, endLine, true)) {
+ terminate = true;
+ break;
+ }
+ }
+ if (terminate) {
+ break;
+ }
+
+ // fail if list has another type
+ if (isOrdered) {
+ posAfterMarker = skipOrderedListMarker(state, nextLine);
+ if (posAfterMarker < 0) {
+ break;
+ }
+ start = state.bMarks[nextLine] + state.tShift[nextLine];
+ } else {
+ posAfterMarker = skipBulletListMarker(state, nextLine);
+ if (posAfterMarker < 0) {
+ break;
+ }
+ }
+ if (markerCharCode !== state.src.charCodeAt(posAfterMarker - 1)) {
+ break;
+ }
+ }
+
+ // Finalize list
+ if (isOrdered) {
+ token = state.push('ordered_list_close', 'ol', -1);
+ } else {
+ token = state.push('bullet_list_close', 'ul', -1);
+ }
+ token.markup = String.fromCharCode(markerCharCode);
+ listLines[1] = nextLine;
+ state.line = nextLine;
+ state.parentType = oldParentType;
+
+ // mark paragraphs tight if needed
+ if (tight) {
+ markTightParagraphs(state, listTokIdx);
+ }
+ return true;
+}
+function reference(state, startLine, _endLine, silent) {
+ let pos = state.bMarks[startLine] + state.tShift[startLine];
+ let max = state.eMarks[startLine];
+ let nextLine = startLine + 1;
+
+ // if it's indented more than 3 spaces, it should be a code block
+ if (state.sCount[startLine] - state.blkIndent >= 4) {
+ return false;
+ }
+ if (state.src.charCodeAt(pos) !== 0x5B /* [ */) {
+ return false;
+ }
+ function getNextLine(nextLine) {
+ const endLine = state.lineMax;
+ if (nextLine >= endLine || state.isEmpty(nextLine)) {
+ // empty line or end of input
+ return null;
+ }
+ let isContinuation = false;
+
+ // this would be a code block normally, but after paragraph
+ // it's considered a lazy continuation regardless of what's there
+ if (state.sCount[nextLine] - state.blkIndent > 3) {
+ isContinuation = true;
+ }
+
+ // quirk for blockquotes, this line should already be checked by that rule
+ if (state.sCount[nextLine] < 0) {
+ isContinuation = true;
+ }
+ if (!isContinuation) {
+ const terminatorRules = state.md.block.ruler.getRules('reference');
+ const oldParentType = state.parentType;
+ state.parentType = 'reference';
+
+ // Some tags can terminate paragraph without empty line.
+ let terminate = false;
+ for (let i = 0, l = terminatorRules.length; i < l; i++) {
+ if (terminatorRules[i](state, nextLine, endLine, true)) {
+ terminate = true;
+ break;
+ }
+ }
+ state.parentType = oldParentType;
+ if (terminate) {
+ // terminated by another block
+ return null;
+ }
+ }
+ const pos = state.bMarks[nextLine] + state.tShift[nextLine];
+ const max = state.eMarks[nextLine];
+
+ // max + 1 explicitly includes the newline
+ return state.src.slice(pos, max + 1);
+ }
+ let str = state.src.slice(pos, max + 1);
+ max = str.length;
+ let labelEnd = -1;
+ for (pos = 1; pos < max; pos++) {
+ const ch = str.charCodeAt(pos);
+ if (ch === 0x5B /* [ */) {
+ return false;
+ } else if (ch === 0x5D /* ] */) {
+ labelEnd = pos;
+ break;
+ } else if (ch === 0x0A /* \n */) {
+ const lineContent = getNextLine(nextLine);
+ if (lineContent !== null) {
+ str += lineContent;
+ max = str.length;
+ nextLine++;
+ }
+ } else if (ch === 0x5C /* \ */) {
+ pos++;
+ if (pos < max && str.charCodeAt(pos) === 0x0A) {
+ const lineContent = getNextLine(nextLine);
+ if (lineContent !== null) {
+ str += lineContent;
+ max = str.length;
+ nextLine++;
+ }
+ }
+ }
+ }
+ if (labelEnd < 0 || str.charCodeAt(labelEnd + 1) !== 0x3A /* : */) {
+ return false;
+ }
+
+ // [label]: destination 'title'
+ // ^^^ skip optional whitespace here
+ for (pos = labelEnd + 2; pos < max; pos++) {
+ const ch = str.charCodeAt(pos);
+ if (ch === 0x0A) {
+ const lineContent = getNextLine(nextLine);
+ if (lineContent !== null) {
+ str += lineContent;
+ max = str.length;
+ nextLine++;
+ }
+ } else if (isSpace(ch)) ;else {
+ break;
+ }
+ }
+
+ // [label]: destination 'title'
+ // ^^^^^^^^^^^ parse this
+ const destRes = state.md.helpers.parseLinkDestination(str, pos, max);
+ if (!destRes.ok) {
+ return false;
+ }
+ const href = state.md.normalizeLink(destRes.str);
+ if (!state.md.validateLink(href)) {
+ return false;
+ }
+ pos = destRes.pos;
+
+ // save cursor state, we could require to rollback later
+ const destEndPos = pos;
+ const destEndLineNo = nextLine;
+
+ // [label]: destination 'title'
+ // ^^^ skipping those spaces
+ const start = pos;
+ for (; pos < max; pos++) {
+ const ch = str.charCodeAt(pos);
+ if (ch === 0x0A) {
+ const lineContent = getNextLine(nextLine);
+ if (lineContent !== null) {
+ str += lineContent;
+ max = str.length;
+ nextLine++;
+ }
+ } else if (isSpace(ch)) ;else {
+ break;
+ }
+ }
+
+ // [label]: destination 'title'
+ // ^^^^^^^ parse this
+ let titleRes = state.md.helpers.parseLinkTitle(str, pos, max);
+ while (titleRes.can_continue) {
+ const lineContent = getNextLine(nextLine);
+ if (lineContent === null) break;
+ str += lineContent;
+ pos = max;
+ max = str.length;
+ nextLine++;
+ titleRes = state.md.helpers.parseLinkTitle(str, pos, max, titleRes);
+ }
+ let title;
+ if (pos < max && start !== pos && titleRes.ok) {
+ title = titleRes.str;
+ pos = titleRes.pos;
+ } else {
+ title = '';
+ pos = destEndPos;
+ nextLine = destEndLineNo;
+ }
+
+ // skip trailing spaces until the rest of the line
+ while (pos < max) {
+ const ch = str.charCodeAt(pos);
+ if (!isSpace(ch)) {
+ break;
+ }
+ pos++;
+ }
+ if (pos < max && str.charCodeAt(pos) !== 0x0A) {
+ if (title) {
+ // garbage at the end of the line after title,
+ // but it could still be a valid reference if we roll back
+ title = '';
+ pos = destEndPos;
+ nextLine = destEndLineNo;
+ while (pos < max) {
+ const ch = str.charCodeAt(pos);
+ if (!isSpace(ch)) {
+ break;
+ }
+ pos++;
+ }
+ }
+ }
+ if (pos < max && str.charCodeAt(pos) !== 0x0A) {
+ // garbage at the end of the line
+ return false;
+ }
+ const label = normalizeReference(str.slice(1, labelEnd));
+ if (!label) {
+ // CommonMark 0.20 disallows empty labels
+ return false;
+ }
+
+ // Reference can not terminate anything. This check is for safety only.
+ /* istanbul ignore if */
+ if (silent) {
+ return true;
+ }
+ if (typeof state.env.references === 'undefined') {
+ state.env.references = {};
+ }
+ if (typeof state.env.references[label] === 'undefined') {
+ state.env.references[label] = {
+ title,
+ href
+ };
+ }
+ state.line = nextLine;
+ return true;
+}
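+// Illustrative example (assumed input, not part of the library): parsing the block
+//
+//     [docs]: https://example.org/manual "The manual"
+//
+// stores an entry under the case-folded label in `state.env.references`,
+// roughly { title: 'The manual', href: 'https://example.org/manual' },
+// which the inline link/image rules later resolve for `[text][docs]`.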
+
+// List of valid html blocks names, according to commonmark spec
+// https://spec.commonmark.org/0.30/#html-blocks
+
+var block_names = ['address', 'article', 'aside', 'base', 'basefont', 'blockquote', 'body', 'caption', 'center', 'col', 'colgroup', 'dd', 'details', 'dialog', 'dir', 'div', 'dl', 'dt', 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'frame', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head', 'header', 'hr', 'html', 'iframe', 'legend', 'li', 'link', 'main', 'menu', 'menuitem', 'nav', 'noframes', 'ol', 'optgroup', 'option', 'p', 'param', 'search', 'section', 'summary', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'title', 'tr', 'track', 'ul'];
+
+// Regexps to match html elements
+
+const attr_name = '[a-zA-Z_:][a-zA-Z0-9:._-]*';
+const unquoted = '[^"\'=<>`\\x00-\\x20]+';
+const single_quoted = "'[^']*'";
+const double_quoted = '"[^"]*"';
+const attr_value = '(?:' + unquoted + '|' + single_quoted + '|' + double_quoted + ')';
+const attribute = '(?:\\s+' + attr_name + '(?:\\s*=\\s*' + attr_value + ')?)';
+const open_tag = '<[A-Za-z][A-Za-z0-9\\-]*' + attribute + '*\\s*\\/?>';
+const close_tag = '<\\/[A-Za-z][A-Za-z0-9\\-]*\\s*>';
+const comment = '<!-->|<!--->|<!--[\\s\\S]*?-->';
+const processing = '<[?][\\s\\S]*?[?]>';
+const declaration = '<![A-Za-z][^>]*>';
+const cdata = '<!\\[CDATA\\[[\\s\\S]*?\\]\\]>';
+const HTML_TAG_RE = new RegExp('^(?:' + open_tag + '|' + close_tag + '|' + comment + '|' + processing + '|' + declaration + '|' + cdata + ')');
+const HTML_OPEN_CLOSE_TAG_RE = new RegExp('^(?:' + open_tag + '|' + close_tag + ')');
+
+// HTML block
+
+// An array of opening and corresponding closing sequences for html tags,
+// last argument defines whether it can terminate a paragraph or not
+//
+const HTML_SEQUENCES = [[/^<(script|pre|style|textarea)(?=(\s|>|$))/i, /<\/(script|pre|style|textarea)>/i, true], [/^<!--/, /-->/, true], [/^<\?/, /\?>/, true], [/^<![A-Z]/, />/, true], [/^<!\[CDATA\[/, /\]\]>/, true], [new RegExp('^</?(' + block_names.join('|') + ')(?=(\\s|/?>|$))', 'i'), /^$/, true], [new RegExp(HTML_OPEN_CLOSE_TAG_RE.source + '\\s*$'), /^$/, false]];
+function html_block(state, startLine, endLine, silent) {
+ let pos = state.bMarks[startLine] + state.tShift[startLine];
+ let max = state.eMarks[startLine];
+
+ // if it's indented more than 3 spaces, it should be a code block
+ if (state.sCount[startLine] - state.blkIndent >= 4) {
+ return false;
+ }
+ if (!state.md.options.html) {
+ return false;
+ }
+ if (state.src.charCodeAt(pos) !== 0x3C /* < */) {
+ return false;
+ }
+ let lineText = state.src.slice(pos, max);
+ let i = 0;
+ for (; i < HTML_SEQUENCES.length; i++) {
+ if (HTML_SEQUENCES[i][0].test(lineText)) {
+ break;
+ }
+ }
+ if (i === HTML_SEQUENCES.length) {
+ return false;
+ }
+ if (silent) {
+ // true if this sequence can be a terminator, false otherwise
+ return HTML_SEQUENCES[i][2];
+ }
+ let nextLine = startLine + 1;
+
+ // If we are here - we detected HTML block.
+ // Let's roll down till block end.
+ if (!HTML_SEQUENCES[i][1].test(lineText)) {
+ for (; nextLine < endLine; nextLine++) {
+ if (state.sCount[nextLine] < state.blkIndent) {
+ break;
+ }
+ pos = state.bMarks[nextLine] + state.tShift[nextLine];
+ max = state.eMarks[nextLine];
+ lineText = state.src.slice(pos, max);
+ if (HTML_SEQUENCES[i][1].test(lineText)) {
+ if (lineText.length !== 0) {
+ nextLine++;
+ }
+ break;
+ }
+ }
+ }
+ state.line = nextLine;
+ const token = state.push('html_block', '', 0);
+ token.map = [startLine, nextLine];
+ token.content = state.getLines(startLine, nextLine, state.blkIndent, true);
+ return true;
+}
+
+// heading (#, ##, ...)
+
+function heading(state, startLine, endLine, silent) {
+ let pos = state.bMarks[startLine] + state.tShift[startLine];
+ let max = state.eMarks[startLine];
+
+ // if it's indented more than 3 spaces, it should be a code block
+ if (state.sCount[startLine] - state.blkIndent >= 4) {
+ return false;
+ }
+ let ch = state.src.charCodeAt(pos);
+ if (ch !== 0x23 /* # */ || pos >= max) {
+ return false;
+ }
+
+ // count heading level
+ let level = 1;
+ ch = state.src.charCodeAt(++pos);
+ while (ch === 0x23 /* # */ && pos < max && level <= 6) {
+ level++;
+ ch = state.src.charCodeAt(++pos);
+ }
+ if (level > 6 || pos < max && !isSpace(ch)) {
+ return false;
+ }
+ if (silent) {
+ return true;
+ }
+
+ // Let's cut tails like ' ### ' from the end of string
+
+ max = state.skipSpacesBack(max, pos);
+ const tmp = state.skipCharsBack(max, 0x23, pos); // #
+ if (tmp > pos && isSpace(state.src.charCodeAt(tmp - 1))) {
+ max = tmp;
+ }
+ state.line = startLine + 1;
+ const token_o = state.push('heading_open', 'h' + String(level), 1);
+ token_o.markup = '########'.slice(0, level);
+ token_o.map = [startLine, state.line];
+ const token_i = state.push('inline', '', 0);
+ token_i.content = state.src.slice(pos, max).trim();
+ token_i.map = [startLine, state.line];
+ token_i.children = [];
+ const token_c = state.push('heading_close', 'h' + String(level), -1);
+ token_c.markup = '########'.slice(0, level);
+ return true;
+}
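+// Illustrative example (not part of the library): the line `## Title` yields
+// three tokens - heading_open (tag 'h2', markup '##'), an 'inline' token with
+// content 'Title', and heading_close (tag 'h2'); a trailing run of '#' preceded
+// by a space is trimmed and never reaches the inline parser.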
+
+// lheading (---, ===)
+
+function lheading(state, startLine, endLine /*, silent */) {
+ const terminatorRules = state.md.block.ruler.getRules('paragraph');
+
+ // if it's indented more than 3 spaces, it should be a code block
+ if (state.sCount[startLine] - state.blkIndent >= 4) {
+ return false;
+ }
+ const oldParentType = state.parentType;
+ state.parentType = 'paragraph'; // use paragraph to match terminatorRules
+
+ // jump line-by-line until empty one or EOF
+ let level = 0;
+ let marker;
+ let nextLine = startLine + 1;
+ for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {
+ // this would be a code block normally, but after paragraph
+ // it's considered a lazy continuation regardless of what's there
+ if (state.sCount[nextLine] - state.blkIndent > 3) {
+ continue;
+ }
+
+ //
+ // Check for underline in setext header
+ //
+ if (state.sCount[nextLine] >= state.blkIndent) {
+ let pos = state.bMarks[nextLine] + state.tShift[nextLine];
+ const max = state.eMarks[nextLine];
+ if (pos < max) {
+ marker = state.src.charCodeAt(pos);
+ if (marker === 0x2D /* - */ || marker === 0x3D /* = */) {
+ pos = state.skipChars(pos, marker);
+ pos = state.skipSpaces(pos);
+ if (pos >= max) {
+ level = marker === 0x3D /* = */ ? 1 : 2;
+ break;
+ }
+ }
+ }
+ }
+
+ // quirk for blockquotes, this line should already be checked by that rule
+ if (state.sCount[nextLine] < 0) {
+ continue;
+ }
+
+ // Some tags can terminate paragraph without empty line.
+ let terminate = false;
+ for (let i = 0, l = terminatorRules.length; i < l; i++) {
+ if (terminatorRules[i](state, nextLine, endLine, true)) {
+ terminate = true;
+ break;
+ }
+ }
+ if (terminate) {
+ break;
+ }
+ }
+ if (!level) {
+ // Didn't find valid underline
+ return false;
+ }
+ const content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();
+ state.line = nextLine + 1;
+ const token_o = state.push('heading_open', 'h' + String(level), 1);
+ token_o.markup = String.fromCharCode(marker);
+ token_o.map = [startLine, state.line];
+ const token_i = state.push('inline', '', 0);
+ token_i.content = content;
+ token_i.map = [startLine, state.line - 1];
+ token_i.children = [];
+ const token_c = state.push('heading_close', 'h' + String(level), -1);
+ token_c.markup = String.fromCharCode(marker);
+ state.parentType = oldParentType;
+ return true;
+}
+
+// Paragraph
+
+function paragraph(state, startLine, endLine) {
+ const terminatorRules = state.md.block.ruler.getRules('paragraph');
+ const oldParentType = state.parentType;
+ let nextLine = startLine + 1;
+ state.parentType = 'paragraph';
+
+ // jump line-by-line until empty one or EOF
+ for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {
+ // this would be a code block normally, but after paragraph
+ // it's considered a lazy continuation regardless of what's there
+ if (state.sCount[nextLine] - state.blkIndent > 3) {
+ continue;
+ }
+
+ // quirk for blockquotes, this line should already be checked by that rule
+ if (state.sCount[nextLine] < 0) {
+ continue;
+ }
+
+ // Some tags can terminate paragraph without empty line.
+ let terminate = false;
+ for (let i = 0, l = terminatorRules.length; i < l; i++) {
+ if (terminatorRules[i](state, nextLine, endLine, true)) {
+ terminate = true;
+ break;
+ }
+ }
+ if (terminate) {
+ break;
+ }
+ }
+ const content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();
+ state.line = nextLine;
+ const token_o = state.push('paragraph_open', 'p', 1);
+ token_o.map = [startLine, state.line];
+ const token_i = state.push('inline', '', 0);
+ token_i.content = content;
+ token_i.map = [startLine, state.line];
+ token_i.children = [];
+ state.push('paragraph_close', 'p', -1);
+ state.parentType = oldParentType;
+ return true;
+}
+
+/** internal
+ * class ParserBlock
+ *
+ * Block-level tokenizer.
+ **/
+
+const _rules$1 = [
+// First 2 params - rule name & source. Secondary array - list of rules,
+// which can be terminated by this one.
+['table', table, ['paragraph', 'reference']], ['code', code], ['fence', fence, ['paragraph', 'reference', 'blockquote', 'list']], ['blockquote', blockquote, ['paragraph', 'reference', 'blockquote', 'list']], ['hr', hr, ['paragraph', 'reference', 'blockquote', 'list']], ['list', list, ['paragraph', 'reference', 'blockquote']], ['reference', reference], ['html_block', html_block, ['paragraph', 'reference', 'blockquote']], ['heading', heading, ['paragraph', 'reference', 'blockquote']], ['lheading', lheading], ['paragraph', paragraph]];
+
+/**
+ * new ParserBlock()
+ **/
+function ParserBlock() {
+ /**
+ * ParserBlock#ruler -> Ruler
+ *
+ * [[Ruler]] instance. Keep configuration of block rules.
+ **/
+ this.ruler = new Ruler();
+ for (let i = 0; i < _rules$1.length; i++) {
+ this.ruler.push(_rules$1[i][0], _rules$1[i][1], {
+ alt: (_rules$1[i][2] || []).slice()
+ });
+ }
+}
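+// Usage sketch (hypothetical rule, assumes the public Ruler API): plugins can
+// register additional block rules and declare which standard rules they may
+// terminate via the same `alt` option used in the table above, e.g.
+//
+//     md.block.ruler.before('paragraph', 'my_block', myBlockRule, {
+//       alt: ['paragraph', 'reference', 'blockquote', 'list']
+//     });
+//
+// where `my_block` / `myBlockRule` are placeholder names.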
+
+// Generate tokens for input range
+//
+ParserBlock.prototype.tokenize = function (state, startLine, endLine) {
+ const rules = this.ruler.getRules('');
+ const len = rules.length;
+ const maxNesting = state.md.options.maxNesting;
+ let line = startLine;
+ let hasEmptyLines = false;
+ while (line < endLine) {
+ state.line = line = state.skipEmptyLines(line);
+ if (line >= endLine) {
+ break;
+ }
+
+ // Termination condition for nested calls.
+ // Nested calls currently used for blockquotes & lists
+ if (state.sCount[line] < state.blkIndent) {
+ break;
+ }
+
+ // If nesting level exceeded - skip tail to the end. That's not ordinary
+ // situation and we should not care about content.
+ if (state.level >= maxNesting) {
+ state.line = endLine;
+ break;
+ }
+
+ // Try all possible rules.
+ // On success, rule should:
+ //
+ // - update `state.line`
+ // - update `state.tokens`
+ // - return true
+ const prevLine = state.line;
+ let ok = false;
+ for (let i = 0; i < len; i++) {
+ ok = rules[i](state, line, endLine, false);
+ if (ok) {
+ if (prevLine >= state.line) {
+ throw new Error("block rule didn't increment state.line");
+ }
+ break;
+ }
+ }
+
+ // this can only happen if user disables paragraph rule
+ if (!ok) throw new Error('none of the block rules matched');
+
+ // set state.tight if we had an empty line before current tag
+ // i.e. latest empty line should not count
+ state.tight = !hasEmptyLines;
+
+ // paragraph might "eat" one newline after it in nested lists
+ if (state.isEmpty(state.line - 1)) {
+ hasEmptyLines = true;
+ }
+ line = state.line;
+ if (line < endLine && state.isEmpty(line)) {
+ hasEmptyLines = true;
+ line++;
+ state.line = line;
+ }
+ }
+};
+
+/**
+ * ParserBlock.parse(str, md, env, outTokens)
+ *
+ * Process input string and push block tokens into `outTokens`
+ **/
+ParserBlock.prototype.parse = function (src, md, env, outTokens) {
+ if (!src) {
+ return;
+ }
+ const state = new this.State(src, md, env, outTokens);
+ this.tokenize(state, state.line, state.lineMax);
+};
+ParserBlock.prototype.State = StateBlock;
+
+// Inline parser state
+
+function StateInline(src, md, env, outTokens) {
+ this.src = src;
+ this.env = env;
+ this.md = md;
+ this.tokens = outTokens;
+ this.tokens_meta = Array(outTokens.length);
+ this.pos = 0;
+ this.posMax = this.src.length;
+ this.level = 0;
+ this.pending = '';
+ this.pendingLevel = 0;
+
+ // Stores { start: end } pairs. Useful for backtrack
+ // optimization of pairs parse (emphasis, strikes).
+ this.cache = {};
+
+ // List of emphasis-like delimiters for current tag
+ this.delimiters = [];
+
+ // Stack of delimiter lists for upper level tags
+ this._prev_delimiters = [];
+
+ // backtick length => last seen position
+ this.backticks = {};
+ this.backticksScanned = false;
+
+ // Counter used to disable inline linkify-it execution
+  // inside <a> and markdown links
+ this.linkLevel = 0;
+}
+
+// Flush pending text
+//
+StateInline.prototype.pushPending = function () {
+ const token = new Token('text', '', 0);
+ token.content = this.pending;
+ token.level = this.pendingLevel;
+ this.tokens.push(token);
+ this.pending = '';
+ return token;
+};
+
+// Push new token to "stream".
+// If pending text exists - flush it as text token
+//
+StateInline.prototype.push = function (type, tag, nesting) {
+ if (this.pending) {
+ this.pushPending();
+ }
+ const token = new Token(type, tag, nesting);
+ let token_meta = null;
+ if (nesting < 0) {
+ // closing tag
+ this.level--;
+ this.delimiters = this._prev_delimiters.pop();
+ }
+ token.level = this.level;
+ if (nesting > 0) {
+ // opening tag
+ this.level++;
+ this._prev_delimiters.push(this.delimiters);
+ this.delimiters = [];
+ token_meta = {
+ delimiters: this.delimiters
+ };
+ }
+ this.pendingLevel = this.level;
+ this.tokens.push(token);
+ this.tokens_meta.push(token_meta);
+ return token;
+};
+
+// Scan a sequence of emphasis-like markers, and determine whether
+// it can start an emphasis sequence or end an emphasis sequence.
+//
+// - start - position to scan from (it should point at a valid marker);
+// - canSplitWord - determine if these markers can be found inside a word
+//
+StateInline.prototype.scanDelims = function (start, canSplitWord) {
+ const max = this.posMax;
+ const marker = this.src.charCodeAt(start);
+
+ // treat beginning of the line as a whitespace
+ const lastChar = start > 0 ? this.src.charCodeAt(start - 1) : 0x20;
+ let pos = start;
+ while (pos < max && this.src.charCodeAt(pos) === marker) {
+ pos++;
+ }
+ const count = pos - start;
+
+ // treat end of the line as a whitespace
+ const nextChar = pos < max ? this.src.charCodeAt(pos) : 0x20;
+ const isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar));
+ const isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar));
+ const isLastWhiteSpace = isWhiteSpace(lastChar);
+ const isNextWhiteSpace = isWhiteSpace(nextChar);
+ const left_flanking = !isNextWhiteSpace && (!isNextPunctChar || isLastWhiteSpace || isLastPunctChar);
+ const right_flanking = !isLastWhiteSpace && (!isLastPunctChar || isNextWhiteSpace || isNextPunctChar);
+ const can_open = left_flanking && (canSplitWord || !right_flanking || isLastPunctChar);
+ const can_close = right_flanking && (canSplitWord || !left_flanking || isNextPunctChar);
+ return {
+ can_open,
+ can_close,
+ length: count
+ };
+};
+
+// re-export Token class to use in block rules
+StateInline.prototype.Token = Token;
+
+// Skip text characters for text token, place those to pending buffer
+// and increment current pos
+
+// Rule to skip pure text
+// '{}$%@~+=:' reserved for extensions
+
+// !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~
+
+// !!!! Don't confuse with "Markdown ASCII Punctuation" chars
+// http://spec.commonmark.org/0.15/#ascii-punctuation-character
+function isTerminatorChar(ch) {
+ switch (ch) {
+ case 0x0A /* \n */:
+ case 0x21 /* ! */:
+ case 0x23 /* # */:
+ case 0x24 /* $ */:
+ case 0x25 /* % */:
+ case 0x26 /* & */:
+ case 0x2A /* * */:
+ case 0x2B /* + */:
+ case 0x2D /* - */:
+ case 0x3A /* : */:
+ case 0x3C /* < */:
+ case 0x3D /* = */:
+ case 0x3E /* > */:
+ case 0x40 /* @ */:
+ case 0x5B /* [ */:
+ case 0x5C /* \ */:
+ case 0x5D /* ] */:
+ case 0x5E /* ^ */:
+ case 0x5F /* _ */:
+ case 0x60 /* ` */:
+ case 0x7B /* { */:
+ case 0x7D /* } */:
+ case 0x7E /* ~ */:
+ return true;
+ default:
+ return false;
+ }
+}
+function text(state, silent) {
+ let pos = state.pos;
+ while (pos < state.posMax && !isTerminatorChar(state.src.charCodeAt(pos))) {
+ pos++;
+ }
+ if (pos === state.pos) {
+ return false;
+ }
+ if (!silent) {
+ state.pending += state.src.slice(state.pos, pos);
+ }
+ state.pos = pos;
+ return true;
+}
+
+// Alternative implementation, kept for reference.
+//
+// It costs ~10% of performance, but allows extending the terminator list if it
+// is placed on a `ParserInline` property. We will probably switch to it at some
+// point, if such flexibility turns out to be required.
+
+/*
+var TERMINATOR_RE = /[\n!#$%&*+\-:<=>@[\\\]^_`{}~]/;
+
+module.exports = function text(state, silent) {
+ var pos = state.pos,
+ idx = state.src.slice(pos).search(TERMINATOR_RE);
+
+ // first char is terminator -> empty text
+ if (idx === 0) { return false; }
+
+ // no terminator -> text till end of string
+ if (idx < 0) {
+ if (!silent) { state.pending += state.src.slice(pos); }
+ state.pos = state.src.length;
+ return true;
+ }
+
+ if (!silent) { state.pending += state.src.slice(pos, pos + idx); }
+
+ state.pos += idx;
+
+ return true;
+}; */
+
+// Process links like https://example.org/
+
+// RFC3986: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
+const SCHEME_RE = /(?:^|[^a-z0-9.+-])([a-z][a-z0-9.+-]*)$/i;
+function linkify(state, silent) {
+ if (!state.md.options.linkify) return false;
+ if (state.linkLevel > 0) return false;
+ const pos = state.pos;
+ const max = state.posMax;
+ if (pos + 3 > max) return false;
+ if (state.src.charCodeAt(pos) !== 0x3A /* : */) return false;
+ if (state.src.charCodeAt(pos + 1) !== 0x2F /* / */) return false;
+ if (state.src.charCodeAt(pos + 2) !== 0x2F /* / */) return false;
+ const match = state.pending.match(SCHEME_RE);
+ if (!match) return false;
+ const proto = match[1];
+ const link = state.md.linkify.matchAtStart(state.src.slice(pos - proto.length));
+ if (!link) return false;
+ let url = link.url;
+
+ // invalid link, but still detected by linkify somehow;
+ // need to check to prevent infinite loop below
+ if (url.length <= proto.length) return false;
+
+ // disallow '*' at the end of the link (conflicts with emphasis)
+ url = url.replace(/\*+$/, '');
+ const fullUrl = state.md.normalizeLink(url);
+ if (!state.md.validateLink(fullUrl)) return false;
+ if (!silent) {
+ state.pending = state.pending.slice(0, -proto.length);
+ const token_o = state.push('link_open', 'a', 1);
+ token_o.attrs = [['href', fullUrl]];
+ token_o.markup = 'linkify';
+ token_o.info = 'auto';
+ const token_t = state.push('text', '', 0);
+ token_t.content = state.md.normalizeLinkText(url);
+ const token_c = state.push('link_close', 'a', -1);
+ token_c.markup = 'linkify';
+ token_c.info = 'auto';
+ }
+ state.pos += url.length - proto.length;
+ return true;
+}
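+// Usage sketch (illustrative, using the constructor defined later in this bundle):
+// this rule only runs when the `linkify` option is enabled, e.g.
+//
+//     const md = new MarkdownIt({ linkify: true });
+//     md.render('See https://example.org for details');
+//
+// wraps the bare URL in a link, whereas the default `linkify: false`
+// leaves it as plain text.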
+
+// Process '\n'
+
+function newline(state, silent) {
+ let pos = state.pos;
+ if (state.src.charCodeAt(pos) !== 0x0A /* \n */) {
+ return false;
+ }
+ const pmax = state.pending.length - 1;
+ const max = state.posMax;
+
+ // ' \n' -> hardbreak
+ // Lookup in pending chars is bad practice! Don't copy to other rules!
+  // The pending string is stored in concat mode; indexed lookups would cause
+  // conversion to flat mode.
+ if (!silent) {
+ if (pmax >= 0 && state.pending.charCodeAt(pmax) === 0x20) {
+ if (pmax >= 1 && state.pending.charCodeAt(pmax - 1) === 0x20) {
+ // Find whitespaces tail of pending chars.
+ let ws = pmax - 1;
+ while (ws >= 1 && state.pending.charCodeAt(ws - 1) === 0x20) ws--;
+ state.pending = state.pending.slice(0, ws);
+ state.push('hardbreak', 'br', 0);
+ } else {
+ state.pending = state.pending.slice(0, -1);
+ state.push('softbreak', 'br', 0);
+ }
+ } else {
+ state.push('softbreak', 'br', 0);
+ }
+ }
+ pos++;
+
+ // skip heading spaces for next line
+ while (pos < max && isSpace(state.src.charCodeAt(pos))) {
+ pos++;
+ }
+ state.pos = pos;
+ return true;
+}
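+// Illustrative example (not part of the library): inside a paragraph,
+//
+//     'line  \nnext'  (two trailing spaces)  -> hardbreak token (<br>)
+//     'line\nnext'                           -> softbreak token
+//
+// A softbreak renders as a newline by default, or as <br> when the `breaks`
+// option is set; in both cases leading spaces of the following line are skipped.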
+
+// Process escaped chars and hardbreaks
+
+const ESCAPED = [];
+for (let i = 0; i < 256; i++) {
+ ESCAPED.push(0);
+}
+'\\!"#$%&\'()*+,./:;<=>?@[]^_`{|}~-'.split('').forEach(function (ch) {
+ ESCAPED[ch.charCodeAt(0)] = 1;
+});
+function escape(state, silent) {
+ let pos = state.pos;
+ const max = state.posMax;
+ if (state.src.charCodeAt(pos) !== 0x5C /* \ */) return false;
+ pos++;
+
+ // '\' at the end of the inline block
+ if (pos >= max) return false;
+ let ch1 = state.src.charCodeAt(pos);
+ if (ch1 === 0x0A) {
+ if (!silent) {
+ state.push('hardbreak', 'br', 0);
+ }
+ pos++;
+ // skip leading whitespaces from next line
+ while (pos < max) {
+ ch1 = state.src.charCodeAt(pos);
+ if (!isSpace(ch1)) break;
+ pos++;
+ }
+ state.pos = pos;
+ return true;
+ }
+ let escapedStr = state.src[pos];
+ if (ch1 >= 0xD800 && ch1 <= 0xDBFF && pos + 1 < max) {
+ const ch2 = state.src.charCodeAt(pos + 1);
+ if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) {
+ escapedStr += state.src[pos + 1];
+ pos++;
+ }
+ }
+ const origStr = '\\' + escapedStr;
+ if (!silent) {
+ const token = state.push('text_special', '', 0);
+ if (ch1 < 256 && ESCAPED[ch1] !== 0) {
+ token.content = escapedStr;
+ } else {
+ token.content = origStr;
+ }
+ token.markup = origStr;
+ token.info = 'escape';
+ }
+ state.pos = pos + 1;
+ return true;
+}
+
+// Parse backticks
+
+function backtick(state, silent) {
+ let pos = state.pos;
+ const ch = state.src.charCodeAt(pos);
+ if (ch !== 0x60 /* ` */) {
+ return false;
+ }
+ const start = pos;
+ pos++;
+ const max = state.posMax;
+
+ // scan marker length
+ while (pos < max && state.src.charCodeAt(pos) === 0x60 /* ` */) {
+ pos++;
+ }
+ const marker = state.src.slice(start, pos);
+ const openerLength = marker.length;
+ if (state.backticksScanned && (state.backticks[openerLength] || 0) <= start) {
+ if (!silent) state.pending += marker;
+ state.pos += openerLength;
+ return true;
+ }
+ let matchEnd = pos;
+ let matchStart;
+
+ // Nothing found in the cache, scan until the end of the line (or until marker is found)
+ while ((matchStart = state.src.indexOf('`', matchEnd)) !== -1) {
+ matchEnd = matchStart + 1;
+
+ // scan marker length
+ while (matchEnd < max && state.src.charCodeAt(matchEnd) === 0x60 /* ` */) {
+ matchEnd++;
+ }
+ const closerLength = matchEnd - matchStart;
+ if (closerLength === openerLength) {
+ // Found matching closer length.
+ if (!silent) {
+ const token = state.push('code_inline', 'code', 0);
+ token.markup = marker;
+ token.content = state.src.slice(pos, matchStart).replace(/\n/g, ' ').replace(/^ (.+) $/, '$1');
+ }
+ state.pos = matchEnd;
+ return true;
+ }
+
+ // Some different length found, put it in cache as upper limit of where closer can be found
+ state.backticks[closerLength] = matchStart;
+ }
+
+ // Scanned through the end, didn't find anything
+ state.backticksScanned = true;
+ if (!silent) state.pending += marker;
+ state.pos += openerLength;
+ return true;
+}
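+// Illustrative examples (not part of the library):
+//
+//     '`code`'      -> code_inline token with content 'code'
+//     '`` a`b ``'   -> code_inline token with content 'a`b'
+//
+// One leading and one trailing space are stripped when both are present,
+// and unmatched openers fall through to plain text.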
+
+// ~~strike through~~
+//
+
+// Insert each marker as a separate text token, and add it to delimiter list
+//
+function strikethrough_tokenize(state, silent) {
+ const start = state.pos;
+ const marker = state.src.charCodeAt(start);
+ if (silent) {
+ return false;
+ }
+ if (marker !== 0x7E /* ~ */) {
+ return false;
+ }
+ const scanned = state.scanDelims(state.pos, true);
+ let len = scanned.length;
+ const ch = String.fromCharCode(marker);
+ if (len < 2) {
+ return false;
+ }
+ let token;
+ if (len % 2) {
+ token = state.push('text', '', 0);
+ token.content = ch;
+ len--;
+ }
+ for (let i = 0; i < len; i += 2) {
+ token = state.push('text', '', 0);
+ token.content = ch + ch;
+ state.delimiters.push({
+ marker,
+ length: 0,
+ // disable "rule of 3" length checks meant for emphasis
+ token: state.tokens.length - 1,
+ end: -1,
+ open: scanned.can_open,
+ close: scanned.can_close
+ });
+ }
+ state.pos += scanned.length;
+ return true;
+}
+function postProcess$1(state, delimiters) {
+ let token;
+ const loneMarkers = [];
+ const max = delimiters.length;
+ for (let i = 0; i < max; i++) {
+ const startDelim = delimiters[i];
+ if (startDelim.marker !== 0x7E /* ~ */) {
+ continue;
+ }
+ if (startDelim.end === -1) {
+ continue;
+ }
+ const endDelim = delimiters[startDelim.end];
+ token = state.tokens[startDelim.token];
+ token.type = 's_open';
+ token.tag = 's';
+ token.nesting = 1;
+ token.markup = '~~';
+ token.content = '';
+ token = state.tokens[endDelim.token];
+ token.type = 's_close';
+ token.tag = 's';
+ token.nesting = -1;
+ token.markup = '~~';
+ token.content = '';
+ if (state.tokens[endDelim.token - 1].type === 'text' && state.tokens[endDelim.token - 1].content === '~') {
+ loneMarkers.push(endDelim.token - 1);
+ }
+ }
+
+  // If a marker sequence has an odd number of characters, it's split
+ // like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the
+ // start of the sequence.
+ //
+ // So, we have to move all those markers after subsequent s_close tags.
+ //
+ while (loneMarkers.length) {
+ const i = loneMarkers.pop();
+ let j = i + 1;
+ while (j < state.tokens.length && state.tokens[j].type === 's_close') {
+ j++;
+ }
+ j--;
+ if (i !== j) {
+ token = state.tokens[j];
+ state.tokens[j] = state.tokens[i];
+ state.tokens[i] = token;
+ }
+ }
+}
+
+// Walk through delimiter list and replace text tokens with tags
+//
+function strikethrough_postProcess(state) {
+ const tokens_meta = state.tokens_meta;
+ const max = state.tokens_meta.length;
+ postProcess$1(state, state.delimiters);
+ for (let curr = 0; curr < max; curr++) {
+ if (tokens_meta[curr] && tokens_meta[curr].delimiters) {
+ postProcess$1(state, tokens_meta[curr].delimiters);
+ }
+ }
+}
+var r_strikethrough = {
+ tokenize: strikethrough_tokenize,
+ postProcess: strikethrough_postProcess
+};
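+// Illustrative example (not part of the library): with this rule enabled
+// (it is part of the default preset), '~~done~~' tokenizes to
+// s_open / text('done') / s_close, i.e. it renders as <s>done</s>;
+// a single '~' is left as ordinary text.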
+
+// Process *this* and _that_
+//
+
+// Insert each marker as a separate text token, and add it to delimiter list
+//
+function emphasis_tokenize(state, silent) {
+ const start = state.pos;
+ const marker = state.src.charCodeAt(start);
+ if (silent) {
+ return false;
+ }
+ if (marker !== 0x5F /* _ */ && marker !== 0x2A /* * */) {
+ return false;
+ }
+ const scanned = state.scanDelims(state.pos, marker === 0x2A);
+ for (let i = 0; i < scanned.length; i++) {
+ const token = state.push('text', '', 0);
+ token.content = String.fromCharCode(marker);
+ state.delimiters.push({
+ // Char code of the starting marker (number).
+ //
+ marker,
+ // Total length of these series of delimiters.
+ //
+ length: scanned.length,
+ // A position of the token this delimiter corresponds to.
+ //
+ token: state.tokens.length - 1,
+ // If this delimiter is matched as a valid opener, `end` will be
+ // equal to its position, otherwise it's `-1`.
+ //
+ end: -1,
+ // Boolean flags that determine if this delimiter could open or close
+ // an emphasis.
+ //
+ open: scanned.can_open,
+ close: scanned.can_close
+ });
+ }
+ state.pos += scanned.length;
+ return true;
+}
+function postProcess(state, delimiters) {
+ const max = delimiters.length;
+ for (let i = max - 1; i >= 0; i--) {
+ const startDelim = delimiters[i];
+ if (startDelim.marker !== 0x5F /* _ */ && startDelim.marker !== 0x2A /* * */) {
+ continue;
+ }
+
+ // Process only opening markers
+ if (startDelim.end === -1) {
+ continue;
+ }
+ const endDelim = delimiters[startDelim.end];
+
+ // If the previous delimiter has the same marker and is adjacent to this one,
+ // merge those into one strong delimiter.
+ //
+    // `<em><em>whatever</em></em>` -> `<strong>whatever</strong>`
+ //
+ const isStrong = i > 0 && delimiters[i - 1].end === startDelim.end + 1 &&
+ // check that first two markers match and adjacent
+ delimiters[i - 1].marker === startDelim.marker && delimiters[i - 1].token === startDelim.token - 1 &&
+ // check that last two markers are adjacent (we can safely assume they match)
+ delimiters[startDelim.end + 1].token === endDelim.token + 1;
+ const ch = String.fromCharCode(startDelim.marker);
+ const token_o = state.tokens[startDelim.token];
+ token_o.type = isStrong ? 'strong_open' : 'em_open';
+ token_o.tag = isStrong ? 'strong' : 'em';
+ token_o.nesting = 1;
+ token_o.markup = isStrong ? ch + ch : ch;
+ token_o.content = '';
+ const token_c = state.tokens[endDelim.token];
+ token_c.type = isStrong ? 'strong_close' : 'em_close';
+ token_c.tag = isStrong ? 'strong' : 'em';
+ token_c.nesting = -1;
+ token_c.markup = isStrong ? ch + ch : ch;
+ token_c.content = '';
+ if (isStrong) {
+ state.tokens[delimiters[i - 1].token].content = '';
+ state.tokens[delimiters[startDelim.end + 1].token].content = '';
+ i--;
+ }
+ }
+}
+
+// Walk through delimiter list and replace text tokens with tags
+//
+function emphasis_post_process(state) {
+ const tokens_meta = state.tokens_meta;
+ const max = state.tokens_meta.length;
+ postProcess(state, state.delimiters);
+ for (let curr = 0; curr < max; curr++) {
+ if (tokens_meta[curr] && tokens_meta[curr].delimiters) {
+ postProcess(state, tokens_meta[curr].delimiters);
+ }
+ }
+}
+var r_emphasis = {
+ tokenize: emphasis_tokenize,
+ postProcess: emphasis_post_process
+};
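+// Illustrative examples (not part of the library):
+//
+//     '*em*'        -> em_open / text / em_close         (<em>em</em>)
+//     '**strong**'  -> strong_open / text / strong_close (<strong>strong</strong>)
+//
+// Note the scanDelims call above: '*' may split words (intraword emphasis),
+// while '_' may not, matching CommonMark.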
+
+// Process [link](<to> "stuff")
+ xhtmlOut: false,
+ // Convert '\n' in paragraphs into
+ breaks: false,
+ // CSS language prefix for fenced blocks
+ langPrefix: 'language-',
+ // autoconvert URL-like texts to links
+ linkify: false,
+ // Enable some language-neutral replacements + quotes beautification
+ typographer: false,
+ // Double + single quotes replacement pairs, when typographer enabled,
+ // and smartquotes on. Could be either a String or an Array.
+ //
+ // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
+ // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
+ quotes: '\u201c\u201d\u2018\u2019',
+ /* “”‘’ */
+
+ // Highlighter function. Should return escaped HTML,
+ // or '' if the source string is not changed and should be escaped externaly.
+ // If result starts with
+ breaks: false,
+ // CSS language prefix for fenced blocks
+ langPrefix: 'language-',
+ // autoconvert URL-like texts to links
+ linkify: false,
+ // Enable some language-neutral replacements + quotes beautification
+ typographer: false,
+ // Double + single quotes replacement pairs, when typographer enabled,
+ // and smartquotes on. Could be either a String or an Array.
+ //
+ // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
+ // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
+ quotes: '\u201c\u201d\u2018\u2019',
+ /* “”‘’ */
+
+ // Highlighter function. Should return escaped HTML,
+ // or '' if the source string is not changed and should be escaped externaly.
+ // If result starts with
+ breaks: false,
+ // CSS language prefix for fenced blocks
+ langPrefix: 'language-',
+ // autoconvert URL-like texts to links
+ linkify: false,
+ // Enable some language-neutral replacements + quotes beautification
+ typographer: false,
+ // Double + single quotes replacement pairs, when typographer enabled,
+ // and smartquotes on. Could be either a String or an Array.
+ //
+ // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
+ // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
+ quotes: '\u201c\u201d\u2018\u2019',
+ /* “”‘’ */
+
+ // Highlighter function. Should return escaped HTML,
+ // or '' if the source string is not changed and should be escaped externaly.
+ // If result starts with
+ * - __xhtmlOut__ - `false`. Set `true` to add '/' when closing single tags
+ * (`<br />`). This is needed only for full CommonMark compatibility. In real
+ * world you will need HTML output.
+ * - __breaks__ - `false`. Set `true` to convert `\n` in paragraphs into `<br>`.
+ * - __langPrefix__ - `language-`. CSS language class prefix for fenced blocks.
+ * Can be useful for external highlighters.
+ * - __linkify__ - `false`. Set `true` to autoconvert URL-like text to links.
+ * - __typographer__ - `false`. Set `true` to enable [some language-neutral
+ * replacement](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/replacements.mjs) +
+ * quotes beautification (smartquotes).
+ * - __quotes__ - `“”‘’`, String or Array. Double + single quotes replacement
+ * pairs, when typographer enabled and smartquotes on. For example, you can
+ * use `'«»„“'` for Russian, `'„“‚‘'` for German, and
+ * `['«\xA0', '\xA0»', '‹\xA0', '\xA0›']` for French (including nbsp).
+ * - __highlight__ - `null`. Highlighter function for fenced code blocks.
+ * Highlighter `function (str, lang)` should return escaped HTML. It can also
+ * return empty string if the source was not changed and should be escaped
+ * externally. If result starts with `<pre...` the internal wrapper is skipped.
+ *
+ * Example with full wrapper override (if you need to assign a class to `<pre>` or `<code>`):
+ *
+ * ```javascript
+ * var hljs = require('highlight.js') // https://highlightjs.org/
+ *
+ * // Actual default values
+ * var md = require('markdown-it')({
+ * highlight: function (str, lang) {
+ * if (lang && hljs.getLanguage(lang)) {
+ * try {
+ * return '<pre><code class="hljs">' +
+ * hljs.highlight(str, { language: lang, ignoreIllegals: true }).value +
+ * '</code></pre>';
+ * } catch (__) {}
+ * }
+ *
+ * return '<pre><code class="hljs">' + md.utils.escapeHtml(str) + '</code></pre>';
+ * }
+ * });
+ * ```
+ *
+ **/
+function MarkdownIt(presetName, options) {
+ if (!(this instanceof MarkdownIt)) {
+ return new MarkdownIt(presetName, options);
+ }
+ if (!options) {
+ if (!isString(presetName)) {
+ options = presetName || {};
+ presetName = 'default';
+ }
+ }
+
+ /**
+ * MarkdownIt#inline -> ParserInline
+ *
+ * Instance of [[ParserInline]]. You may need it to add new rules when
+ * writing plugins. For simple rules control use [[MarkdownIt.disable]] and
+ * [[MarkdownIt.enable]].
+ **/
+ this.inline = new ParserInline();
+
+ /**
+ * MarkdownIt#block -> ParserBlock
+ *
+ * Instance of [[ParserBlock]]. You may need it to add new rules when
+ * writing plugins. For simple rules control use [[MarkdownIt.disable]] and
+ * [[MarkdownIt.enable]].
+ **/
+ this.block = new ParserBlock();
+
+ /**
+ * MarkdownIt#core -> Core
+ *
+ * Instance of [[Core]] chain executor. You may need it to add new rules when
+ * writing plugins. For simple rules control use [[MarkdownIt.disable]] and
+ * [[MarkdownIt.enable]].
+ **/
+ this.core = new Core();
+
+ /**
+ * MarkdownIt#renderer -> Renderer
+ *
+ * Instance of [[Renderer]]. Use it to modify output look. Or to add rendering
+ * rules for new token types, generated by plugins.
+ *
+ * ##### Example
+ *
+ * ```javascript
+ * var md = require('markdown-it')();
+ *
+ * function myToken(tokens, idx, options, env, self) {
+ * //...
+ * return result;
+ * };
+ *
+ * md.renderer.rules['my_token'] = myToken
+ * ```
+ *
+ * See [[Renderer]] docs and [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.mjs).
+ **/
+ this.renderer = new Renderer();
+
+ /**
+ * MarkdownIt#linkify -> LinkifyIt
+ *
+ * [linkify-it](https://github.com/markdown-it/linkify-it) instance.
+ * Used by [linkify](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/linkify.mjs)
+ * rule.
+ **/
+ this.linkify = new LinkifyIt();
+
+ /**
+ * MarkdownIt#validateLink(url) -> Boolean
+ *
+ * Link validation function. CommonMark allows too much in links. By default
+ * we disable `javascript:`, `vbscript:`, `file:` schemas, and almost all `data:...` schemas
+ * except some embedded image types.
+ *
+ * You can change this behaviour:
+ *
+ * ```javascript
+ * var md = require('markdown-it')();
+ * // enable everything
+ * md.validateLink = function () { return true; }
+ * ```
+ **/
+ this.validateLink = validateLink;
+
+ /**
+ * MarkdownIt#normalizeLink(url) -> String
+ *
+ * Function used to encode link url to a machine-readable format,
+ * which includes url-encoding, punycode, etc.
+ **/
+ this.normalizeLink = normalizeLink;
+
+ /**
+ * MarkdownIt#normalizeLinkText(url) -> String
+ *
+   * Function used to decode link url to a human-readable format.
+ **/
+ this.normalizeLinkText = normalizeLinkText;
+
+  // Expose utils & helpers for easy access from plugins
+
+ /**
+ * MarkdownIt#utils -> utils
+ *
+ * Assorted utility functions, useful to write plugins. See details
+ * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/common/utils.mjs).
+ **/
+ this.utils = utils;
+
+ /**
+ * MarkdownIt#helpers -> helpers
+ *
+ * Link components parser functions, useful to write plugins. See details
+ * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/helpers).
+ **/
+ this.helpers = assign({}, helpers);
+ this.options = {};
+ this.configure(presetName);
+ if (options) {
+ this.set(options);
+ }
+}
+
+/** chainable
+ * MarkdownIt.set(options)
+ *
+ * Set parser options (in the same format as in constructor). Probably, you
+ * will never need it, but you can change options after constructor call.
+ *
+ * ##### Example
+ *
+ * ```javascript
+ * var md = require('markdown-it')()
+ * .set({ html: true, breaks: true })
+ * .set({ typographer, true });
+ * ```
+ *
+ * __Note:__ To achieve the best possible performance, don't modify a
+ * `markdown-it` instance options on the fly. If you need multiple configurations
+ * it's best to create multiple instances and initialize each with separate
+ * config.
+ **/
+MarkdownIt.prototype.set = function (options) {
+ assign(this.options, options);
+ return this;
+};
+
+/** chainable, internal
+ * MarkdownIt.configure(presets)
+ *
+ * Batch load of all options and component settings. This is an internal method,
+ * and you probably will not need it. But if you do - see available presets
+ * and data structure [here](https://github.com/markdown-it/markdown-it/tree/master/lib/presets)
+ *
+ * We strongly recommend using presets instead of direct config loads. That
+ * will give better compatibility with future versions.
+ **/
+MarkdownIt.prototype.configure = function (presets) {
+ const self = this;
+ if (isString(presets)) {
+ const presetName = presets;
+ presets = config[presetName];
+ if (!presets) {
+ throw new Error('Wrong `markdown-it` preset "' + presetName + '", check name');
+ }
+ }
+ if (!presets) {
+ throw new Error('Wrong `markdown-it` preset, can\'t be empty');
+ }
+ if (presets.options) {
+ self.set(presets.options);
+ }
+ if (presets.components) {
+ Object.keys(presets.components).forEach(function (name) {
+ if (presets.components[name].rules) {
+ self[name].ruler.enableOnly(presets.components[name].rules);
+ }
+ if (presets.components[name].rules2) {
+ self[name].ruler2.enableOnly(presets.components[name].rules2);
+ }
+ });
+ }
+ return this;
+};
+
+/** chainable
+ * MarkdownIt.enable(list, ignoreInvalid)
+ * - list (String|Array): rule name or list of rule names to enable
+ * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
+ *
+ * Enable a list of rules. It will automatically find the appropriate components
+ * containing rules with the given names. If a rule is not found, and `ignoreInvalid`
+ * is not set - an exception is thrown.
+ *
+ * ##### Example
+ *
+ * ```javascript
+ * var md = require('markdown-it')()
+ * .enable(['sub', 'sup'])
+ * .disable('smartquotes');
+ * ```
+ **/
+MarkdownIt.prototype.enable = function (list, ignoreInvalid) {
+ let result = [];
+ if (!Array.isArray(list)) {
+ list = [list];
+ }
+ ['core', 'block', 'inline'].forEach(function (chain) {
+ result = result.concat(this[chain].ruler.enable(list, true));
+ }, this);
+ result = result.concat(this.inline.ruler2.enable(list, true));
+ const missed = list.filter(function (name) {
+ return result.indexOf(name) < 0;
+ });
+ if (missed.length && !ignoreInvalid) {
+ throw new Error('MarkdownIt. Failed to enable unknown rule(s): ' + missed);
+ }
+ return this;
+};
+
+/** chainable
+ * MarkdownIt.disable(list, ignoreInvalid)
+ * - list (String|Array): rule name or list of rule names to disable.
+ * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
+ *
+ * The same as [[MarkdownIt.enable]], but turn specified rules off.
+ **/
+MarkdownIt.prototype.disable = function (list, ignoreInvalid) {
+ let result = [];
+ if (!Array.isArray(list)) {
+ list = [list];
+ }
+ ['core', 'block', 'inline'].forEach(function (chain) {
+ result = result.concat(this[chain].ruler.disable(list, true));
+ }, this);
+ result = result.concat(this.inline.ruler2.disable(list, true));
+ const missed = list.filter(function (name) {
+ return result.indexOf(name) < 0;
+ });
+ if (missed.length && !ignoreInvalid) {
+ throw new Error('MarkdownIt. Failed to disable unknown rule(s): ' + missed);
+ }
+ return this;
+};
+
+/** chainable
+ * MarkdownIt.use(plugin, params)
+ *
+ * Load specified plugin with given params into current parser instance.
+ * It's just sugar for calling `plugin(md, params)` with currying.
+ *
+ * ##### Example
+ *
+ * ```javascript
+ * var iterator = require('markdown-it-for-inline');
+ * var md = require('markdown-it')()
+ * .use(iterator, 'foo_replace', 'text', function (tokens, idx) {
+ * tokens[idx].content = tokens[idx].content.replace(/foo/g, 'bar');
+ * });
+ * ```
+ **/
+MarkdownIt.prototype.use = function (plugin /*, params, ... */) {
+ const args = [this].concat(Array.prototype.slice.call(arguments, 1));
+ plugin.apply(plugin, args);
+ return this;
+};
+
+/** internal
+ * MarkdownIt.parse(src, env) -> Array
+ * - src (String): source string
+ * - env (Object): environment sandbox
+ *
+ * Parse input string and return list of block tokens (special token type
+ * "inline" will contain list of inline tokens). You should not call this
+ * method directly, unless you write a custom renderer (for example, to produce
+ * AST).
+ *
+ * `env` is used to pass data between "distributed" rules and return additional
+ * metadata like reference info, needed for the renderer. It also can be used to
+ * inject data in specific cases. Usually, you will be ok to pass `{}`,
+ * and then pass updated object to renderer.
+ **/
+MarkdownIt.prototype.parse = function (src, env) {
+ if (typeof src !== 'string') {
+ throw new Error('Input data should be a String');
+ }
+ const state = new this.core.State(src, this, env);
+ this.core.process(state);
+ return state.tokens;
+};
+
+/**
+ * MarkdownIt.render(src [, env]) -> String
+ * - src (String): source string
+ * - env (Object): environment sandbox
+ *
+ * Render markdown string into html. It does all magic for you :).
+ *
+ * `env` can be used to inject additional metadata (`{}` by default).
+ * But you will most likely not need it. See also comment
+ * in [[MarkdownIt.parse]].
+ **/
+MarkdownIt.prototype.render = function (src, env) {
+ env = env || {};
+ return this.renderer.render(this.parse(src, env), this.options, env);
+};
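+// Usage sketch (illustrative): with default options, md.render('# Heading')
+// returns '<h1>Heading</h1>\n'.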
+
+/** internal
+ * MarkdownIt.parseInline(src, env) -> Array
+ * - src (String): source string
+ * - env (Object): environment sandbox
+ *
+ * The same as [[MarkdownIt.parse]] but skips all block rules. It returns the
+ * block tokens list with the single `inline` element, containing parsed inline
+ * tokens in `children` property. Also updates `env` object.
+ **/
+MarkdownIt.prototype.parseInline = function (src, env) {
+ const state = new this.core.State(src, this, env);
+ state.inlineMode = true;
+ this.core.process(state);
+ return state.tokens;
+};
+
+/**
+ * MarkdownIt.renderInline(src [, env]) -> String
+ * - src (String): source string
+ * - env (Object): environment sandbox
+ *
+ * Similar to [[MarkdownIt.render]] but for single paragraph content. Result
+ * will NOT be wrapped into `' + md.utils.escapeHtml(str) + '