Merge branch 'release/v6.0' into remove-restartdns

Signed-off-by: Adam Warner <me@adamwarner.co.uk>
Authored by Adam Warner on 2024-12-10 19:25:45 +00:00; committed by GitHub.
14 changed files with 133 additions and 62 deletions


@@ -16,7 +16,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
-       uses: actions/checkout@v4.2.0
+       uses: actions/checkout@v4.2.2
      - name: Compress Images
        uses: calibreapp/image-actions@1.1.0 # TODO: if they start using a tag like v1, switch to that


@@ -17,19 +17,19 @@ jobs:
  test:
    runs-on: ubuntu-latest
    steps:
-     - uses: actions/checkout@v4.2.0
+     - uses: actions/checkout@v4.2.2
        with:
          fetch-depth: 0
      - name: Set up Python
-       uses: actions/setup-python@v5.2.0
+       uses: actions/setup-python@v5.3.0
        with:
          python-version: "${{ env.PYTHON_VERSION }}"
          architecture: "x64"
          cache: pip
      - name: Set up Node.js
-       uses: actions/setup-node@v4.0.4
+       uses: actions/setup-node@v4.1.0
        with:
          node-version: "${{ env.NODE }}"
          cache: npm


@@ -10,7 +10,7 @@ jobs:
    steps:
      -
        name: Checkout repository
-       uses: actions/checkout@v4.2.0
+       uses: actions/checkout@v4.2.2
      -
        name: Spell-Checking
        uses: codespell-project/actions-codespell@master


@@ -9,6 +9,6 @@ jobs:
    name: editorconfig-checker
    runs-on: ubuntu-latest
    steps:
-     - uses: actions/checkout@v4.2.0
+     - uses: actions/checkout@v4.2.2
      - uses: editorconfig-checker/action-editorconfig-checker@main
      - run: editorconfig-checker


@@ -1,10 +1,16 @@
# Debugging FTLDNS using `gdb`
+`gdb` is a powerful debugger that can be used to analyze and help fix issues in software. It is a command-line tool that can be used to inspect the state of a running process, set breakpoints, and step through the code. Furthermore, it can be used to investigate crashes and give access to the state of the program (e.g. precise locations and variable contents) at the time of the crash.
+This guide will show you how to use `gdb` to debug `pihole-FTL`.
Once you are used to it, you can skip most of the steps. Debugging *FTL*DNS is quite easy. `pihole-FTL` has been designed so that a debugger can be attached to an already running process. This will give you insights into how software (not limited to `pihole-FTL`) works.
+!!! note "Debugging parallelized applications"
+    There is one fundamental drawback when it comes to using `gdb` for debugging parallelized applications: `pihole-FTL` uses both threads and forks. When DNS queries are processed on dedicated (long-lived) TCP connections, `pihole-FTL` forks a new process to handle the query, but `gdb` does not follow the forked process. This means that you will not be able to capture crashes that occur in forked processes. However, most crashes occur in the main process, so this limitation is not as severe as it might sound.
<!-- markdownlint-disable code-block-style -->
!!! info "When running Pi-hole in a Docker container"
-    If you are running Pi-hole in a Docker container, you will to perform all the steps described here *inside* the Docker container. You can use, e.g., `docker exec -it <container_name> /bin/bash` to get a shell inside the container.
+    If you are running Pi-hole in a Docker container, you will need to perform all the steps described here *inside* the Docker container. You can use, e.g., `docker exec -it <container_name> /bin/bash` to get a shell inside the container.
    You will also need to (temporarily!) add the `SYS_PTRACE` capability. This is required to allow `gdb` to attach to the `pihole-FTL` process inside the container. You can do this by starting the container with the `--cap-add=SYS_PTRACE` option. Furthermore, you need to disable the Linux kernel's secure computing mode for the container. Add `--security-opt seccomp:unconfined` in order to allow `gdb` to disable address space randomization.
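As a hedged illustration of the container flags mentioned in the note above (the container name, image tag, and omitted options are placeholders, not taken from this commit), a temporary debugging run might look like:

```bash
# Illustrative only: start Pi-hole with debugging temporarily enabled
docker run -d --name pihole \
  --cap-add=SYS_PTRACE \
  --security-opt seccomp:unconfined \
  pihole/pihole:latest   # plus your usual ports, volumes and environment options

# then open a shell inside the container for the remaining steps
docker exec -it pihole /bin/bash
```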
@@ -38,7 +44,7 @@ Once you are used to it, you can skip most of the steps. Debugging *FTL*DNS is q
4. Configure `gdb` by installing a globally valid initialization file:
    ```bash
-   echo "handle SIGHUP nostop SIGPIPE nostop SIGTERM nostop SIG32 nostop SIG34 nostop SIG35 nostop SIG41 nostop" | sudo tee /root/.gdbinit
+   echo "handle SIGHUP nostop SIGPIPE nostop SIGTERM nostop SIG32 nostop SIG33 nostop SIG34 nostop SIG35 nostop SIG36 nostop SIG37 nostop SIG38 nostop SIG39 nostop SIG40 nostop SIG41 nostop" | sudo tee /root/.gdbinit
    ```
    You can omit this step; however, you will then have to remember to run the quoted line on *every start* of `gdb` in order to properly debug FTL.
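For orientation, a minimal sketch of attaching to an already running `pihole-FTL` process (the guide's exact attach steps are not shown in this diff; this assumes the `.gdbinit` from step 4 is in place):

```bash
# Attach gdb to the running pihole-FTL process by PID
sudo gdb -p "$(pidof pihole-FTL)"
# at the (gdb) prompt, resume FTL and wait for the event you want to inspect:
#   (gdb) continue
```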


@@ -74,3 +74,11 @@ Re-resolve all clients and forward destination hostnames. This forces refreshing
## Real-time signal 5 (SIG39)
Re-parse ARP/neighbour-cache now to update the Network table.
+## Real-time signal 6 (SIG40)
+Signal used internally to terminate the embedded `dnsmasq`. Please do not send this signal yourself, as doing so may cause misbehaviour.
+## Real-time signal 7 (SIG41)
+Scan the binary search lookup tables for hash collisions and report if any are found. This is a debugging signal and not meaningful in production. Scanning the lookup tables is a time-consuming operation and may stall DNS resolution for a while on low-end devices.
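As a hedged example of triggering one of these signals (assuming the common glibc mapping where `SIGRTMIN` is 34, so real-time signal 7 is signal number 41, matching the SIG41 naming above):

```bash
# Ask a running FTL to scan its lookup tables for hash collisions (real-time signal 7 / SIG41)
sudo kill -41 "$(pidof pihole-FTL)"
```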


@@ -2,7 +2,7 @@
Occasionally, debugging may require us to run `pihole-FTL` in `valgrind`. We also use it to measure performance and check that our memory layout is optimal (= minimal footprint).
-`Valgrind` is a flexible program for debugging and profiling Linux executables. It consists of a core, which provides a synthetic CPU in software, and a series of debugging and profiling tools.
+`Valgrind` is a flexible program for debugging and profiling Linux executables. It consists of a core, which provides a synthetic CPU in software, and a series of debugging and profiling tools. The use of a synthetic CPU allows Valgrind to run the client program in a completely controlled environment and, hence, track the behaviour of the program in a very detailed way. Unfortunately, this also means that the program runs *much* slower than usual.
## `memcheck`
@@ -45,14 +45,14 @@ We suggest the following one-liner to run `pihole-FTL` in `memcheck`:
```
sudo service pihole-FTL stop && sudo setcap -r /usr/bin/pihole-FTL
-sudo valgrind --trace-children=yes --leak-check=full --track-origins=yes --log-file=valgrind.log -s /usr/bin/pihole-FTL
+sudo valgrind --trace-children=yes --leak-check=full --track-origins=yes --vgdb=full --log-file=valgrind.log -s /usr/bin/pihole-FTL
```
If you compile FTL from source, use
```
-sudo service pihole-FTL stop && sudo setcap -r /usr/bin/pihole-FTL
+sudo service pihole-FTL stop
-./build.sh && sudo valgrind --trace-children=yes --leak-check=full --track-origins=yes --log-file=valgrind.log -s ./pihole-FTL
+./build.sh && sudo valgrind --trace-children=yes --leak-check=full --track-origins=yes --vgdb=full --log-file=valgrind.log -s ./pihole-FTL
```
The most useful information (about which memory is *possibly* and which is *definitely* lost) is written to `valgrind.log` at the end of the analysis. Terminate FTL by running:
@@ -73,6 +73,51 @@ The used options are:
2. `leak-check=full` - When enabled, search for memory leaks when the client program finishes. Each individual leak will be shown in detail and/or counted as an error.
3. `track-origins=yes` - Memcheck tracks the origin of uninitialised values. By default, it does not, which means that although it can tell you that an uninitialised value is being used in a dangerous way, it cannot tell you where the uninitialised value came from. This often makes it difficult to track down the root problem.
    When set to `yes`, Memcheck keeps track of the origins of all uninitialised values. Then, when an uninitialised value error is reported, Memcheck will try to show the origin of the value.
+4. `vgdb=full` - The Valgrind core provides a built-in gdbserver implementation. This is useful when you want to investigate a crash that is not easily reproducible and memory errors are suspected to be the cause. This gdbserver allows the process running on Valgrind's synthetic CPU to be debugged remotely. GDB sends protocol query packets (such as "get register contents") to the Valgrind embedded gdbserver. The gdbserver executes the queries (for example, it will get the register values of the synthetic CPU) and gives the results back to GDB.
+<!-- markdownlint-disable code-block-style -->
+!!! info "When running Pi-hole in a Docker container"
+    If you are running Pi-hole in a Docker container, you will need to perform all the steps described here *inside* the Docker container. As the Docker container is dependent on the `pihole-FTL` process, you need to modify your compose file to spawn a shell inside the container instead of starting the `pihole-FTL` process directly. We also add a few extra settings here; see [the `gdb` guide](gdb.md) for more information about this:
+    ```yaml
+    services:
+      pihole:
+        # your other options ...
+        cap_add:
+          # your other added capabilities ...
+          - SYS_PTRACE
+        security_opt:
+          - seccomp:unconfined
+        entrypoint: /bin/bash
+        tty: true
+    ```
+<!-- markdownlint-enable code-block-style -->
+### Combining `valgrind` with `gdb`
+You can also combine `valgrind` with `gdb` to get both the memory error detection and the ability to step through the code after a crash (or other unexpected behaviour).
+1. Prepare `gdb` as described in [the `gdb` guide](gdb.md).
+2. Start `pihole-FTL` in `valgrind` as described above. The `--vgdb=full` option tells `valgrind` to start a GDB server.
+3. Once FTL has started, you can attach `gdb` to the running process using
+   ``` bash
+   sudo gdb /usr/bin/pihole-FTL
+   ```
+   and then, at the `(gdb)` prompt,
+   ``` plain
+   target remote | vgdb
+   ```
+   to connect `gdb` to the "remote" `valgrind` process.
+4. Don't forget to enter `continue` to continue the execution of `pihole-FTL` in `valgrind`.
+5. At the time of a crash, you can step through the code and investigate the state of the program as usual with `gdb`. Note that variables and functions may have different names than in the source code, as `valgrind` modifies the program with additional instrumentation code.
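Putting the steps above together, a minimal two-terminal sketch (the commands are the ones quoted in this guide; the `gdb` commands are shown as comments):

```bash
# Terminal 1: run FTL under valgrind with the embedded gdbserver enabled
sudo service pihole-FTL stop && sudo setcap -r /usr/bin/pihole-FTL
sudo valgrind --trace-children=yes --leak-check=full --track-origins=yes --vgdb=full --log-file=valgrind.log -s /usr/bin/pihole-FTL

# Terminal 2: attach gdb to valgrind's gdbserver and resume execution
sudo gdb /usr/bin/pihole-FTL
#   (gdb) target remote | vgdb
#   (gdb) continue
```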
### False-positive memory issues


@@ -9,12 +9,12 @@ It is worth noting, however, that the upstream DNS-Over-HTTPS provider will stil
## Configuring DNS-Over-HTTPS
-Along with releasing their DNS service [1.1.1.1](https://blog.cloudflare.com/announcing-1111/), Cloudflare implemented DNS-Over-HTTPS proxy functionality into one of their tools: [`cloudflared`](https://github.com/cloudflare/cloudflared).
+Along with releasing their DNS service [1.1.1.1](https://blog.cloudflare.com/announcing-1111/) (and later [1.1.1.1 for Families](https://blog.cloudflare.com/introducing-1-1-1-1-for-families)), Cloudflare implemented DNS-Over-HTTPS proxy functionality into one of their tools: [`cloudflared`](https://github.com/cloudflare/cloudflared).
In the following sections, we will be covering how to install and configure this tool on `Pi-hole`.
!!! info
-    The `cloudflared` binary will work with other DoH providers (for example, you could use `https://8.8.8.8/dns-query` for Google's DNS-Over-HTTPS service).
+    The `cloudflared` binary will also work with other DoH providers (for example, [Google's DoH service](https://developers.google.com/speed/public-dns/docs/doh) or [Quad9's DoH service](https://quad9.net/service/service-addresses-and-features)).
### Installing `cloudflared`
@@ -81,7 +81,18 @@ Edit configuration file by copying the following in to `/etc/default/cloudflared
```bash
# Commandline args for cloudflared, using Cloudflare DNS
-CLOUDFLARED_OPTS=--port 5053 --upstream https://1.1.1.1/dns-query --upstream https://1.0.0.1/dns-query
+CLOUDFLARED_OPTS=--port 5053 --upstream https://cloudflare-dns.com/dns-query
+```
+!!! info
+    See the other available [Cloudflare endpoints](https://developers.cloudflare.com/1.1.1.1/infrastructure/network-operators/#available-endpoints).
+If you're running cloudflared on a different host than Pi-hole, you can set the listening address to all IPs (for security, change 0.0.0.0 to your machine's IP, e.g. 192.168.1.1):
+```bash
+# Commandline args for cloudflared, using Cloudflare DNS
+CLOUDFLARED_OPTS=--port 5053 --upstream https://1.1.1.1/dns-query --upstream https://1.0.0.1/dns-query --address 0.0.0.0
```
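To use one of the other DoH providers mentioned earlier, only the upstream URLs change. A hedged sketch using the Google endpoint referenced in the older text (illustrative, not part of the guide itself):

```bash
# Illustrative only: commandline args for cloudflared, using Google DNS-Over-HTTPS
CLOUDFLARED_OPTS=--port 5053 --upstream https://8.8.8.8/dns-query
```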
Update the permissions for the configuration file and `cloudflared` binary to allow access for the cloudflared user:
@@ -160,7 +171,7 @@ Finally, configure Pi-hole to use the local `cloudflared` service as the upstrea
### Updating `cloudflared`
-The `cloudflared` tool will not receive updates through the package manager. However, you should keep the program up to date. You can either do this manually, or via a cron script.
+The `cloudflared` tool will not receive updates through the package manager. However, you should keep the program up to date. You can either do this manually (e.g. by watching their [repo](https://github.com/cloudflare/cloudflared) for new releases), or via a cron script.
The procedure for updating depends on how you configured the `cloudflared` binary.
@@ -200,7 +211,7 @@ sudo chown root:root /etc/cron.weekly/cloudflared-updater
<!-- markdownlint-disable code-block-style -->
!!! warning
    Make sure to add the shebang `#!/bin/bash` at the beginning of `/etc/cron.weekly/cloudflared-updater`.
-    Otherwise, the command would not executed.
+    Otherwise, the command will not be executed.
<!-- markdownlint-enable code-block-style -->
The system will now attempt to update the cloudflared binary automatically, once per week.
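The updater script itself lies outside the hunks shown here. Purely as a hypothetical sketch (assuming `cloudflared` was installed as a standalone binary, which supports `cloudflared update`, and that the service is named `cloudflared`), it could look like:

```bash
#!/bin/bash
# Hypothetical /etc/cron.weekly/cloudflared-updater sketch - not the guide's actual script
cloudflared update
systemctl restart cloudflared
```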


@@ -25,14 +25,14 @@ Pi-hole is supported on distributions utilizing [systemd](https://systemd.io/) o
The following operating systems are **officially** supported:
-| Distribution | Release | Architecture |
-| ------------ | ---------------- | ------------------- |
-| Raspberry Pi OS <br>(formerly Raspbian) | Buster / Bullseye | ARM |
-| Armbian OS | Any | ARM / x86_64 / riscv64 |
-| Ubuntu | 20.x / 22.x / 23.x / 24.x | ARM / x86_64 |
-| Debian | 10 / 11 / 12 | ARM / x86_64 / i386 |
-| Fedora | 39 / 40 | ARM / x86_64 |
-| CentOS Stream | 9 | x86_64 |
+- Raspberry Pi OS (formerly Raspbian)
+- Armbian OS
+- Ubuntu
+- Debian
+- Fedora
+- CentOS Stream
+Pi-hole only supports actively maintained versions of these systems.
<!-- markdownlint-disable code-block-style -->
!!! info


@@ -2,7 +2,7 @@
## Only match specific query types
-You can amend the regular expressions by special keywords added at the end to fine-tine regular expressions to match only specific [query types](../database/query-database.md#supported-query-types). In contrast to the description of `OTHER` as being deprecated for storing queries in the database, it is still supported for regular expressions and will match all queries that are not *explicitly* covered by the other query types (see also example below).
+You can amend the regular expressions by special keywords added at the end to fine-tune regular expressions to match only specific [query types](../database/query-database.md#supported-query-types). In contrast to the description of `OTHER` as being deprecated for storing queries in the database, it is still supported for regular expressions and will match all queries that are not *explicitly* covered by the other query types (see also example below).
Example:


@@ -147,7 +147,7 @@ nav:
    - 'Cache dump': ftldns/cache_dump.md
    - 'Packet dump': ftldns/package_dump.md
    - 'Debugging':
-       - 'gdb': ftldns/debugging.md
+       - 'gdb': ftldns/gdb.md
        - 'valgrind': ftldns/valgrind.md
    - 'Group Management':
        - 'Group Management': group_management/index.md
@@ -276,3 +276,4 @@ plugins:
'guides/misc/tor/overview.md': guides/misc/tor/index.md
'guides/github/contributing.md': guides/github/index.md
'guides/misc/whitelist-blacklist.md': guides/misc/allowlist-denylist.md
+'ftldns/debugging.md': ftldns/gdb.md

package-lock.json (generated)

@@ -9,8 +9,8 @@
"version": "1.0.0",
"license": "CC-BY-SA-4.0",
"devDependencies": {
-"linkinator": "^6.1.1",
-"markdownlint-cli2": "0.14.0"
+"linkinator": "^6.1.2",
+"markdownlint-cli2": "0.15.0"
}
},
"node_modules/@isaacs/cliui": {
@@ -574,9 +574,9 @@
}
},
"node_modules/linkinator": {
-"version": "6.1.1",
-"resolved": "https://registry.npmjs.org/linkinator/-/linkinator-6.1.1.tgz",
-"integrity": "sha512-VNFhw71A8ORQKdNdUz6MqcdmoCK2SKWI+22dmcN/7KuERTxv9yfezh5MqwetH66DmRPvj9FMtATk+ck2P5XJjw==",
+"version": "6.1.2",
+"resolved": "https://registry.npmjs.org/linkinator/-/linkinator-6.1.2.tgz",
+"integrity": "sha512-PndSrQe21Hf4sn2vZldEzJmD0EUJbIsEy4jcZLcHd6IZfQ6rC6iv+Fwo666TWM9DcXjbCrHpxnVX6xaGrcJ/eA==",
"dev": true,
"dependencies": {
"chalk": "^5.0.0",
@@ -624,13 +624,13 @@
}
},
"node_modules/markdownlint": {
-"version": "0.35.0",
-"resolved": "https://registry.npmjs.org/markdownlint/-/markdownlint-0.35.0.tgz",
-"integrity": "sha512-wgp8yesWjFBL7bycA3hxwHRdsZGJhjhyP1dSxKVKrza0EPFYtn+mHtkVy6dvP1kGSjovyG5B8yNP6Frj0UFUJg==",
+"version": "0.36.1",
+"resolved": "https://registry.npmjs.org/markdownlint/-/markdownlint-0.36.1.tgz",
+"integrity": "sha512-s73fU2CQN7WCgjhaQUQ8wYESQNzGRNOKDd+3xgVqu8kuTEhmwepd/mxOv1LR2oV046ONrTLBFsM7IoKWNvmy5g==",
"dev": true,
"dependencies": {
"markdown-it": "14.1.0",
-"markdownlint-micromark": "0.1.10"
+"markdownlint-micromark": "0.1.12"
},
"engines": {
"node": ">=18"
@@ -640,15 +640,15 @@
}
},
"node_modules/markdownlint-cli2": {
-"version": "0.14.0",
-"resolved": "https://registry.npmjs.org/markdownlint-cli2/-/markdownlint-cli2-0.14.0.tgz",
-"integrity": "sha512-2cqdWy56frU2FTpbuGb83mEWWYuUIYv6xS8RVEoUAuKNw/hXPar2UYGpuzUhlFMngE8Omaz4RBH52MzfRbGshw==",
+"version": "0.15.0",
+"resolved": "https://registry.npmjs.org/markdownlint-cli2/-/markdownlint-cli2-0.15.0.tgz",
+"integrity": "sha512-4P/lnxQxU2R5lywRJs4b2ajm8z65CW8qqR1bTIcdQ5EG+nZpC6HJlJUnmIR5ee+uecUkoMroazxWcLB7etSmrg==",
"dev": true,
"dependencies": {
"globby": "14.0.2",
"js-yaml": "4.1.0",
"jsonc-parser": "3.3.1",
-"markdownlint": "0.35.0",
+"markdownlint": "0.36.1",
"markdownlint-cli2-formatter-default": "0.0.5",
"micromatch": "4.0.8"
},
@@ -675,9 +675,9 @@
}
},
"node_modules/markdownlint-micromark": {
-"version": "0.1.10",
-"resolved": "https://registry.npmjs.org/markdownlint-micromark/-/markdownlint-micromark-0.1.10.tgz",
-"integrity": "sha512-no5ZfdqAdWGxftCLlySHSgddEjyW4kui4z7amQcGsSKfYC5v/ou+8mIQVyg9KQMeEZLNtz9OPDTj7nnTnoR4FQ==",
+"version": "0.1.12",
+"resolved": "https://registry.npmjs.org/markdownlint-micromark/-/markdownlint-micromark-0.1.12.tgz",
+"integrity": "sha512-RlB6EwMGgc0sxcIhOQ2+aq7Zw1V2fBnzbXKGgYK/mVWdT7cz34fteKSwfYeo4rL6+L/q2tyC9QtD/PgZbkdyJQ==",
"dev": true,
"engines": {
"node": ">=18"
@@ -1636,9 +1636,9 @@
}
},
"linkinator": {
-"version": "6.1.1",
-"resolved": "https://registry.npmjs.org/linkinator/-/linkinator-6.1.1.tgz",
-"integrity": "sha512-VNFhw71A8ORQKdNdUz6MqcdmoCK2SKWI+22dmcN/7KuERTxv9yfezh5MqwetH66DmRPvj9FMtATk+ck2P5XJjw==",
+"version": "6.1.2",
+"resolved": "https://registry.npmjs.org/linkinator/-/linkinator-6.1.2.tgz",
+"integrity": "sha512-PndSrQe21Hf4sn2vZldEzJmD0EUJbIsEy4jcZLcHd6IZfQ6rC6iv+Fwo666TWM9DcXjbCrHpxnVX6xaGrcJ/eA==",
"dev": true,
"requires": {
"chalk": "^5.0.0",
@@ -1674,25 +1674,25 @@
}
},
"markdownlint": {
-"version": "0.35.0",
-"resolved": "https://registry.npmjs.org/markdownlint/-/markdownlint-0.35.0.tgz",
-"integrity": "sha512-wgp8yesWjFBL7bycA3hxwHRdsZGJhjhyP1dSxKVKrza0EPFYtn+mHtkVy6dvP1kGSjovyG5B8yNP6Frj0UFUJg==",
+"version": "0.36.1",
+"resolved": "https://registry.npmjs.org/markdownlint/-/markdownlint-0.36.1.tgz",
+"integrity": "sha512-s73fU2CQN7WCgjhaQUQ8wYESQNzGRNOKDd+3xgVqu8kuTEhmwepd/mxOv1LR2oV046ONrTLBFsM7IoKWNvmy5g==",
"dev": true,
"requires": {
"markdown-it": "14.1.0",
-"markdownlint-micromark": "0.1.10"
+"markdownlint-micromark": "0.1.12"
}
},
"markdownlint-cli2": {
-"version": "0.14.0",
-"resolved": "https://registry.npmjs.org/markdownlint-cli2/-/markdownlint-cli2-0.14.0.tgz",
-"integrity": "sha512-2cqdWy56frU2FTpbuGb83mEWWYuUIYv6xS8RVEoUAuKNw/hXPar2UYGpuzUhlFMngE8Omaz4RBH52MzfRbGshw==",
+"version": "0.15.0",
+"resolved": "https://registry.npmjs.org/markdownlint-cli2/-/markdownlint-cli2-0.15.0.tgz",
+"integrity": "sha512-4P/lnxQxU2R5lywRJs4b2ajm8z65CW8qqR1bTIcdQ5EG+nZpC6HJlJUnmIR5ee+uecUkoMroazxWcLB7etSmrg==",
"dev": true,
"requires": {
"globby": "14.0.2",
"js-yaml": "4.1.0",
"jsonc-parser": "3.3.1",
-"markdownlint": "0.35.0",
+"markdownlint": "0.36.1",
"markdownlint-cli2-formatter-default": "0.0.5",
"micromatch": "4.0.8"
}
@@ -1705,9 +1705,9 @@
"requires": {}
},
"markdownlint-micromark": {
-"version": "0.1.10",
-"resolved": "https://registry.npmjs.org/markdownlint-micromark/-/markdownlint-micromark-0.1.10.tgz",
-"integrity": "sha512-no5ZfdqAdWGxftCLlySHSgddEjyW4kui4z7amQcGsSKfYC5v/ou+8mIQVyg9KQMeEZLNtz9OPDTj7nnTnoR4FQ==",
+"version": "0.1.12",
+"resolved": "https://registry.npmjs.org/markdownlint-micromark/-/markdownlint-micromark-0.1.12.tgz",
+"integrity": "sha512-RlB6EwMGgc0sxcIhOQ2+aq7Zw1V2fBnzbXKGgYK/mVWdT7cz34fteKSwfYeo4rL6+L/q2tyC9QtD/PgZbkdyJQ==",
"dev": true
},
"marked": {


@@ -23,7 +23,7 @@
"serve": "mkdocs serve --dev-addr 0.0.0.0:8000"
},
"devDependencies": {
-"linkinator": "^6.1.1",
-"markdownlint-cli2": "0.14.0"
+"linkinator": "^6.1.2",
+"markdownlint-cli2": "0.15.0"
}
}


@@ -1,5 +1,5 @@
mkdocs==1.6.1
-mkdocs-git-revision-date-localized-plugin==1.2.9
-mkdocs-material==9.5.38
-mkdocs-redirects==1.2.1
+mkdocs-git-revision-date-localized-plugin==1.3.0
+mkdocs-material==9.5.47
+mkdocs-redirects==1.2.2
markdown-include==0.8.1