1
0
mirror of https://github.com/home-assistant/supervisor.git synced 2026-05-08 08:58:31 +01:00

Migrate (almost) all docker container interactions to aiodocker (#6489)

* Migrate all docker container interactions to aiodocker

* Remove containers_legacy since it's no longer used

* Add back remove color logic

* Revert accidental invert of conditional in setup_network

* Fix typos found by copilot

* Apply suggestions from code review

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Revert "Apply suggestions from code review"

This reverts commit 0a475433ea.

---------

Co-authored-by: Stefan Agner <stefan@agner.ch>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
This commit is contained in:
Mike Degatano
2026-01-27 06:42:17 -05:00
committed by GitHub
parent 515114fa69
commit 909a2dda2f
40 changed files with 1481 additions and 1510 deletions
-7
View File
@@ -1176,13 +1176,6 @@ class Addon(AddonModel):
await self.stop()
return await self.start()
def logs(self) -> Awaitable[bytes]:
"""Return add-ons log output.
Return a coroutine.
"""
return self.instance.logs()
def is_running(self) -> Awaitable[bool]:
"""Return True if Docker container is running.
+2 -2
View File
@@ -6,7 +6,7 @@ import base64
from functools import cached_property
import json
import logging
from pathlib import Path
from pathlib import Path, PurePath
from typing import TYPE_CHECKING, Any
from awesomeversion import AwesomeVersion
@@ -250,7 +250,7 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
return {
"command": build_cmd,
"volumes": volumes,
"working_dir": "/addon",
"working_dir": PurePath("/addon"),
}
def _fix_label(self, label_name: str) -> str:
+1 -1
View File
@@ -434,7 +434,7 @@ class Core(CoreSysAttributes):
async def repair(self) -> None:
"""Repair system integrity."""
_LOGGER.info("Starting repair of Supervisor Environment")
await self.sys_run_in_executor(self.sys_docker.repair)
await self.sys_docker.repair()
# Fix plugins
await self.sys_plugins.repair()
+86 -89
View File
@@ -2,22 +2,17 @@
from __future__ import annotations
from collections.abc import Awaitable
from contextlib import suppress
from http import HTTPStatus
from ipaddress import IPv4Address
import logging
import os
from pathlib import Path
from socket import SocketIO
import tempfile
from typing import TYPE_CHECKING, Literal, cast
from typing import TYPE_CHECKING, Any, Literal, cast
import aiodocker
from attr import evolve
from awesomeversion import AwesomeVersion
import docker
import docker.errors
import requests
from ..addons.build import AddonBuild
from ..addons.const import MappingType
@@ -690,34 +685,43 @@ class DockerAddon(DockerInterface):
await build_env.is_valid()
_LOGGER.info("Starting build for %s:%s", self.image, version)
if build_env.squash:
_LOGGER.warning(
"Ignoring squash build option for %s as Docker BuildKit does not support it.",
self.addon.slug,
)
def build_image() -> tuple[str, str]:
if build_env.squash:
_LOGGER.warning(
"Ignoring squash build option for %s as Docker BuildKit does not support it.",
self.addon.slug,
)
addon_image_tag = f"{image or self.addon.image}:{version!s}"
addon_image_tag = f"{image or self.addon.image}:{version!s}"
docker_version = self.sys_docker.info.version
builder_version_tag = (
f"{docker_version.major}.{docker_version.minor}.{docker_version.micro}-cli"
)
docker_version = self.sys_docker.info.version
builder_version_tag = f"{docker_version.major}.{docker_version.minor}.{docker_version.micro}-cli"
builder_name = f"addon_builder_{self.addon.slug}"
builder_name = f"addon_builder_{self.addon.slug}"
# Remove dangling builder container if it exists by any chance
# E.g. because of an abrupt host shutdown/reboot during a build
try:
container = await self.sys_docker.containers.get(builder_name)
await container.delete(force=True, v=True)
except aiodocker.DockerError as err:
if err.status != HTTPStatus.NOT_FOUND:
raise DockerBuildError(
f"Can't clean up existing builder container: {err!s}", _LOGGER.error
) from err
# Remove dangling builder container if it exists by any chance
# E.g. because of an abrupt host shutdown/reboot during a build
with suppress(docker.errors.NotFound):
self.sys_docker.containers_legacy.get(builder_name).remove(
force=True, v=True
)
# Generate Docker config with registry credentials for base image if needed
docker_config_content = build_env.get_docker_config_json()
temp_dir: tempfile.TemporaryDirectory | None = None
# Generate Docker config with registry credentials for base image if needed
docker_config_path: Path | None = None
docker_config_content = build_env.get_docker_config_json()
temp_dir: tempfile.TemporaryDirectory | None = None
try:
try:
def pre_build_setup() -> tuple[
tempfile.TemporaryDirectory | None, dict[str, Any]
]:
docker_config_path: Path | None = None
temp_dir: tempfile.TemporaryDirectory | None = None
if docker_config_content:
# Create temporary directory for docker config
temp_dir = tempfile.TemporaryDirectory(
@@ -732,43 +736,46 @@ class DockerAddon(DockerInterface):
docker_config_path,
)
result = self.sys_docker.run_command(
ADDON_BUILDER_IMAGE,
version=builder_version_tag,
name=builder_name,
**build_env.get_docker_args(
return (
temp_dir,
build_env.get_docker_args(
version, addon_image_tag, docker_config_path
),
)
finally:
# Clean up temporary directory
if temp_dir:
temp_dir.cleanup()
logs = result.output.decode("utf-8")
temp_dir, build_args = await self.sys_run_in_executor(pre_build_setup)
if result.exit_code != 0:
error_message = f"Docker build failed for {addon_image_tag} (exit code {result.exit_code}). Build output:\n{logs}"
raise docker.errors.DockerException(error_message)
return addon_image_tag, logs
try:
addon_image_tag, log = await self.sys_run_in_executor(build_image)
_LOGGER.debug("Build %s:%s done: %s", self.image, version, log)
# Update meta data
self._meta = await self.sys_docker.images.inspect(addon_image_tag)
except (
docker.errors.DockerException,
requests.RequestException,
aiodocker.DockerError,
) as err:
result = await self.sys_docker.run_command(
ADDON_BUILDER_IMAGE,
tag=builder_version_tag,
name=builder_name,
**build_args,
)
except DockerError as err:
raise DockerBuildError(
f"Can't build {self.image}:{version}: {err!s}", _LOGGER.error
) from err
finally:
# Clean up temporary directory
if temp_dir:
await self.sys_run_in_executor(temp_dir.cleanup)
logs = "\n".join(result.log)
if result.exit_code != 0:
raise DockerBuildError(
f"Docker build failed for {addon_image_tag} (exit code {result.exit_code}). Build output:\n{logs}",
_LOGGER.error,
)
_LOGGER.debug("Build %s:%s done: %s", self.image, version, logs)
try:
# Update meta data
self._meta = await self.sys_docker.images.inspect(addon_image_tag)
except aiodocker.DockerError as err:
raise DockerBuildError(
f"Can't get image metadata for {addon_image_tag} after build: {err!s}"
) from err
_LOGGER.info("Build %s:%s done", self.image, version)
@@ -826,34 +833,26 @@ class DockerAddon(DockerInterface):
on_condition=DockerJobError,
concurrency=JobConcurrency.GROUP_REJECT,
)
def write_stdin(self, data: bytes) -> Awaitable[None]:
async def write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin."""
return self.sys_run_in_executor(self._write_stdin, data)
def _write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin.
Need run inside executor.
"""
try:
# Load needed docker objects
container = self.sys_docker.containers_legacy.get(self.name)
# attach_socket returns SocketIO for local Docker connections (Unix socket)
socket = cast(
SocketIO, container.attach_socket(params={"stdin": 1, "stream": 1})
)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't attach to %s stdin: %s", self.name, err)
raise DockerError() from err
container = await self.sys_docker.containers.get(self.name)
socket = container.attach(stdin=True)
except aiodocker.DockerError as err:
raise DockerError(
f"Can't attach to {self.name} stdin: {err!s}", _LOGGER.error
) from err
try:
# Write to stdin
data += b"\n"
os.write(socket.fileno(), data)
socket.close()
except OSError as err:
_LOGGER.error("Can't write to %s stdin: %s", self.name, err)
raise DockerError() from err
await socket.write_in(data + b"\n")
await socket.close()
# Seems to raise very generic exceptions like RuntimeError or AssertionError
# So we catch all exceptions and re-raise them as DockerError
except Exception as err:
raise DockerError(
f"Can't write to {self.name} stdin: {err!s}", _LOGGER.error
) from err
@Job(
name="docker_addon_stop",
@@ -899,15 +898,13 @@ class DockerAddon(DockerInterface):
return
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
)
except docker.errors.NotFound:
if self._hw_listener:
self.sys_bus.remove_listener(self._hw_listener)
self._hw_listener = None
return
except (docker.errors.DockerException, requests.RequestException) as err:
docker_container = await self.sys_docker.containers.get(self.name)
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
if self._hw_listener:
self.sys_bus.remove_listener(self._hw_listener)
self._hw_listener = None
return
raise DockerError(
f"Can't process Hardware Event on {self.name}: {err!s}", _LOGGER.error
) from err
+3 -4
View File
@@ -210,12 +210,11 @@ class DockerHomeAssistant(DockerInterface):
on_condition=DockerJobError,
concurrency=JobConcurrency.GROUP_REJECT,
)
async def execute_command(self, command: str) -> CommandReturn:
async def execute_command(self, command: list[str]) -> CommandReturn:
"""Create a temporary container and run command."""
return await self.sys_run_in_executor(
self.sys_docker.run_command,
return await self.sys_docker.run_command(
self.image,
version=self.sys_homeassistant.version,
tag=str(self.sys_homeassistant.version),
command=command,
privileged=True,
init=True,
+36 -73
View File
@@ -15,8 +15,6 @@ from uuid import uuid4
import aiodocker
from awesomeversion import AwesomeVersion
from awesomeversion.strategy import AwesomeVersionStrategy
import docker
from docker.models.containers import Container
import requests
from ..bus import EventListener
@@ -52,7 +50,7 @@ from .const import (
PullImageLayerStage,
RestartPolicy,
)
from .manager import CommandReturn, PullLogEntry
from .manager import CommandReturn, ExecReturn, PullLogEntry
from .monitor import DockerContainerStateEvent
from .stats import DockerStats
@@ -67,18 +65,21 @@ MAP_ARCH: dict[CpuArch, str] = {
}
def _container_state_from_model(docker_container: Container) -> ContainerState:
def _container_state_from_model(container_metadata: dict[str, Any]) -> ContainerState:
"""Get container state from model."""
if docker_container.status == "running":
if "Health" in docker_container.attrs["State"]:
if "State" not in container_metadata:
return ContainerState.UNKNOWN
if container_metadata["State"]["Status"] == "running":
if "Health" in container_metadata["State"]:
return (
ContainerState.HEALTHY
if docker_container.attrs["State"]["Health"]["Status"] == "healthy"
if container_metadata["State"]["Health"]["Status"] == "healthy"
else ContainerState.UNHEALTHY
)
return ContainerState.RUNNING
if docker_container.attrs["State"]["ExitCode"] > 0:
if container_metadata["State"]["ExitCode"] > 0:
return ContainerState.FAILED
return ContainerState.STOPPED
@@ -412,18 +413,6 @@ class DockerInterface(JobGroup, ABC):
await self.sys_docker.images.tag(
docker_image["Id"], image, tag="latest"
)
except docker.errors.APIError as err:
if err.status_code == HTTPStatus.TOO_MANY_REQUESTS:
self.sys_resolution.create_issue(
IssueType.DOCKER_RATELIMIT,
ContextType.SYSTEM,
suggestions=[SuggestionType.REGISTRY_LOGIN],
)
raise DockerHubRateLimitExceeded(_LOGGER.error) from err
await async_capture_exception(err)
raise DockerError(
f"Can't install {image}:{version!s}: {err}", _LOGGER.error
) from err
except aiodocker.DockerError as err:
if err.status == HTTPStatus.TOO_MANY_REQUESTS:
self.sys_resolution.create_issue(
@@ -436,14 +425,6 @@ class DockerInterface(JobGroup, ABC):
raise DockerError(
f"Can't install {image}:{version!s}: {err}", _LOGGER.error
) from err
except (
docker.errors.DockerException,
requests.RequestException,
) as err:
await async_capture_exception(err)
raise DockerError(
f"Unknown error with {image}:{version!s} -> {err!s}", _LOGGER.error
) from err
finally:
if listener:
self.sys_bus.remove_listener(listener)
@@ -457,15 +438,14 @@ class DockerInterface(JobGroup, ABC):
return True
return False
async def _get_container(self) -> Container | None:
async def _get_container(self) -> dict[str, Any] | None:
"""Get docker container, returns None if not found."""
try:
return await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
)
except docker.errors.NotFound:
return None
except docker.errors.DockerException as err:
container = await self.sys_docker.containers.get(self.name)
return await container.show()
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
return None
raise DockerAPIError(
f"Docker API error occurred while getting container information: {err!s}"
) from err
@@ -476,14 +456,16 @@ class DockerInterface(JobGroup, ABC):
async def is_running(self) -> bool:
"""Return True if Docker is running."""
if docker_container := await self._get_container():
return docker_container.status == "running"
return False
return bool(
(container_metadata := await self._get_container())
and "State" in container_metadata
and container_metadata["State"]["Running"]
)
async def current_state(self) -> ContainerState:
"""Return current state of container."""
if docker_container := await self._get_container():
return _container_state_from_model(docker_container)
if container_metadata := await self._get_container():
return _container_state_from_model(container_metadata)
return ContainerState.UNKNOWN
@Job(name="docker_interface_attach", concurrency=JobConcurrency.GROUP_QUEUE)
@@ -491,14 +473,12 @@ class DockerInterface(JobGroup, ABC):
self, version: AwesomeVersion, *, skip_state_event_if_down: bool = False
) -> None:
"""Attach to running Docker container."""
with suppress(docker.errors.DockerException, requests.RequestException):
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
)
self._meta = docker_container.attrs
self.sys_docker.monitor.watch_container(docker_container)
with suppress(aiodocker.DockerError, requests.RequestException):
docker_container = await self.sys_docker.containers.get(self.name)
self._meta = await docker_container.show()
self.sys_docker.monitor.watch_container(self._meta)
state = _container_state_from_model(docker_container)
state = _container_state_from_model(self._meta)
if not (
skip_state_event_if_down
and state in [ContainerState.STOPPED, ContainerState.FAILED]
@@ -507,7 +487,7 @@ class DockerInterface(JobGroup, ABC):
self.sys_bus.fire_event(
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
DockerContainerStateEvent(
self.name, state, cast(str, docker_container.id), int(time())
self.name, state, docker_container.id, int(time())
),
)
@@ -563,11 +543,8 @@ class DockerInterface(JobGroup, ABC):
async def stop(self, remove_container: bool = True) -> None:
"""Stop/remove Docker container."""
with suppress(DockerNotFound):
await self.sys_run_in_executor(
self.sys_docker.stop_container,
self.name,
self.timeout,
remove_container,
await self.sys_docker.stop_container(
self.name, self.timeout, remove_container
)
@Job(
@@ -577,7 +554,7 @@ class DockerInterface(JobGroup, ABC):
)
def start(self) -> Awaitable[None]:
"""Start Docker container."""
return self.sys_run_in_executor(self.sys_docker.start_container, self.name)
return self.sys_docker.start_container(self.name)
@Job(
name="docker_interface_remove",
@@ -703,9 +680,7 @@ class DockerInterface(JobGroup, ABC):
)
def restart(self) -> Awaitable[None]:
"""Restart docker container."""
return self.sys_run_in_executor(
self.sys_docker.restart_container, self.name, self.timeout
)
return self.sys_docker.restart_container(self.name, self.timeout)
@Job(
name="docker_interface_execute_command",
@@ -718,22 +693,12 @@ class DockerInterface(JobGroup, ABC):
async def stats(self) -> DockerStats:
"""Read and return stats from container."""
stats = await self.sys_run_in_executor(
self.sys_docker.container_stats, self.name
)
stats = await self.sys_docker.container_stats(self.name)
return DockerStats(stats)
async def is_failed(self) -> bool:
"""Return True if Docker is failing state."""
if not (docker_container := await self._get_container()):
return False
# container is not running
if docker_container.status != "exited":
return False
# Check return value
return int(docker_container.attrs["State"]["ExitCode"]) != 0
return await self.current_state() == ContainerState.FAILED
async def get_latest_version(self) -> AwesomeVersion:
"""Return latest version of local image."""
@@ -771,8 +736,6 @@ class DockerInterface(JobGroup, ABC):
on_condition=DockerJobError,
concurrency=JobConcurrency.GROUP_REJECT,
)
def run_inside(self, command: str) -> Awaitable[CommandReturn]:
def run_inside(self, command: str) -> Awaitable[ExecReturn]:
"""Execute a command inside Docker container."""
return self.sys_run_in_executor(
self.sys_docker.container_run_inside, self.name, command
)
return self.sys_docker.container_run_inside(self.name, command)
+260 -222
View File
@@ -11,12 +11,12 @@ from ipaddress import IPv4Address
import json
import logging
import os
from pathlib import Path
from pathlib import Path, PurePath
import re
from typing import Any, Final, Literal, Self, cast
import aiodocker
from aiodocker.containers import DockerContainers
from aiodocker.containers import DockerContainer, DockerContainers
from aiodocker.images import DockerImages
from aiodocker.types import JSONObject
from aiohttp import ClientSession, ClientTimeout, UnixConnector
@@ -25,9 +25,8 @@ from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
from docker import errors as docker_errors
from docker.api.client import APIClient
from docker.client import DockerClient
from docker.models.containers import Container, ContainerCollection
from docker.models.containers import Container
from docker.models.networks import Network
from docker.types import Mount
from docker.types.daemon import CancellableStream
import requests
@@ -73,12 +72,20 @@ DOCKER_NETWORK_HOST: Final = "host"
RE_IMPORT_IMAGE_STREAM = re.compile(r"(^Loaded image ID: |^Loaded image: )(.+)$")
@attr.s(frozen=True)
@dataclass(slots=True, frozen=True)
class ExecReturn:
"""Return object from exec run."""
exit_code: int
output: bytes
@dataclass(slots=True, frozen=True)
class CommandReturn:
"""Return object from command run."""
exit_code: int = attr.ib()
output: bytes = attr.ib()
exit_code: int
log: list[str]
@attr.s(frozen=True)
@@ -313,11 +320,6 @@ class DockerAPI(CoreSysAttributes):
"""Return API containers."""
return self.docker.containers
@property
def containers_legacy(self) -> ContainerCollection:
"""Return API containers from Dockerpy."""
return self.dockerpy.containers
@property
def api(self) -> APIClient:
"""Return API containers."""
@@ -364,7 +366,7 @@ class DockerAPI(CoreSysAttributes):
mounts: list[DockerMount] | None = None,
ports: dict[str, str | int | None] | None = None,
oom_score_adj: int | None = None,
network_mode: Literal["host"] | None = None,
network_mode: str | None = None,
privileged: bool = False,
device_cgroup_rules: list[str] | None = None,
tmpfs: dict[str, str] | None = None,
@@ -375,6 +377,9 @@ class DockerAPI(CoreSysAttributes):
stdin_open: bool = False,
pid_mode: str | None = None,
uts_mode: str | None = None,
command: list[str] | None = None,
networking_config: dict[str, Any] | None = None,
working_dir: PurePath | None = None,
) -> JSONObject:
"""Map kwargs to create container config.
@@ -431,8 +436,14 @@ class DockerAPI(CoreSysAttributes):
env if val is None else f"{env}={val}"
for env, val in environment.items()
]
if entrypoint:
if entrypoint is not None:
config["Entrypoint"] = entrypoint
if command:
config["Cmd"] = command
if networking_config:
config["NetworkingConfig"] = networking_config
if working_dir:
config["WorkingDir"] = working_dir.as_posix()
# Set up networking
if dns:
@@ -459,53 +470,59 @@ class DockerAPI(CoreSysAttributes):
return config
async def run(
async def _run(
self,
image: str,
*,
name: str,
name: str | None = None,
tag: str = "latest",
hostname: str | None = None,
mounts: list[DockerMount] | None = None,
network_mode: Literal["host"] | None = None,
network_mode: str | None = None,
networking_config: dict[str, Any] | None = None,
ipv4: IPv4Address | None = None,
skip_cidfile: bool = False,
**kwargs,
) -> dict[str, Any]:
) -> DockerContainer:
"""Create a Docker container and run it."""
if not image or not name:
raise ValueError("image, name and tag cannot be an empty string!")
if not image or not tag:
raise ValueError("image and tag cannot be an empty string!")
# Setup cidfile and bind mount it
cidfile_path = self.coresys.config.path_cid_files / f"{name}.cid"
cidfile_path: Path | None = None
if name and not skip_cidfile:
# Setup cidfile and bind mount it
cidfile_path = self.coresys.config.path_cid_files / f"{name}.cid"
def create_cidfile() -> None:
# Remove the file/directory if it exists e.g. as a leftover from unclean shutdown
# Note: Can be a directory if Docker auto-started container with restart policy
# before Supervisor could write the CID file
with suppress(OSError):
if cidfile_path.is_dir():
cidfile_path.rmdir()
elif cidfile_path.is_file():
cidfile_path.unlink(missing_ok=True)
def create_cidfile() -> None:
# Remove the file/directory if it exists e.g. as a leftover from unclean shutdown
# Note: Can be a directory if Docker auto-started container with restart policy
# before Supervisor could write the CID file
with suppress(OSError):
if cidfile_path.is_dir():
cidfile_path.rmdir()
elif cidfile_path.is_file():
cidfile_path.unlink(missing_ok=True)
# Create empty CID file before adding it to volumes to prevent Docker
# from creating it as a directory if container auto-starts
cidfile_path.touch()
# Create empty CID file before adding it to volumes to prevent Docker
# from creating it as a directory if container auto-starts
cidfile_path.touch()
await self.sys_run_in_executor(create_cidfile)
await self.sys_run_in_executor(create_cidfile)
# Bind mount to /run/cid in container
extern_cidfile_path = self.coresys.config.path_extern_cid_files / f"{name}.cid"
cid_mount = DockerMount(
type=MountType.BIND,
source=extern_cidfile_path.as_posix(),
target="/run/cid",
read_only=True,
)
if mounts is None:
mounts = [cid_mount]
else:
mounts = [*mounts, cid_mount]
# Bind mount to /run/cid in container
extern_cidfile_path = (
self.coresys.config.path_extern_cid_files / f"{name}.cid"
)
cid_mount = DockerMount(
type=MountType.BIND,
source=extern_cidfile_path.as_posix(),
target="/run/cid",
read_only=True,
)
if mounts is None:
mounts = [cid_mount]
else:
mounts = [*mounts, cid_mount]
# Create container
config = self._create_container_config(
@@ -521,22 +538,23 @@ class DockerAPI(CoreSysAttributes):
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
raise DockerNotFound(
f"Image {image}:{tag} does not exist for {name}", _LOGGER.error
f"Image {image}:{tag} does not exist", _LOGGER.error
) from err
raise DockerAPIError(
f"Can't create container from {name}: {err}", _LOGGER.error
) from err
except requests.RequestException as err:
raise DockerRequestError(
f"Dockerd connection issue for {name}: {err}", _LOGGER.error
f"Can't create container from {image}:{tag}: {err}", _LOGGER.error
) from err
# Setup network and store container id in cidfile
def setup_network_and_cidfile() -> None:
# Store container id in cidfile
def setup_cidfile(cidfile_path: Path) -> None:
# Write cidfile
with cidfile_path.open("w", encoding="ascii") as cidfile:
cidfile.write(str(container.id))
if cidfile_path:
await self.sys_run_in_executor(setup_cidfile, cidfile_path)
# Setup network
def setup_network(network_mode: Literal["host"] | None) -> None:
# Attach network
if not network_mode:
alias = [hostname] if hostname else None
@@ -545,7 +563,9 @@ class DockerAPI(CoreSysAttributes):
container.id, name, alias=alias, ipv4=ipv4
)
except DockerError:
_LOGGER.warning("Can't attach %s to hassio-network!", name)
_LOGGER.warning(
"Can't attach %s to hassio-network!", name or container.id
)
else:
with suppress(DockerError):
self.network.detach_default_bridge(container.id, name)
@@ -561,18 +581,25 @@ class DockerAPI(CoreSysAttributes):
with suppress(docker_errors.NotFound):
host_network.disconnect(name, force=True)
await self.sys_run_in_executor(setup_network_and_cidfile)
if not networking_config and network_mode in ("host", None):
await self.sys_run_in_executor(setup_network, network_mode)
# Run container
try:
await container.start()
except aiodocker.DockerError as err:
raise DockerAPIError(f"Can't start {name}: {err}", _LOGGER.error) from err
except requests.RequestException as err:
raise DockerRequestError(
f"Dockerd connection issue for {name}: {err}", _LOGGER.error
raise DockerAPIError(
f"Can't start {name or container.id}: {err}", _LOGGER.error
) from err
return container
async def run(
self, image: str, *, name: str, tag: str = "latest", **kwargs
) -> dict[str, Any]:
"""Create and run a container from provided config, returning its inspect metadata."""
container = await self._run(image, name=name, tag=tag, **kwargs)
# Get container metadata after the container is started
try:
container_attrs = await container.show()
@@ -587,6 +614,45 @@ class DockerAPI(CoreSysAttributes):
return container_attrs
async def run_command(
self,
image: str,
command: list[str],
tag: str = "latest",
stdout: bool = True,
stderr: bool = True,
**kwargs: Any,
) -> CommandReturn:
"""Create a temporary container and run command, returning its output."""
_LOGGER.info("Running command '%s' on %s:%s", command, image, tag)
container: DockerContainer | None = None
try:
container = await self._run(
image,
tag=tag,
command=command,
detach=True,
network_mode=self.network.name,
networking_config={self.network.name: None},
skip_cidfile=True,
**kwargs,
)
# wait until command is done
result = await container.wait()
log = await container.log(stdout=stdout, stderr=stderr, follow=False)
except (DockerError, aiodocker.DockerError) as err:
raise DockerError(f"Can't execute command: {err}", _LOGGER.error) from err
finally:
# cleanup container
if container:
with suppress(aiodocker.DockerError):
await container.delete(force=True, v=True)
return CommandReturn(result["StatusCode"], log)
async def pull_image(
self,
job_id: str,
@@ -617,140 +683,103 @@ class DockerAPI(CoreSysAttributes):
sep = "@" if tag.startswith("sha256:") else ":"
return await self.images.inspect(f"{repository}{sep}{tag}")
def run_command(
self,
image: str,
version: str = "latest",
command: str | list[str] | None = None,
*,
mounts: list[DockerMount] | None = None,
**kwargs: Any,
) -> CommandReturn:
"""Create a temporary container and run command.
Need run inside executor.
"""
stdout = kwargs.get("stdout", True)
stderr = kwargs.get("stderr", True)
image_with_tag = f"{image}:{version}"
_LOGGER.info("Running command '%s' on %s", command, image_with_tag)
container = None
try:
container = self.dockerpy.containers.run(
image_with_tag,
command=command,
detach=True,
network=self.network.name,
use_config_proxy=False,
mounts=(
[cast(Mount, mount.to_dict()) for mount in mounts]
if mounts
else None
),
**kwargs,
)
# wait until command is done
result = container.wait()
output = container.logs(stdout=stdout, stderr=stderr)
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(f"Can't execute command: {err}", _LOGGER.error) from err
finally:
# cleanup container
if container:
with suppress(docker_errors.DockerException, requests.RequestException):
container.remove(force=True, v=True)
return CommandReturn(result["StatusCode"], output)
def repair(self) -> None:
async def repair(self) -> None:
"""Repair local docker overlayfs2 issues."""
_LOGGER.info("Prune stale containers")
try:
output = self.dockerpy.api.prune_containers()
_LOGGER.debug("Containers prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for containers prune: %s", err)
_LOGGER.info("Prune stale images")
try:
output = self.dockerpy.api.prune_images(filters={"dangling": False})
_LOGGER.debug("Images prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for images prune: %s", err)
def repair_docker_blocking():
_LOGGER.info("Prune stale containers")
try:
output = self.dockerpy.api.prune_containers()
_LOGGER.debug("Containers prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for containers prune: %s", err)
_LOGGER.info("Prune stale builds")
try:
output = self.dockerpy.api.prune_builds()
_LOGGER.debug("Builds prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for builds prune: %s", err)
_LOGGER.info("Prune stale images")
try:
output = self.dockerpy.api.prune_images(filters={"dangling": False})
_LOGGER.debug("Images prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for images prune: %s", err)
_LOGGER.info("Prune stale volumes")
try:
output = self.dockerpy.api.prune_volumes()
_LOGGER.debug("Volumes prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for volumes prune: %s", err)
_LOGGER.info("Prune stale builds")
try:
output = self.dockerpy.api.prune_builds()
_LOGGER.debug("Builds prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for builds prune: %s", err)
_LOGGER.info("Prune stale networks")
try:
output = self.dockerpy.api.prune_networks()
_LOGGER.debug("Networks prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for networks prune: %s", err)
_LOGGER.info("Prune stale volumes")
try:
output = self.dockerpy.api.prune_volumes()
_LOGGER.debug("Volumes prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for volumes prune: %s", err)
_LOGGER.info("Prune stale networks")
try:
output = self.dockerpy.api.prune_networks()
_LOGGER.debug("Networks prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for networks prune: %s", err)
await self.sys_run_in_executor(repair_docker_blocking)
_LOGGER.info("Fix stale container on hassio network")
try:
self.prune_networks(DOCKER_NETWORK)
await self.prune_networks(DOCKER_NETWORK)
except docker_errors.APIError as err:
_LOGGER.warning("Error for networks hassio prune: %s", err)
_LOGGER.info("Fix stale container on host network")
try:
self.prune_networks(DOCKER_NETWORK_HOST)
await self.prune_networks(DOCKER_NETWORK_HOST)
except docker_errors.APIError as err:
_LOGGER.warning("Error for networks host prune: %s", err)
def prune_networks(self, network_name: str) -> None:
async def prune_networks(self, network_name: str) -> None:
"""Prune stale container from network.
Fix: https://github.com/moby/moby/issues/23302
"""
network: Network = self.dockerpy.networks.get(network_name)
network: Network = await self.sys_run_in_executor(
self.dockerpy.networks.get, network_name
)
corrupt_containers: list[str] = []
for cid, data in network.attrs.get("Containers", {}).items():
try:
self.dockerpy.containers.get(cid)
await self.containers.get(cid)
continue
except docker_errors.NotFound:
except aiodocker.DockerError as err:
if err.status != HTTPStatus.NOT_FOUND:
_LOGGER.warning(
"Docker fatal error on container %s on %s: %s",
cid,
network_name,
err,
)
continue
_LOGGER.debug(
"Docker network %s is corrupt on container: %s", network_name, cid
)
except (docker_errors.DockerException, requests.RequestException):
_LOGGER.warning(
"Docker fatal error on container %s on %s", cid, network_name
)
continue
corrupt_containers.append(data.get("Name", cid))
with suppress(docker_errors.DockerException, requests.RequestException):
network.disconnect(data.get("Name", cid), force=True)
def disconnect_corrupt_containers():
for name in corrupt_containers:
with suppress(docker_errors.DockerException, requests.RequestException):
network.disconnect(name, force=True)
await self.sys_run_in_executor(disconnect_corrupt_containers)
async def container_is_initialized(
self, name: str, image: str, version: AwesomeVersion
) -> bool:
"""Return True if docker container exists in good state and is built from expected image."""
try:
docker_container = await self.sys_run_in_executor(
self.containers_legacy.get, name
)
docker_container = await self.containers.get(name)
container_metadata = await docker_container.show()
docker_image = await self.images.inspect(f"{image}:{version}")
except docker_errors.NotFound:
return False
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
return False
@@ -758,90 +787,93 @@ class DockerAPI(CoreSysAttributes):
f"Could not get container {name} or image {image}:{version} to check state: {err!s}",
_LOGGER.error,
) from err
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get container {name} or image {image}:{version} to check state: {err!s}",
_LOGGER.error,
) from err
# Check the image is correct and state is good
return (
docker_container.image is not None
and docker_container.image.id == docker_image["Id"]
and docker_container.status in ("exited", "running", "created")
metadata_image = container_metadata.get("ImageID", container_metadata["Image"])
status = container_metadata["State"]["Status"]
return metadata_image == docker_image["Id"] and status in (
"exited",
"running",
"created",
)
def stop_container(
async def stop_container(
self, name: str, timeout: int, remove_container: bool = True
) -> None:
"""Stop/remove Docker container."""
try:
docker_container: Container = self.containers_legacy.get(name)
except docker_errors.NotFound:
# Generally suppressed so we don't log this
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
docker_container = await self.containers.get(name)
container_metadata = await docker_container.show()
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
# Generally suppressed so we don't log this
raise DockerNotFound() from None
raise DockerError(
f"Could not get container {name} for stopping: {err!s}",
_LOGGER.error,
) from err
if docker_container.status == "running":
if container_metadata["State"]["Status"] == "running":
_LOGGER.info("Stopping %s application", name)
with suppress(docker_errors.DockerException, requests.RequestException):
docker_container.stop(timeout=timeout)
with suppress(aiodocker.DockerError):
await docker_container.stop(timeout=timeout)
if remove_container:
with suppress(docker_errors.DockerException, requests.RequestException):
with suppress(aiodocker.DockerError):
_LOGGER.info("Cleaning %s application", name)
docker_container.remove(force=True, v=True)
await docker_container.delete(force=True, v=True)
cidfile_path = self.coresys.config.path_cid_files / f"{name}.cid"
with suppress(OSError):
cidfile_path.unlink(missing_ok=True)
await self.sys_run_in_executor(cidfile_path.unlink, missing_ok=True)
def start_container(self, name: str) -> None:
async def start_container(self, name: str) -> None:
"""Start Docker container."""
try:
docker_container: Container = self.containers_legacy.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"{name} not found for starting up", _LOGGER.error
) from None
except (docker_errors.DockerException, requests.RequestException) as err:
docker_container = await self.containers.get(name)
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
raise DockerNotFound(
f"{name} not found for starting up", _LOGGER.error
) from None
raise DockerError(
f"Could not get {name} for starting up", _LOGGER.error
) from err
_LOGGER.info("Starting %s", name)
try:
docker_container.start()
except (docker_errors.DockerException, requests.RequestException) as err:
await docker_container.start()
except aiodocker.DockerError as err:
raise DockerError(f"Can't start {name}: {err}", _LOGGER.error) from err
def restart_container(self, name: str, timeout: int) -> None:
async def restart_container(self, name: str, timeout: int) -> None:
"""Restart docker container."""
try:
container: Container = self.containers_legacy.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for restarting", _LOGGER.warning
) from None
except (docker_errors.DockerException, requests.RequestException) as err:
container = await self.containers.get(name)
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
raise DockerNotFound(
f"Container {name} not found for restarting", _LOGGER.warning
) from None
raise DockerError(
f"Could not get container {name} for restarting: {err!s}", _LOGGER.error
) from err
_LOGGER.info("Restarting %s", name)
try:
container.restart(timeout=timeout)
except (docker_errors.DockerException, requests.RequestException) as err:
await container.restart(timeout=timeout)
except aiodocker.DockerError as err:
raise DockerError(f"Can't restart {name}: {err}", _LOGGER.warning) from err
def container_logs(self, name: str, tail: int = 100) -> bytes:
"""Return Docker logs of container."""
"""Return Docker logs of container.
Must be run in executor.
"""
# Remains on docker py for now because aiodocker doesn't seem to have a way to get
# the raw binary of the logs. Only provides list[str] or AsyncIterator[str] options.
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.dockerpy.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for logs", _LOGGER.warning
@@ -858,53 +890,59 @@ class DockerAPI(CoreSysAttributes):
f"Can't grep logs from {name}: {err}", _LOGGER.warning
) from err
def container_stats(self, name: str) -> dict[str, Any]:
async def container_stats(self, name: str) -> dict[str, Any]:
"""Read and return stats from container."""
try:
docker_container: Container = self.containers_legacy.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for stats", _LOGGER.warning
) from None
except (docker_errors.DockerException, requests.RequestException) as err:
docker_container = await self.containers.get(name)
container_metadata = await docker_container.show()
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
raise DockerNotFound(
f"Container {name} not found for stats", _LOGGER.warning
) from None
raise DockerError(
f"Could not inspect container '{name}': {err!s}", _LOGGER.error
) from err
# container is not running
if docker_container.status != "running":
if container_metadata["State"]["Status"] != "running":
raise DockerError(f"Container {name} is not running", _LOGGER.error)
try:
# When stream=False, stats() returns dict, not Iterator
return cast(dict[str, Any], docker_container.stats(stream=False))
except (docker_errors.DockerException, requests.RequestException) as err:
stats = await docker_container.stats(stream=False)
except aiodocker.DockerError as err:
raise DockerError(
f"Can't read stats from {name}: {err}", _LOGGER.error
) from err
def container_run_inside(self, name: str, command: str) -> CommandReturn:
if not stats:
raise DockerError(f"Could not get stats for {name}", _LOGGER.error)
return stats[-1]
async def container_run_inside(self, name: str, command: str) -> ExecReturn:
"""Execute a command inside Docker container."""
try:
docker_container: Container = self.containers_legacy.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for running command", _LOGGER.warning
) from None
except (docker_errors.DockerException, requests.RequestException) as err:
docker_container = await self.containers.get(name)
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
raise DockerNotFound(
f"Container {name} not found for running command", _LOGGER.warning
) from None
raise DockerError(
f"Can't get container {name} to run command: {err!s}"
) from err
# Execute
try:
code, output = docker_container.exec_run(command)
except (docker_errors.DockerException, requests.RequestException) as err:
docker_exec = await docker_container.exec(command)
output = await docker_exec.start(detach=True)
exec_metadata = await docker_exec.inspect()
except aiodocker.DockerError as err:
raise DockerError(
f"Can't run command in container {name}: {err!s}"
) from err
return CommandReturn(code, output)
return ExecReturn(exec_metadata["ExitCode"], output)
async def remove_image(
self, image: str, version: AwesomeVersion, latest: bool = True
+9 -4
View File
@@ -4,8 +4,8 @@ from contextlib import suppress
from dataclasses import dataclass
import logging
from threading import Thread
from typing import Any
from docker.models.containers import Container
from docker.types.daemon import CancellableStream
from ..const import BusEvent
@@ -35,10 +35,15 @@ class DockerMonitor(CoreSysAttributes, Thread):
self._events: CancellableStream | None = None
self._unlabeled_managed_containers: list[str] = []
def watch_container(self, container: Container):
def watch_container(self, container_metadata: dict[str, Any]):
"""If container is missing the managed label, add name to list."""
if LABEL_MANAGED not in container.labels and container.name:
self._unlabeled_managed_containers += [container.name]
labels: dict[str, str] = container_metadata.get("Config", {}).get("Labels", {})
name: str | None = container_metadata.get("Name")
if name:
name = name.lstrip("/")
if LABEL_MANAGED not in labels and name:
self._unlabeled_managed_containers += [name]
async def load(self):
"""Start docker events monitor."""
+5 -5
View File
@@ -220,7 +220,7 @@ class DockerNetwork:
def attach_container(
self,
container_id: str,
name: str,
name: str | None,
alias: list[str] | None = None,
ipv4: IPv4Address | None = None,
) -> None:
@@ -233,7 +233,7 @@ class DockerNetwork:
self.network.reload()
# Check stale Network
if name in (
if name and name in (
val.get("Name") for val in self.network.attrs.get("Containers", {}).values()
):
self.stale_cleanup(name)
@@ -250,7 +250,7 @@ class DockerNetwork:
requests.RequestException,
) as err:
raise DockerError(
f"Can't connect {name} to Supervisor network: {err}",
f"Can't connect {name or container_id} to Supervisor network: {err}",
_LOGGER.error,
) from err
@@ -280,7 +280,7 @@ class DockerNetwork:
if container_id not in self.containers:
self.attach_container(container_id, name, alias, ipv4)
def detach_default_bridge(self, container_id: str, name: str) -> None:
def detach_default_bridge(self, container_id: str, name: str | None = None) -> None:
"""Detach default Docker bridge.
Need run inside executor.
@@ -296,7 +296,7 @@ class DockerNetwork:
requests.RequestException,
) as err:
raise DockerError(
f"Can't disconnect {name} from default network: {err}",
f"Can't disconnect {name or container_id} from default network: {err}",
_LOGGER.warning,
) from err
+45 -38
View File
@@ -1,15 +1,12 @@
"""Init file for Supervisor Docker object."""
import asyncio
from collections.abc import Awaitable
from ipaddress import IPv4Address
import logging
import os
import aiodocker
from awesomeversion.awesomeversion import AwesomeVersion
import docker
import requests
from ..exceptions import DockerError
from ..jobs.const import JobConcurrency
@@ -53,13 +50,13 @@ class DockerSupervisor(DockerInterface):
) -> None:
"""Attach to running docker container."""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err
docker_container = await self.sys_docker.containers.get(self.name)
self._meta = await docker_container.show()
except aiodocker.DockerError as err:
raise DockerError(
f"Could not get supervisor container metadata: {err!s}"
) from err
self._meta = docker_container.attrs
_LOGGER.info(
"Attaching to Supervisor %s with version %s",
self.image,
@@ -81,32 +78,32 @@ class DockerSupervisor(DockerInterface):
)
@Job(name="docker_supervisor_retag", concurrency=JobConcurrency.GROUP_QUEUE)
def retag(self) -> Awaitable[None]:
async def retag(self) -> None:
"""Retag latest image to version."""
return self.sys_run_in_executor(self._retag)
def _retag(self) -> None:
"""Retag latest image to version.
Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers_legacy.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
docker_container = await self.sys_docker.containers.get(self.name)
container_metadata = await docker_container.show()
except aiodocker.DockerError as err:
raise DockerError(
f"Could not get Supervisor container for retag: {err}", _LOGGER.error
) from err
if not self.image or not docker_container.image:
# See https://github.com/docker/docker-py/blob/df3f8e2abc5a03de482e37214dddef9e0cee1bb1/docker/models/containers.py#L41
metadata_image = container_metadata.get("ImageID", container_metadata["Image"])
if not self.image or not metadata_image:
raise DockerError(
"Could not locate image from container metadata for retag",
_LOGGER.error,
)
try:
docker_container.image.tag(self.image, tag=str(self.version))
docker_container.image.tag(self.image, tag="latest")
except (docker.errors.DockerException, requests.RequestException) as err:
await asyncio.gather(
self.sys_docker.images.tag(
metadata_image, self.image, tag=str(self.version)
),
self.sys_docker.images.tag(metadata_image, self.image, tag="latest"),
)
except aiodocker.DockerError as err:
raise DockerError(
f"Can't retag Supervisor version: {err}", _LOGGER.error
) from err
@@ -118,28 +115,38 @@ class DockerSupervisor(DockerInterface):
async def update_start_tag(self, image: str, version: AwesomeVersion) -> None:
"""Update start tag to new version."""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
)
docker_image = await self.sys_docker.images.inspect(f"{image}:{version!s}")
except (
aiodocker.DockerError,
docker.errors.DockerException,
requests.RequestException,
) as err:
docker_container = await self.sys_docker.containers.get(self.name)
container_metadata = await docker_container.show()
except aiodocker.DockerError as err:
raise DockerError(
f"Can't get image or container to fix start tag: {err}", _LOGGER.error
f"Can't get container to fix start tag: {err}", _LOGGER.error
) from err
if not docker_container.image:
# See https://github.com/docker/docker-py/blob/df3f8e2abc5a03de482e37214dddef9e0cee1bb1/docker/models/containers.py#L41
metadata_image = container_metadata.get("ImageID", container_metadata["Image"])
if not metadata_image:
raise DockerError(
"Cannot locate image from container metadata to fix start tag",
_LOGGER.error,
)
try:
container_image, new_image = await asyncio.gather(
self.sys_docker.images.inspect(metadata_image),
self.sys_docker.images.inspect(f"{image}:{version!s}"),
)
except aiodocker.DockerError as err:
raise DockerError(
f"Can't get image metadata to fix start tag: {err}", _LOGGER.error
) from err
try:
# Find start tag
for tag in docker_container.image.tags:
for tag in container_image["RepoTags"]:
# See https://github.com/docker/docker-py/blob/df3f8e2abc5a03de482e37214dddef9e0cee1bb1/docker/models/images.py#L47
if tag == "<none>:<none>":
continue
start_image = tag.partition(":")[0]
start_tag = tag.partition(":")[2] or "latest"
@@ -148,12 +155,12 @@ class DockerSupervisor(DockerInterface):
continue
await asyncio.gather(
self.sys_docker.images.tag(
docker_image["Id"], start_image, tag=start_tag
new_image["Id"], start_image, tag=start_tag
),
self.sys_docker.images.tag(
docker_image["Id"], start_image, tag=version.string
new_image["Id"], start_image, tag=version.string
),
)
except (aiodocker.DockerError, requests.RequestException) as err:
except aiodocker.DockerError as err:
raise DockerError(f"Can't fix start tag: {err}", _LOGGER.error) from err
+12 -10
View File
@@ -13,6 +13,8 @@ from typing import Final
from awesomeversion import AwesomeVersion
from supervisor.utils import remove_colors
from ..const import ATTR_HOMEASSISTANT, BusEvent
from ..coresys import CoreSys
from ..docker.const import ContainerState
@@ -33,7 +35,6 @@ from ..jobs.const import JOB_GROUP_HOME_ASSISTANT_CORE, JobConcurrency, JobThrot
from ..jobs.decorator import Job, JobCondition
from ..jobs.job_group import JobGroup
from ..resolution.const import ContextType, IssueType
from ..utils import convert_to_ascii
from ..utils.sentry import async_capture_exception
from .const import (
LANDINGPAGE,
@@ -421,13 +422,6 @@ class HomeAssistantCore(JobGroup):
await self.instance.stop()
await self.start()
def logs(self) -> Awaitable[bytes]:
"""Get HomeAssistant docker logs.
Return a coroutine.
"""
return self.instance.logs()
async def stats(self) -> DockerStats:
"""Return stats of Home Assistant."""
try:
@@ -458,7 +452,15 @@ class HomeAssistantCore(JobGroup):
"""Run Home Assistant config check."""
try:
result = await self.instance.execute_command(
"python3 -m homeassistant -c /config --script check_config"
[
"python3",
"-m",
"homeassistant",
"-c",
"/config",
"--script",
"check_config",
]
)
except DockerError as err:
raise HomeAssistantError() from err
@@ -468,7 +470,7 @@ class HomeAssistantCore(JobGroup):
raise HomeAssistantError("Fatal error on config check!", _LOGGER.error)
# Convert output
log = convert_to_ascii(result.output)
log = remove_colors("\n".join(result.log))
_LOGGER.debug("Result config check: %s", log.strip())
# Parse output
-7
View File
@@ -76,13 +76,6 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
"""Return True if a task is in progress."""
return self.instance.in_progress
def logs(self) -> Awaitable[bytes]:
"""Get docker plugin logs.
Return Coroutine.
"""
return self.instance.logs()
def is_running(self) -> Awaitable[bool]:
"""Return True if Docker container is running.
@@ -1,9 +1,9 @@
"""Evaluation class for container."""
import asyncio
import logging
from docker.errors import DockerException
from requests import RequestException
import aiodocker
from ...const import CoreState
from ...coresys import CoreSys
@@ -73,10 +73,9 @@ class EvaluateContainer(EvaluateBase):
self._images.clear()
try:
containers = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.list
)
except (DockerException, RequestException) as err:
containers = await self.sys_docker.containers.list()
containers_metadata = await asyncio.gather(*[c.show() for c in containers])
except aiodocker.DockerError as err:
_LOGGER.error("Corrupt docker overlayfs detect: %s", err)
self.sys_resolution.create_issue(
IssueType.CORRUPT_DOCKER,
@@ -87,8 +86,8 @@ class EvaluateContainer(EvaluateBase):
images = {
image
for container in containers
if (config := container.attrs.get("Config")) is not None
for container in containers_metadata
if (config := container.get("Config")) is not None
and (image := config.get("Image")) is not None
}
for image in images:
+3 -3
View File
@@ -19,9 +19,9 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
RE_STRING: re.Pattern = re.compile(r"\x1b(\[.*?[@-~]|\].*?(\x07|\x1b\\))")
def convert_to_ascii(raw: bytes) -> str:
"""Convert binary to ascii and remove colors."""
return RE_STRING.sub("", raw.decode())
def remove_colors(log: str) -> str:
"""Remove color characters from log."""
return RE_STRING.sub("", log)
def process_lock(method):
+137 -153
View File
@@ -9,8 +9,8 @@ from typing import Any
from unittest.mock import MagicMock, PropertyMock, call, patch
import aiodocker
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
from docker.errors import APIError, DockerException, NotFound
import pytest
from securetar import SecureTarFile
@@ -33,7 +33,6 @@ from supervisor.exceptions import (
)
from supervisor.hardware.helper import HwHelper
from supervisor.ingress import Ingress
from supervisor.store.repository import Repository
from supervisor.utils.dt import utcnow
from .test_manager import BOOT_FAIL_ISSUE, BOOT_FAIL_SUGGESTIONS
@@ -218,18 +217,14 @@ async def test_watchdog_on_stop(coresys: CoreSys, install_addon_ssh: Addon) -> N
restart.assert_called_once()
async def test_listener_attached_on_install(
coresys: CoreSys, mock_amd64_arch_supported: None, test_repository
):
@pytest.mark.usefixtures("mock_amd64_arch_supported", "test_repository")
async def test_listener_attached_on_install(coresys: CoreSys):
"""Test events listener attached on addon install."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
container_collection = MagicMock()
container_collection.get.side_effect = DockerException()
coresys.docker.containers.get.side_effect = aiodocker.DockerError(
500, {"message": "fail"}
)
with (
patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
new=PropertyMock(return_value=container_collection),
),
patch("pathlib.Path.is_dir", return_value=True),
patch(
"supervisor.addons.addon.Addon.need_build",
@@ -253,9 +248,9 @@ async def test_listener_attached_on_install(
@pytest.mark.parametrize(
"boot_timedelta,restart_count", [(timedelta(), 1), (timedelta(days=1), 0)]
)
@pytest.mark.usefixtures("test_repository")
async def test_watchdog_during_attach(
coresys: CoreSys,
test_repository: Repository,
boot_timedelta: timedelta,
restart_count: int,
):
@@ -287,9 +282,8 @@ async def test_watchdog_during_attach(
assert restart.call_count == restart_count
async def test_install_update_fails_if_out_of_date(
coresys: CoreSys, install_addon_ssh: Addon
):
@pytest.mark.usefixtures("install_addon_ssh")
async def test_install_update_fails_if_out_of_date(coresys: CoreSys):
"""Test install or update of addon fails when supervisor or plugin is out of date."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -342,13 +336,8 @@ async def test_listeners_removed_on_uninstall(
)
async def test_start(
coresys: CoreSys,
install_addon_ssh: Addon,
container,
tmp_supervisor_data,
path_extern,
) -> None:
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_start(coresys: CoreSys, install_addon_ssh: Addon) -> None:
"""Test starting an addon without healthcheck."""
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -364,17 +353,16 @@ async def test_start(
@pytest.mark.parametrize("state", [ContainerState.HEALTHY, ContainerState.UNHEALTHY])
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_start_wait_healthcheck(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
state: ContainerState,
tmp_supervisor_data,
path_extern,
) -> None:
"""Test starting an addon with a healthcheck waits for health status."""
install_addon_ssh.path_data.mkdir()
container.attrs["Config"] = {"Healthcheck": "exists"}
container.show.return_value["Config"] = {"Healthcheck": "exists"}
await install_addon_ssh.load()
await asyncio.sleep(0)
assert install_addon_ssh.state == AddonState.STOPPED
@@ -395,13 +383,9 @@ async def test_start_wait_healthcheck(
assert install_addon_ssh.state == AddonState.STARTED
@pytest.mark.usefixtures("coresys", "tmp_supervisor_data", "path_extern")
async def test_start_timeout(
coresys: CoreSys,
install_addon_ssh: Addon,
caplog: pytest.LogCaptureFixture,
container,
tmp_supervisor_data,
path_extern,
install_addon_ssh: Addon, caplog: pytest.LogCaptureFixture
) -> None:
"""Test starting an addon times out while waiting."""
install_addon_ssh.path_data.mkdir()
@@ -421,13 +405,8 @@ async def test_start_timeout(
assert "Timeout while waiting for addon Terminal & SSH to start" in caplog.text
async def test_restart(
coresys: CoreSys,
install_addon_ssh: Addon,
container,
tmp_supervisor_data,
path_extern,
) -> None:
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_restart(coresys: CoreSys, install_addon_ssh: Addon) -> None:
"""Test restarting an addon."""
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -443,16 +422,16 @@ async def test_restart(
@pytest.mark.parametrize("status", ["running", "stopped"])
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
status: str,
tmp_supervisor_data,
path_extern,
) -> None:
"""Test backing up an addon."""
container.status = status
container.show.return_value["State"]["Status"] = status
container.show.return_value["State"]["Running"] = status == "running"
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -461,16 +440,16 @@ async def test_backup(
@pytest.mark.parametrize("status", ["running", "stopped"])
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_no_config(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
status: str,
tmp_supervisor_data,
path_extern,
) -> None:
"""Test backing up an addon with deleted config directory."""
container.status = status
container.show.return_value["State"]["Status"] = status
container.show.return_value["State"]["Running"] = status == "running"
install_addon_ssh.data["map"].append({"type": "addon_config", "read_only": False})
assert not install_addon_ssh.path_config.exists()
@@ -481,16 +460,15 @@ async def test_backup_no_config(
assert await install_addon_ssh.backup(tarfile) is None
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_with_pre_post_command(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
container: DockerContainer,
) -> None:
"""Test backing up an addon with pre and post command."""
container.status = "running"
container.exec_run.return_value = (0, None)
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -503,32 +481,63 @@ async def test_backup_with_pre_post_command(
):
assert await install_addon_ssh.backup(tarfile) is None
assert container.exec_run.call_count == 2
assert container.exec_run.call_args_list[0].args[0] == "backup_pre"
assert container.exec_run.call_args_list[1].args[0] == "backup_post"
assert container.exec.call_count == 2
assert container.exec.call_args_list[0].args[0] == "backup_pre"
assert container.exec.call_args_list[1].args[0] == "backup_post"
@pytest.mark.parametrize(
("container_get_side_effect", "exec_run_side_effect", "exc_type_raised"),
(
"container_get_side_effect",
"exec_start_side_effect",
"exec_inspect_side_effect",
"exc_type_raised",
),
[
(NotFound("missing"), [(1, None)], AddonUnknownError),
(DockerException(), [(1, None)], AddonUnknownError),
(None, DockerException(), AddonUnknownError),
(None, [(1, None)], AddonPrePostBackupCommandReturnedError),
(
aiodocker.DockerError(HTTPStatus.NOT_FOUND, {"message": "missing"}),
None,
[{"ExitCode": 1}],
AddonUnknownError,
),
(
aiodocker.DockerError(HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "bad"}),
None,
[{"ExitCode": 1}],
AddonUnknownError,
),
(
None,
aiodocker.DockerError(HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "bad"}),
[{"ExitCode": 1}],
AddonUnknownError,
),
(
None,
None,
aiodocker.DockerError(HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "bad"}),
AddonUnknownError,
),
(None, None, [{"ExitCode": 1}], AddonPrePostBackupCommandReturnedError),
],
)
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_with_pre_command_error(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container_get_side_effect: DockerException | None,
exec_run_side_effect: DockerException | list[tuple[int, Any]],
container_get_side_effect: aiodocker.DockerError | None,
exec_start_side_effect: aiodocker.DockerError | None,
exec_inspect_side_effect: aiodocker.DockerError | list[dict[str, Any]] | None,
exc_type_raised: type[HassioError],
) -> None:
"""Test backing up an addon with error running pre command."""
coresys.docker.containers_legacy.get.side_effect = container_get_side_effect
container.exec_run.side_effect = exec_run_side_effect
coresys.docker.containers.get.side_effect = container_get_side_effect
coresys.docker.containers.get.return_value.exec.return_value.start.side_effect = (
exec_start_side_effect
)
coresys.docker.containers.get.return_value.exec.return_value.inspect.side_effect = (
exec_inspect_side_effect
)
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -545,16 +554,16 @@ async def test_backup_with_pre_command_error(
@pytest.mark.parametrize("status", ["running", "stopped"])
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_cold_mode(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
status: str,
tmp_supervisor_data,
path_extern,
) -> None:
"""Test backing up an addon in cold mode."""
container.status = status
container.show.return_value["State"]["Status"] = status
container.show.return_value["State"]["Running"] = status == "running"
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -574,22 +583,25 @@ async def test_backup_cold_mode(
assert bool(start_task) is (status == "running")
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_cold_mode_with_watchdog(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
container: DockerContainer,
):
"""Test backing up an addon in cold mode with watchdog active."""
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
install_addon_ssh.watchdog = True
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
# Clear task queue, including the event fired for running container
await asyncio.sleep(0)
# Simulate stop firing the docker event for stopped container like it normally would
async def mock_stop(*args, **kwargs):
container.status = "stopped"
container.show.return_value["State"]["Status"] = "stopped"
container.show.return_value["State"]["Running"] = False
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.STOPPED)
# Patching out the normal end of backup process leaves the container in a stopped state
@@ -613,15 +625,10 @@ async def test_backup_cold_mode_with_watchdog(
@pytest.mark.parametrize("status", ["running", "stopped"])
async def test_restore(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
status: str,
tmp_supervisor_data,
path_extern,
mock_aarch64_arch_supported: None,
) -> None:
@pytest.mark.usefixtures(
"tmp_supervisor_data", "path_extern", "mock_aarch64_arch_supported"
)
async def test_restore(coresys: CoreSys, install_addon_ssh: Addon, status: str) -> None:
"""Test restoring an addon."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
install_addon_ssh.path_data.mkdir()
@@ -634,16 +641,15 @@ async def test_restore(
assert bool(start_task) is (status == "running")
@pytest.mark.usefixtures(
"tmp_supervisor_data", "path_extern", "mock_aarch64_arch_supported"
)
async def test_restore_while_running(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
mock_aarch64_arch_supported: None,
coresys: CoreSys, install_addon_ssh: Addon, container: DockerContainer
):
"""Test restore of a running addon."""
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -659,16 +665,15 @@ async def test_restore_while_running(
container.stop.assert_called_once()
@pytest.mark.usefixtures(
"tmp_supervisor_data", "path_extern", "mock_aarch64_arch_supported"
)
async def test_restore_while_running_with_watchdog(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
mock_aarch64_arch_supported: None,
coresys: CoreSys, install_addon_ssh: Addon, container: DockerContainer
):
"""Test restore of a running addon with watchdog interference."""
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
install_addon_ssh.path_data.mkdir()
install_addon_ssh.watchdog = True
@@ -676,7 +681,8 @@ async def test_restore_while_running_with_watchdog(
# Simulate stop firing the docker event for stopped container like it normally would
async def mock_stop(*args, **kwargs):
container.status = "stopped"
container.show.return_value["State"]["Status"] = "stopped"
container.show.return_value["State"]["Running"] = False
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.STOPPED)
# We restore a stopped backup so restore will not restart it
@@ -694,14 +700,15 @@ async def test_restore_while_running_with_watchdog(
restart.assert_not_called()
@pytest.mark.usefixtures("coresys")
async def test_start_when_running(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test starting an addon without healthcheck."""
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
await install_addon_ssh.load()
await asyncio.sleep(0)
assert install_addon_ssh.state == AddonState.STARTED
@@ -714,13 +721,8 @@ async def test_start_when_running(
assert "local_ssh is already running" in caplog.text
async def test_local_example_install(
coresys: CoreSys,
container: MagicMock,
tmp_supervisor_data: Path,
test_repository,
mock_aarch64_arch_supported: None,
):
@pytest.mark.usefixtures("test_repository", "mock_aarch64_arch_supported")
async def test_local_example_install(coresys: CoreSys, tmp_supervisor_data: Path):
"""Test install of an addon."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
assert not (
@@ -734,12 +736,9 @@ async def test_local_example_install(
assert data_dir.is_dir()
@pytest.mark.usefixtures("coresys", "path_extern")
async def test_local_example_start(
coresys: CoreSys,
container: MagicMock,
tmp_supervisor_data: Path,
install_addon_example: Addon,
path_extern,
tmp_supervisor_data: Path, install_addon_example: Addon
):
"""Test start of an addon."""
install_addon_example.path_data.mkdir()
@@ -756,12 +755,8 @@ async def test_local_example_start(
assert addon_config_dir.is_dir()
async def test_local_example_ingress_port_set(
coresys: CoreSys,
container: MagicMock,
tmp_supervisor_data: Path,
install_addon_example: Addon,
):
@pytest.mark.usefixtures("coresys", "tmp_supervisor_data")
async def test_local_example_ingress_port_set(install_addon_example: Addon):
"""Test start of an addon."""
install_addon_example.path_data.mkdir()
await install_addon_example.load()
@@ -769,11 +764,9 @@ async def test_local_example_ingress_port_set(
assert install_addon_example.ingress_port != 0
@pytest.mark.usefixtures("tmp_supervisor_data")
async def test_addon_pulse_error(
coresys: CoreSys,
install_addon_example: Addon,
caplog: pytest.LogCaptureFixture,
tmp_supervisor_data,
coresys: CoreSys, install_addon_example: Addon, caplog: pytest.LogCaptureFixture
):
"""Test error writing pulse config for addon."""
with patch(
@@ -793,7 +786,8 @@ async def test_addon_pulse_error(
assert coresys.core.healthy is False
def test_auto_update_available(coresys: CoreSys, install_addon_example: Addon):
@pytest.mark.usefixtures("coresys")
def test_auto_update_available(install_addon_example: Addon):
"""Test auto update availability based on versions."""
assert install_addon_example.auto_update is False
assert install_addon_example.need_update is False
@@ -838,11 +832,9 @@ async def test_paths_cache(coresys: CoreSys, install_addon_ssh: Addon):
assert install_addon_ssh.with_documentation
@pytest.mark.usefixtures("mock_amd64_arch_supported")
async def test_addon_loads_wrong_image(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
mock_amd64_arch_supported,
coresys: CoreSys, install_addon_ssh: Addon, container: DockerContainer
):
"""Test addon is loaded with incorrect image for architecture."""
coresys.addons.data.save_data.reset_mock()
@@ -854,7 +846,7 @@ async def test_addon_loads_wrong_image(
patch.object(
coresys.docker,
"run_command",
new=PropertyMock(return_value=CommandReturn(0, b"Build successful")),
return_value=CommandReturn(0, ["Build successful"]),
) as mock_run_command,
patch.object(
type(coresys.config),
@@ -864,7 +856,7 @@ async def test_addon_loads_wrong_image(
):
await install_addon_ssh.load()
container.remove.assert_called_with(force=True, v=True)
container.delete.assert_called_with(force=True, v=True)
# one for removing the addon, one for removing the addon builder
assert coresys.docker.images.delete.call_count == 2
@@ -876,7 +868,7 @@ async def test_addon_loads_wrong_image(
)
mock_run_command.assert_called_once()
assert mock_run_command.call_args.args[0] == "docker.io/library/docker"
assert mock_run_command.call_args.kwargs["version"] == "1.0.0-cli"
assert mock_run_command.call_args.kwargs["tag"] == "1.0.0-cli"
command = mock_run_command.call_args.kwargs["command"]
assert is_in_list(
["--platform", "linux/amd64"],
@@ -890,12 +882,8 @@ async def test_addon_loads_wrong_image(
coresys.addons.data.save_data.assert_called_once()
async def test_addon_loads_missing_image(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
mock_amd64_arch_supported,
):
@pytest.mark.usefixtures("mock_amd64_arch_supported")
async def test_addon_loads_missing_image(coresys: CoreSys, install_addon_ssh: Addon):
"""Test addon corrects a missing image on load."""
coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
HTTPStatus.NOT_FOUND, {"message": "missing"}
@@ -906,7 +894,7 @@ async def test_addon_loads_missing_image(
patch.object(
coresys.docker,
"run_command",
new=PropertyMock(return_value=CommandReturn(0, b"Build successful")),
return_value=CommandReturn(0, ["Build successful"]),
) as mock_run_command,
patch.object(
type(coresys.config),
@@ -918,7 +906,7 @@ async def test_addon_loads_missing_image(
mock_run_command.assert_called_once()
assert mock_run_command.call_args.args[0] == "docker.io/library/docker"
assert mock_run_command.call_args.kwargs["version"] == "1.0.0-cli"
assert mock_run_command.call_args.kwargs["tag"] == "1.0.0-cli"
command = mock_run_command.call_args.kwargs["command"]
assert is_in_list(
["--platform", "linux/amd64"],
@@ -931,16 +919,9 @@ async def test_addon_loads_missing_image(
assert install_addon_ssh.image == "local/amd64-addon-ssh"
@pytest.mark.parametrize(
"pull_image_exc",
[APIError("error"), aiodocker.DockerError(400, {"message": "error"})],
)
@pytest.mark.usefixtures("container", "mock_amd64_arch_supported")
async def test_addon_load_succeeds_with_docker_errors(
coresys: CoreSys,
install_addon_ssh: Addon,
caplog: pytest.LogCaptureFixture,
pull_image_exc: Exception,
coresys: CoreSys, install_addon_ssh: Addon, caplog: pytest.LogCaptureFixture
):
"""Docker errors while building/pulling an image during load should not raise and fail setup."""
# Build env invalid failure
@@ -959,26 +940,29 @@ async def test_addon_load_succeeds_with_docker_errors(
CoreConfig, "local_to_extern_path", return_value="/addon/path/on/host"
),
patch.object(
DockerAPI,
"run_command",
return_value=MagicMock(exit_code=1, output=b"error"),
DockerAPI, "run_command", return_value=CommandReturn(1, ["error"])
),
):
await install_addon_ssh.load()
assert (
"Can't build local/amd64-addon-ssh:9.2.1: Docker build failed for local/amd64-addon-ssh:9.2.1 (exit code 1). Build output:\nerror"
"Docker build failed for local/amd64-addon-ssh:9.2.1 (exit code 1). Build output:\nerror"
in caplog.text
)
# Image pull failure
install_addon_ssh.data["image"] = "test/amd64-addon-ssh"
caplog.clear()
with patch.object(DockerAPI, "pull_image", side_effect=pull_image_exc):
with patch.object(
DockerAPI,
"pull_image",
side_effect=aiodocker.DockerError(400, {"message": "error"}),
):
await install_addon_ssh.load()
assert "Can't install test/amd64-addon-ssh:9.2.1:" in caplog.text
async def test_addon_manual_only_boot(coresys: CoreSys, install_addon_example: Addon):
@pytest.mark.usefixtures("coresys")
async def test_addon_manual_only_boot(install_addon_example: Addon):
"""Test an addon with manual only boot mode."""
assert install_addon_example.boot_config == "manual_only"
assert install_addon_example.boot == "manual"
+24 -27
View File
@@ -4,8 +4,9 @@ import asyncio
from collections.abc import AsyncGenerator, Generator
from copy import deepcopy
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, call, patch
from unittest.mock import AsyncMock, Mock, PropertyMock, call, patch
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
import pytest
@@ -191,8 +192,9 @@ async def test_addon_shutdown_error(
)
@pytest.mark.usefixtures("websession")
async def test_addon_uninstall_removes_discovery(
coresys: CoreSys, install_addon_ssh: Addon, websession: MagicMock
coresys: CoreSys, install_addon_ssh: Addon
):
"""Test discovery messages removed when addon uninstalled."""
assert coresys.discovery.list_messages == []
@@ -223,9 +225,8 @@ async def test_addon_uninstall_removes_discovery(
assert coresys.discovery.list_messages == []
async def test_load(
coresys: CoreSys, install_addon_ssh: Addon, caplog: pytest.LogCaptureFixture
):
@pytest.mark.usefixtures("install_addon_ssh")
async def test_load(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
"""Test addon manager load."""
caplog.clear()
@@ -241,13 +242,8 @@ async def test_load(
assert "Found 1 installed add-ons" in caplog.text
async def test_boot_waits_for_addons(
coresys: CoreSys,
install_addon_ssh: Addon,
container,
tmp_supervisor_data,
path_extern,
):
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_boot_waits_for_addons(coresys: CoreSys, install_addon_ssh: Addon):
"""Test addon manager boot waits for addons."""
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -278,16 +274,16 @@ async def test_boot_waits_for_addons(
@pytest.mark.parametrize("status", ["running", "stopped"])
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_update(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
status: str,
tmp_supervisor_data,
path_extern,
):
"""Test addon update."""
container.status = status
container.show.return_value["State"]["Status"] = status
container.show.return_value["State"]["Running"] = status == "running"
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
with patch(
@@ -308,16 +304,16 @@ async def test_update(
@pytest.mark.parametrize("status", ["running", "stopped"])
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_rebuild(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
status: str,
tmp_supervisor_data,
path_extern,
):
"""Test addon rebuild."""
container.status = status
container.show.return_value["State"]["Status"] = status
container.show.return_value["State"]["Running"] = status == "running"
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -331,17 +327,16 @@ async def test_rebuild(
assert bool(start_task) is (status == "running")
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_start_wait_cancel_on_uninstall(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
caplog: pytest.LogCaptureFixture,
tmp_supervisor_data,
path_extern,
) -> None:
"""Test the addon wait task is cancelled when addon is uninstalled."""
install_addon_ssh.path_data.mkdir()
container.attrs["Config"] = {"Healthcheck": "exists"}
container.show.return_value["Config"] = {"Healthcheck": "exists"}
await install_addon_ssh.load()
await asyncio.sleep(0)
assert install_addon_ssh.state == AddonState.STOPPED
@@ -458,10 +453,11 @@ async def test_store_data_changes_during_update(
async def test_watchdog_runs_during_update(
coresys: CoreSys, install_addon_ssh: Addon, container: MagicMock
coresys: CoreSys, install_addon_ssh: Addon, container: DockerContainer
):
"""Test watchdog running during a long update."""
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
install_addon_ssh.watchdog = True
coresys.store.data.addons["local_ssh"]["image"] = "test_image"
coresys.store.data.addons["local_ssh"]["version"] = AwesomeVersion("1.1.1")
@@ -469,7 +465,8 @@ async def test_watchdog_runs_during_update(
# Simulate stop firing the docker event for stopped container like it normally would
async def mock_stop(*args, **kwargs):
container.status = "stopped"
container.show.return_value["State"]["Status"] = "stopped"
container.show.return_value["State"]["Running"] = False
coresys.bus.fire_event(
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
DockerContainerStateEvent(
+39 -55
View File
@@ -1,11 +1,13 @@
"""Test addons api."""
import asyncio
from collections.abc import Awaitable, Callable
from unittest.mock import MagicMock, PropertyMock, patch
import aiodocker
from aiodocker.containers import DockerContainer
from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
from docker.errors import DockerException
import pytest
from supervisor.addons.addon import Addon
@@ -33,9 +35,7 @@ def _create_test_event(name: str, state: ContainerState) -> DockerContainerState
)
async def test_addons_info(
api_client: TestClient, coresys: CoreSys, install_addon_ssh: Addon
):
async def test_addons_info(api_client: TestClient, install_addon_ssh: Addon):
"""Test getting addon info."""
install_addon_ssh.state = AddonState.STOPPED
install_addon_ssh.ingress_panel = True
@@ -71,9 +71,9 @@ async def test_addons_info_not_installed(
}
@pytest.mark.usefixtures("install_addon_ssh")
async def test_api_addon_logs(
advanced_logs_tester,
install_addon_ssh: Addon,
advanced_logs_tester: Callable[[str, str], Awaitable[None]],
):
"""Test addon logs."""
await advanced_logs_tester("/addons/local_ssh", "addon_local_ssh")
@@ -89,12 +89,8 @@ async def test_api_addon_logs_not_installed(api_client: TestClient):
assert content == "Addon hic_sunt_leones does not exist"
async def test_api_addon_logs_error(
api_client: TestClient,
journald_logs: MagicMock,
docker_logs: MagicMock,
install_addon_ssh: Addon,
):
@pytest.mark.usefixtures("docker_logs", "install_addon_ssh")
async def test_api_addon_logs_error(api_client: TestClient, journald_logs: MagicMock):
"""Test errors are properly handled for add-on logs."""
journald_logs.side_effect = HassioError("Something bad happened!")
resp = await api_client.get("/addons/local_ssh/logs")
@@ -105,17 +101,13 @@ async def test_api_addon_logs_error(
assert content == "Something bad happened!"
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_api_addon_start_healthcheck(
api_client: TestClient,
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
api_client: TestClient, install_addon_ssh: Addon, container: DockerContainer
):
"""Test starting an addon waits for healthy."""
install_addon_ssh.path_data.mkdir()
container.attrs["Config"] = {"Healthcheck": "exists"}
container.show.return_value["Config"] = {"Healthcheck": "exists"}
await install_addon_ssh.load()
await asyncio.sleep(0)
assert install_addon_ssh.state == AddonState.STOPPED
@@ -148,17 +140,13 @@ async def test_api_addon_start_healthcheck(
assert resp.status == 200
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_api_addon_restart_healthcheck(
api_client: TestClient,
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
api_client: TestClient, install_addon_ssh: Addon, container: DockerContainer
):
"""Test restarting an addon waits for healthy."""
install_addon_ssh.path_data.mkdir()
container.attrs["Config"] = {"Healthcheck": "exists"}
container.show.return_value["Config"] = {"Healthcheck": "exists"}
await install_addon_ssh.load()
await asyncio.sleep(0)
assert install_addon_ssh.state == AddonState.STOPPED
@@ -191,19 +179,19 @@ async def test_api_addon_restart_healthcheck(
assert resp.status == 200
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_api_addon_rebuild_healthcheck(
api_client: TestClient,
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
container: DockerContainer,
):
"""Test rebuilding an addon waits for healthy."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
install_addon_ssh.path_data.mkdir()
container.attrs["Config"] = {"Healthcheck": "exists"}
container.show.return_value["Config"] = {"Healthcheck": "exists"}
await install_addon_ssh.load()
await asyncio.sleep(0)
assert install_addon_ssh.state == AddonState.STARTUP
@@ -244,7 +232,7 @@ async def test_api_addon_rebuild_healthcheck(
patch.object(
coresys.docker,
"run_command",
new=PropertyMock(return_value=CommandReturn(0, b"Build successful")),
return_value=CommandReturn(0, ["Build successful"]),
),
patch.object(
DockerAddon, "healthcheck", new=PropertyMock(return_value={"exists": True})
@@ -262,19 +250,19 @@ async def test_api_addon_rebuild_healthcheck(
assert resp.status == 200
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_api_addon_rebuild_force(
api_client: TestClient,
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
container: DockerContainer,
):
"""Test rebuilding an image-based addon with force parameter."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
install_addon_ssh.path_data.mkdir()
container.attrs["Config"] = {"Healthcheck": "exists"}
container.show.return_value["Config"] = {"Healthcheck": "exists"}
await install_addon_ssh.load()
await asyncio.sleep(0)
assert install_addon_ssh.state == AddonState.STARTUP
@@ -338,7 +326,7 @@ async def test_api_addon_rebuild_force(
patch.object(
coresys.docker,
"run_command",
new=PropertyMock(return_value=CommandReturn(0, b"Build successful")),
return_value=CommandReturn(0, ["Build successful"]),
),
patch.object(
DockerAddon, "healthcheck", new=PropertyMock(return_value={"exists": True})
@@ -358,12 +346,9 @@ async def test_api_addon_rebuild_force(
await _container_events_task
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_api_addon_uninstall(
api_client: TestClient,
coresys: CoreSys,
install_addon_example: Addon,
tmp_supervisor_data,
path_extern,
api_client: TestClient, coresys: CoreSys, install_addon_example: Addon
):
"""Test uninstall."""
install_addon_example.data["map"].append(
@@ -378,12 +363,9 @@ async def test_api_addon_uninstall(
assert test_file.exists()
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_api_addon_uninstall_remove_config(
api_client: TestClient,
coresys: CoreSys,
install_addon_example: Addon,
tmp_supervisor_data,
path_extern,
api_client: TestClient, coresys: CoreSys, install_addon_example: Addon
):
"""Test uninstall and remove config."""
install_addon_example.data["map"].append(
@@ -400,13 +382,12 @@ async def test_api_addon_uninstall_remove_config(
assert not test_folder.exists()
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_api_addon_system_managed(
api_client: TestClient,
coresys: CoreSys,
install_addon_example: Addon,
caplog: pytest.LogCaptureFixture,
tmp_supervisor_data,
path_extern,
):
"""Test setting system managed for an addon."""
install_addon_example.data["ingress"] = False
@@ -583,9 +564,8 @@ async def test_addon_reset_options(
assert install_addon_example.persist["options"] == {}
async def test_addon_set_options_error(
api_client: TestClient, install_addon_example: Addon
):
@pytest.mark.usefixtures("install_addon_example")
async def test_addon_set_options_error(api_client: TestClient):
"""Test setting options for an addon."""
resp = await api_client.post(
"/addons/local_example/options", json={"options": {"message": True}}
@@ -679,7 +659,9 @@ async def test_addon_write_stdin_not_supported_error(api_client: TestClient):
async def test_addon_rebuild_fails_error(api_client: TestClient, coresys: CoreSys):
"""Test error when build fails during rebuild for addon."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
coresys.docker.containers_legacy.run.side_effect = DockerException("fail")
coresys.docker.containers.create.side_effect = aiodocker.DockerError(
500, {"message": "fail"}
)
with (
patch.object(
@@ -688,7 +670,9 @@ async def test_addon_rebuild_fails_error(api_client: TestClient, coresys: CoreSy
patch.object(
CpuArchManager, "default", new=PropertyMock(return_value="aarch64")
),
patch.object(AddonBuild, "get_docker_args", return_value={}),
patch.object(
AddonBuild, "get_docker_args", return_value={"command": ["build"]}
),
):
resp = await api_client.post("/addons/local_ssh/rebuild")
assert resp.status == 500
+6 -3
View File
@@ -6,6 +6,7 @@ from shutil import copy
from typing import Any
from unittest.mock import ANY, AsyncMock, MagicMock, PropertyMock, patch
from aiodocker.containers import DockerContainer
from aiohttp import MultipartWriter
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
@@ -1496,14 +1497,16 @@ async def test_immediate_list_after_missing_file_restore(
@pytest.mark.parametrize("command", ["backup_pre", "backup_post"])
@pytest.mark.usefixtures("install_addon_example", "tmp_supervisor_data")
async def test_pre_post_backup_command_error(
api_client: TestClient, coresys: CoreSys, container: MagicMock, command: str
api_client: TestClient, coresys: CoreSys, container: DockerContainer, command: str
):
"""Test pre/post backup command error."""
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
container.status = "running"
container.exec_run.return_value = (1, b"")
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
container.exec.return_value.inspect.return_value = {"ExitCode": 1}
with patch.object(Addon, command, new=PropertyMock(return_value="test")):
resp = await api_client.post(
"/backups/new/partial", json={"addons": ["local_example"]}
+96 -19
View File
@@ -2,14 +2,15 @@
import asyncio
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from unittest.mock import AsyncMock, PropertyMock, patch
from aiodocker.containers import DockerContainer
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest
from supervisor.backups.manager import BackupManager
from supervisor.const import CoreState
from supervisor.const import DNS_SUFFIX, CoreState
from supervisor.coresys import CoreSys
from supervisor.docker.homeassistant import DockerHomeAssistant
from supervisor.docker.interface import DockerInterface
@@ -33,11 +34,12 @@ async def test_api_core_logs(
)
async def test_api_stats(api_client: TestClient, coresys: CoreSys):
async def test_api_stats(api_client: TestClient, container: DockerContainer):
"""Test stats."""
coresys.docker.containers_legacy.get.return_value.status = "running"
coresys.docker.containers_legacy.get.return_value.stats.return_value = (
load_json_fixture("container_stats.json")
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
container.stats = AsyncMock(
return_value=[load_json_fixture("container_stats.json")]
)
resp = await api_client.get("/homeassistant/stats")
@@ -49,7 +51,7 @@ async def test_api_stats(api_client: TestClient, coresys: CoreSys):
assert result["data"]["memory_percent"] == 1.49
async def test_api_set_options(api_client: TestClient, coresys: CoreSys):
async def test_api_set_options(api_client: TestClient):
"""Test setting options for homeassistant."""
resp = await api_client.get("/homeassistant/info")
assert resp.status == 200
@@ -103,9 +105,7 @@ async def test_api_set_image(api_client: TestClient, coresys: CoreSys):
async def test_api_restart(
api_client: TestClient,
container: MagicMock,
tmp_supervisor_data: Path,
api_client: TestClient, container: DockerContainer, tmp_supervisor_data: Path
):
"""Test restarting homeassistant."""
safe_mode_marker = tmp_supervisor_data / "homeassistant" / "safe-mode"
@@ -123,12 +123,12 @@ async def test_api_restart(
assert safe_mode_marker.exists()
@pytest.mark.usefixtures("path_extern")
async def test_api_rebuild(
api_client: TestClient,
coresys: CoreSys,
container: MagicMock,
container: DockerContainer,
tmp_supervisor_data: Path,
path_extern,
):
"""Test rebuilding homeassistant."""
coresys.homeassistant.version = AwesomeVersion("2023.09.0")
@@ -137,23 +137,21 @@ async def test_api_rebuild(
with patch.object(HomeAssistantCore, "_block_till_run"):
await api_client.post("/homeassistant/rebuild")
assert container.remove.call_count == 2
coresys.docker.containers.create.return_value.start.assert_called_once()
assert container.delete.call_count == 2
container.start.assert_called_once()
assert not safe_mode_marker.exists()
with patch.object(HomeAssistantCore, "_block_till_run"):
await api_client.post("/homeassistant/rebuild", json={"safe_mode": True})
assert container.remove.call_count == 4
assert coresys.docker.containers.create.return_value.start.call_count == 2
assert container.delete.call_count == 4
assert container.start.call_count == 2
assert safe_mode_marker.exists()
@pytest.mark.parametrize("action", ["rebuild", "restart", "stop", "update"])
async def test_migration_blocks_stopping_core(
api_client: TestClient,
coresys: CoreSys,
action: str,
api_client: TestClient, coresys: CoreSys, action: str
):
"""Test that an offline db migration in progress stops users from stopping/restarting core."""
coresys.homeassistant.api.get_api_state.return_value = APIState("NOT_RUNNING", True)
@@ -359,3 +357,82 @@ async def test_api_progress_updates_home_assistant_update(
"done": True,
},
]
@pytest.mark.usefixtures("path_extern")
async def test_config_check(
api_client: TestClient, coresys: CoreSys, container: DockerContainer
):
"""Test config check API."""
coresys.homeassistant.version = AwesomeVersion("2025.1.0")
result = await api_client.post("/core/check")
assert result.status == 200
coresys.docker.containers.create.assert_called_once_with(
{
"Image": "ghcr.io/home-assistant/qemux86-64-homeassistant:2025.1.0",
"Labels": {"supervisor_managed": ""},
"OpenStdin": False,
"StdinOnce": False,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"HostConfig": {
"NetworkMode": "hassio",
"Init": True,
"Privileged": True,
"Mounts": [
{
"Type": "bind",
"Source": "/mnt/data/supervisor/homeassistant",
"Target": "/config",
"ReadOnly": False,
},
{
"Type": "bind",
"Source": "/mnt/data/supervisor/ssl",
"Target": "/ssl",
"ReadOnly": True,
},
{
"Type": "bind",
"Source": "/mnt/data/supervisor/share",
"Target": "/share",
"ReadOnly": False,
},
],
"Dns": [str(coresys.docker.network.dns)],
"DnsSearch": [DNS_SUFFIX],
"DnsOptions": ["timeout:10"],
},
"Env": ["TZ=Etc/UTC"],
"Entrypoint": [],
"Cmd": [
"python3",
"-m",
"homeassistant",
"-c",
"/config",
"--script",
"check_config",
],
},
name=None,
)
container.start.assert_called_once()
@pytest.mark.usefixtures("path_extern")
async def test_config_check_error(api_client: TestClient, container: DockerContainer):
"""Test config check API strips color coding from log output on error."""
container.log.return_value = [
"\x1b[36mTest logs 1\x1b[0m",
"\x1b[36mTest logs 2\x1b[0m",
]
container.wait.return_value = {"StatusCode": 1}
result = await api_client.post("/core/check")
assert result.status == 400
resp = await result.json()
assert resp["message"] == "Test logs 1\nTest logs 2"
+7 -6
View File
@@ -2,8 +2,9 @@
import asyncio
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from unittest.mock import AsyncMock, PropertyMock, patch
from aiodocker.containers import DockerContainer
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest
@@ -202,18 +203,18 @@ async def test_api_store_repair_repository_git_error(
)
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_api_store_update_healthcheck(
api_client: TestClient,
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
container: DockerContainer,
):
"""Test updating an addon with healthcheck waits for health status."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
container.status = "running"
container.attrs["Config"] = {"Healthcheck": "exists"}
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
container.show.return_value["Config"] = {"Healthcheck": "exists"}
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
with patch(
+14 -7
View File
@@ -4,10 +4,11 @@
import time
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
import aiodocker
from aiodocker.containers import DockerContainer
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
from blockbuster import BlockingError
from docker.errors import DockerException
import pytest
from supervisor.const import CoreState
@@ -410,11 +411,12 @@ async def test_api_progress_updates_supervisor_update(
]
async def test_api_supervisor_stats(api_client: TestClient, coresys: CoreSys):
async def test_api_supervisor_stats(api_client: TestClient, container: DockerContainer):
"""Test supervisor stats."""
coresys.docker.containers_legacy.get.return_value.status = "running"
coresys.docker.containers_legacy.get.return_value.stats.return_value = (
load_json_fixture("container_stats.json")
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
container.stats = AsyncMock(
return_value=[load_json_fixture("container_stats.json")]
)
resp = await api_client.get("/supervisor/stats")
@@ -430,7 +432,9 @@ async def test_supervisor_api_stats_failure(
api_client: TestClient, coresys: CoreSys, caplog: pytest.LogCaptureFixture
):
"""Test supervisor stats failure."""
coresys.docker.containers_legacy.get.side_effect = DockerException("fail")
coresys.docker.containers.get.side_effect = aiodocker.DockerError(
500, {"message": "fail"}
)
resp = await api_client.get("/supervisor/stats")
assert resp.status == 500
@@ -441,4 +445,7 @@ async def test_supervisor_api_stats_failure(
)
assert body["error_key"] == "supervisor_unknown_error"
assert body["extra_fields"] == {"logs_command": "ha supervisor logs"}
assert "Could not inspect container 'hassio_supervisor': fail" in caplog.text
assert (
"Could not inspect container 'hassio_supervisor': DockerError(500, 'fail')"
in caplog.text
)
+132 -195
View File
@@ -8,6 +8,7 @@ from shutil import copy, rmtree
from types import SimpleNamespace
from unittest.mock import ANY, AsyncMock, MagicMock, Mock, PropertyMock, patch
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
from dbus_fast import DBusError
import pytest
@@ -98,9 +99,8 @@ async def test_do_backup_full_with_filename(
assert coresys.core.state == CoreState.RUNNING
async def test_do_backup_full_uncompressed(
coresys: CoreSys, backup_mock, install_addon_ssh
):
@pytest.mark.usefixtures("backup_mock")
async def test_do_backup_full_uncompressed(coresys: CoreSys, install_addon_ssh: Addon):
"""Test creating Backup."""
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -128,9 +128,8 @@ async def test_do_backup_full_uncompressed(
assert coresys.core.state == CoreState.RUNNING
async def test_do_backup_partial_minimal(
coresys: CoreSys, backup_mock, install_addon_ssh
):
@pytest.mark.usefixtures("backup_mock", "install_addon_ssh")
async def test_do_backup_partial_minimal(coresys: CoreSys):
"""Test creating minimal partial Backup."""
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -155,9 +154,8 @@ async def test_do_backup_partial_minimal(
assert coresys.core.state == CoreState.RUNNING
async def test_do_backup_partial_minimal_uncompressed(
coresys: CoreSys, backup_mock, install_addon_ssh
):
@pytest.mark.usefixtures("backup_mock", "install_addon_ssh")
async def test_do_backup_partial_minimal_uncompressed(coresys: CoreSys):
"""Test creating minimal partial Backup."""
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -184,9 +182,8 @@ async def test_do_backup_partial_minimal_uncompressed(
assert coresys.core.state == CoreState.RUNNING
async def test_do_backup_partial_maximal(
coresys: CoreSys, backup_mock, install_addon_ssh
):
@pytest.mark.usefixtures("backup_mock")
async def test_do_backup_partial_maximal(coresys: CoreSys, install_addon_ssh: Addon):
"""Test creating maximal partial Backup."""
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -218,8 +215,9 @@ async def test_do_backup_partial_maximal(
assert coresys.core.state == CoreState.RUNNING
@pytest.mark.usefixtures("supervisor_internet")
async def test_do_restore_full(
coresys: CoreSys, supervisor_internet, full_backup_mock, install_addon_ssh
coresys: CoreSys, full_backup_mock: Backup, install_addon_ssh: Addon
):
"""Test restoring full Backup."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -249,8 +247,9 @@ async def test_do_restore_full(
assert coresys.core.state == CoreState.RUNNING
@pytest.mark.usefixtures("supervisor_internet")
async def test_do_restore_full_different_addon(
coresys: CoreSys, supervisor_internet, full_backup_mock, install_addon_ssh
coresys: CoreSys, full_backup_mock: Backup, install_addon_ssh: Addon
):
"""Test restoring full Backup with different addons than installed."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -281,8 +280,9 @@ async def test_do_restore_full_different_addon(
assert coresys.core.state == CoreState.RUNNING
@pytest.mark.usefixtures("supervisor_internet", "install_addon_ssh")
async def test_do_restore_partial_minimal(
coresys: CoreSys, supervisor_internet, partial_backup_mock, install_addon_ssh
coresys: CoreSys, partial_backup_mock: Backup
):
"""Test restoring partial Backup minimal."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -306,8 +306,9 @@ async def test_do_restore_partial_minimal(
assert coresys.core.state == CoreState.RUNNING
@pytest.mark.usefixtures("supervisor_internet")
async def test_do_restore_partial_maximal(
coresys: CoreSys, supervisor_internet, partial_backup_mock
coresys: CoreSys, partial_backup_mock: Backup
):
"""Test restoring partial Backup minimal."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -337,9 +338,9 @@ async def test_do_restore_partial_maximal(
assert coresys.core.state == CoreState.RUNNING
@pytest.mark.usefixtures("supervisor_internet")
async def test_fail_invalid_full_backup(
coresys: CoreSys,
supervisor_internet,
full_backup_mock: MagicMock,
partial_backup_mock: MagicMock,
):
@@ -372,8 +373,9 @@ async def test_fail_invalid_full_backup(
await manager.do_restore_full(backup_instance)
@pytest.mark.usefixtures("supervisor_internet")
async def test_fail_invalid_partial_backup(
coresys: CoreSys, supervisor_internet, partial_backup_mock: MagicMock
coresys: CoreSys, partial_backup_mock: MagicMock
):
"""Test restore fails with invalid backup."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -406,12 +408,8 @@ async def test_fail_invalid_partial_backup(
await manager.do_restore_partial(backup_instance)
async def test_backup_error_homeassistant(
coresys: CoreSys,
backup_mock: MagicMock,
install_addon_ssh: Addon,
capture_exception: Mock,
):
@pytest.mark.usefixtures("install_addon_ssh", "capture_exception")
async def test_backup_error_homeassistant(coresys: CoreSys, backup_mock: MagicMock):
"""Test error collected and file deleted when Home Assistant Core backup fails."""
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -434,11 +432,9 @@ async def test_backup_error_homeassistant(
backup_instance.tarfile.unlink.assert_called_once()
@pytest.mark.usefixtures("install_addon_ssh")
async def test_backup_error_capture(
coresys: CoreSys,
backup_mock: MagicMock,
install_addon_ssh: Addon,
capture_exception: Mock,
coresys: CoreSys, backup_mock: MagicMock, capture_exception: Mock
):
"""Test error captured when backup fails."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -452,11 +448,9 @@ async def test_backup_error_capture(
capture_exception.assert_called_once_with(err)
@pytest.mark.usefixtures("supervisor_internet")
async def test_restore_error(
coresys: CoreSys,
supervisor_internet,
full_backup_mock: MagicMock,
capture_exception: Mock,
coresys: CoreSys, full_backup_mock: MagicMock, capture_exception: Mock
):
"""Test restoring full Backup with errors."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -476,14 +470,15 @@ async def test_restore_error(
capture_exception.assert_called_once_with(err)
@pytest.mark.usefixtures(
"supervisor_internet",
"tmp_supervisor_data",
"path_extern",
"mount_propagation",
"mock_is_mount",
)
async def test_backup_media_with_mounts(
coresys: CoreSys,
supervisor_internet,
all_dbus_services: dict[str, DBusServiceMock],
tmp_supervisor_data,
path_extern,
mount_propagation,
mock_is_mount,
coresys: CoreSys, all_dbus_services: dict[str, DBusServiceMock]
):
"""Test backing up media folder with mounts."""
systemd_service: SystemdService = all_dbus_services["systemd"]
@@ -539,14 +534,15 @@ async def test_backup_media_with_mounts(
assert not mount_dir.exists()
@pytest.mark.usefixtures(
"supervisor_internet",
"tmp_supervisor_data",
"path_extern",
"mount_propagation",
"mock_is_mount",
)
async def test_backup_media_with_mounts_retains_files(
coresys: CoreSys,
supervisor_internet,
all_dbus_services: dict[str, DBusServiceMock],
tmp_supervisor_data,
path_extern,
mount_propagation,
mock_is_mount,
coresys: CoreSys, all_dbus_services: dict[str, DBusServiceMock]
):
"""Test backing up media folder with mounts retains mount files."""
systemd_service: SystemdService = all_dbus_services["systemd"]
@@ -594,14 +590,15 @@ async def test_backup_media_with_mounts_retains_files(
]
@pytest.mark.usefixtures(
"supervisor_internet",
"tmp_supervisor_data",
"path_extern",
"mount_propagation",
"mock_is_mount",
)
async def test_backup_share_with_mounts(
coresys: CoreSys,
supervisor_internet,
all_dbus_services: dict[str, DBusServiceMock],
tmp_supervisor_data,
path_extern,
mount_propagation,
mock_is_mount,
coresys: CoreSys, all_dbus_services: dict[str, DBusServiceMock]
):
"""Test backing up share folder with mounts."""
systemd_service: SystemdService = all_dbus_services["systemd"]
@@ -664,14 +661,14 @@ async def test_backup_share_with_mounts(
assert not mount_dir.exists()
async def test_full_backup_to_mount(
coresys: CoreSys,
supervisor_internet,
tmp_supervisor_data,
path_extern,
mount_propagation,
mock_is_mount,
):
@pytest.mark.usefixtures(
"supervisor_internet",
"tmp_supervisor_data",
"path_extern",
"mount_propagation",
"mock_is_mount",
)
async def test_full_backup_to_mount(coresys: CoreSys):
"""Test full backup to and restoring from a mount."""
(marker := coresys.config.path_homeassistant / "test.txt").touch()
@@ -711,14 +708,14 @@ async def test_full_backup_to_mount(
assert marker.exists()
async def test_partial_backup_to_mount(
coresys: CoreSys,
supervisor_internet,
tmp_supervisor_data,
path_extern,
mount_propagation,
mock_is_mount,
):
@pytest.mark.usefixtures(
"supervisor_internet",
"tmp_supervisor_data",
"path_extern",
"mount_propagation",
"mock_is_mount",
)
async def test_partial_backup_to_mount(coresys: CoreSys):
"""Test partial backup to and restoring from a mount."""
(marker := coresys.config.path_homeassistant / "test.txt").touch()
@@ -767,13 +764,8 @@ async def test_partial_backup_to_mount(
assert marker.exists()
async def test_backup_to_down_mount_error(
coresys: CoreSys,
mock_is_mount: MagicMock,
tmp_supervisor_data,
path_extern,
mount_propagation,
):
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern", "mount_propagation")
async def test_backup_to_down_mount_error(coresys: CoreSys, mock_is_mount: MagicMock):
"""Test backup to mount when down raises error."""
# Add a backup mount
(mount_dir := coresys.config.path_mounts / "backup_test").mkdir()
@@ -804,13 +796,10 @@ async def test_backup_to_down_mount_error(
)
async def test_backup_to_local_with_default(
coresys: CoreSys,
tmp_supervisor_data,
path_extern,
mount_propagation,
mock_is_mount,
):
@pytest.mark.usefixtures(
"tmp_supervisor_data", "path_extern", "mount_propagation", "mock_is_mount"
)
async def test_backup_to_local_with_default(coresys: CoreSys):
"""Test making backup to local when a default mount is specified."""
# Add a default backup mount
await coresys.mounts.load()
@@ -843,13 +832,10 @@ async def test_backup_to_local_with_default(
assert (coresys.config.path_backup / f"{backup.slug}.tar").exists()
async def test_backup_to_default(
coresys: CoreSys,
tmp_supervisor_data,
path_extern,
mount_propagation,
mock_is_mount,
):
@pytest.mark.usefixtures(
"tmp_supervisor_data", "path_extern", "mount_propagation", "mock_is_mount"
)
async def test_backup_to_default(coresys: CoreSys):
"""Test making backup to default mount."""
# Add a default backup mount
(mount_dir := coresys.config.path_mounts / "backup_test").mkdir()
@@ -883,12 +869,9 @@ async def test_backup_to_default(
assert (mount_dir / f"{backup.slug}.tar").exists()
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern", "mount_propagation")
async def test_backup_to_default_mount_down_error(
coresys: CoreSys,
mock_is_mount: MagicMock,
tmp_supervisor_data,
path_extern,
mount_propagation,
coresys: CoreSys, mock_is_mount: MagicMock
):
"""Test making backup to default mount when it is down."""
# Add a default backup mount
@@ -916,13 +899,9 @@ async def test_backup_to_default_mount_down_error(
await coresys.backups.do_backup_partial("test", homeassistant=True)
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern", "mount_propagation")
async def test_load_network_error(
coresys: CoreSys,
caplog: pytest.LogCaptureFixture,
mock_is_mount: MagicMock,
tmp_supervisor_data,
path_extern,
mount_propagation,
coresys: CoreSys, caplog: pytest.LogCaptureFixture, mock_is_mount: MagicMock
):
"""Test load of backup manager when there is a network error."""
(coresys.config.path_mounts / "backup_test").mkdir()
@@ -951,16 +930,14 @@ async def test_load_network_error(
assert "Could not list backups from /data/backup_test" in caplog.text
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_with_healthcheck(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
coresys: CoreSys, install_addon_ssh: Addon, container: DockerContainer
):
"""Test backup of addon with healthcheck in cold mode."""
container.status = "running"
container.attrs["Config"] = {"Healthcheck": "exists"}
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
container.show.return_value["Config"] = {"Healthcheck": "exists"}
install_addon_ssh.path_data.mkdir()
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -1029,17 +1006,14 @@ async def test_backup_with_healthcheck(
await _container_events_task
@pytest.mark.usefixtures("supervisor_internet", "tmp_supervisor_data", "path_extern")
async def test_restore_with_healthcheck(
coresys: CoreSys,
supervisor_internet,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
coresys: CoreSys, install_addon_ssh: Addon, container: DockerContainer
):
"""Test backup of addon with healthcheck in cold mode."""
container.status = "running"
container.attrs["Config"] = {"Healthcheck": "exists"}
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
container.show.return_value["Config"] = {"Healthcheck": "exists"}
install_addon_ssh.path_data.mkdir()
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -1137,11 +1111,12 @@ def _make_backup_message_for_assert(
async def test_backup_progress(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
ha_ws_client: AsyncMock,
):
"""Test progress is tracked during backups."""
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
install_addon_ssh.path_data.mkdir()
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -1239,11 +1214,12 @@ async def test_backup_progress(
async def test_restore_progress(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
ha_ws_client: AsyncMock,
):
"""Test progress is tracked during backups."""
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
install_addon_ssh.path_data.mkdir()
install_addon_ssh.state = AddonState.STARTED
await coresys.core.set_state(CoreState.RUNNING)
@@ -1365,7 +1341,8 @@ async def test_restore_progress(
),
]
container.status = "stopped"
container.show.return_value["State"]["Status"] = "stopped"
container.show.return_value["State"]["Running"] = False
install_addon_ssh.state = AddonState.STOPPED
addon_backup: Backup = await coresys.backups.do_backup_partial(addons=["local_ssh"])
@@ -1417,15 +1394,15 @@ async def test_restore_progress(
async def test_freeze_thaw(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container: DockerContainer,
ha_ws_client: AsyncMock,
):
"""Test manual freeze and thaw for external snapshots."""
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
install_addon_ssh.path_data.mkdir()
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
container.exec_run.return_value = (0, None)
ha_ws_client.ha_version = AwesomeVersion("2022.1.0")
with (
@@ -1438,7 +1415,7 @@ async def test_freeze_thaw(
):
# Run the freeze
await coresys.backups.freeze_all()
container.exec_run.assert_called_once_with("pre_backup")
container.exec.assert_called_once_with("pre_backup")
assert coresys.core.state == CoreState.FREEZE
await asyncio.sleep(0)
@@ -1473,10 +1450,10 @@ async def test_freeze_thaw(
]
# Release the thaw task
container.exec_run.reset_mock()
container.exec.reset_mock()
ha_ws_client.async_send_command.reset_mock()
await coresys.backups.thaw_all()
container.exec_run.assert_called_once_with("post_backup")
container.exec.assert_called_once_with("post_backup")
assert coresys.core.state == CoreState.RUNNING
await asyncio.sleep(0)
@@ -1505,12 +1482,9 @@ async def test_freeze_thaw(
]
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_freeze_thaw_timeout(
coresys: CoreSys,
ha_ws_client: AsyncMock,
caplog: pytest.LogCaptureFixture,
tmp_supervisor_data,
path_extern,
coresys: CoreSys, ha_ws_client: AsyncMock, caplog: pytest.LogCaptureFixture
):
"""Test manual freeze ends due to timeout expiration."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -1542,12 +1516,9 @@ async def test_cannot_manually_thaw_normal_freeze(coresys: CoreSys):
await coresys.backups.thaw_all()
@pytest.mark.usefixtures("supervisor_internet", "tmp_supervisor_data", "path_extern")
async def test_restore_only_reloads_ingress_on_change(
coresys: CoreSys,
supervisor_internet,
install_addon_ssh: Addon,
tmp_supervisor_data,
path_extern,
coresys: CoreSys, install_addon_ssh: Addon
):
"""Test restore only tells core to reload ingress when something has changed."""
install_addon_ssh.path_data.mkdir()
@@ -1603,14 +1574,8 @@ async def test_restore_only_reloads_ingress_on_change(
make_request.assert_called_once_with("post", "api/hassio_push/panel/local_ssh")
async def test_restore_new_addon(
coresys: CoreSys,
supervisor_internet,
install_addon_example: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
):
@pytest.mark.usefixtures("supervisor_internet", "tmp_supervisor_data", "path_extern")
async def test_restore_new_addon(coresys: CoreSys, install_addon_example: Addon):
"""Test restore installing new addon."""
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -1635,13 +1600,9 @@ async def test_restore_new_addon(
assert install_addon_example.path_config.exists()
@pytest.mark.usefixtures("supervisor_internet", "tmp_supervisor_data", "path_extern")
async def test_restore_preserves_data_config(
coresys: CoreSys,
supervisor_internet,
install_addon_example: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
coresys: CoreSys, install_addon_example: Addon
):
"""Test restore preserves data and config."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -1673,13 +1634,11 @@ async def test_restore_preserves_data_config(
assert not test_config2.exists()
@pytest.mark.usefixtures(
"tmp_supervisor_data", "path_extern", "mount_propagation", "mock_is_mount"
)
async def test_backup_to_mount_bypasses_free_space_condition(
coresys: CoreSys,
all_dbus_services: dict[str, DBusServiceMock],
tmp_supervisor_data,
path_extern,
mount_propagation,
mock_is_mount,
coresys: CoreSys, all_dbus_services: dict[str, DBusServiceMock]
):
"""Test backing up to a mount bypasses the check on local free space."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -1726,13 +1685,9 @@ async def test_backup_to_mount_bypasses_free_space_condition(
"partial_backup,exclude_db_setting",
[(False, True), (True, True), (False, False), (True, False)],
)
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_skip_homeassistant_database(
coresys: CoreSys,
container: MagicMock,
partial_backup: bool,
exclude_db_setting: bool | None,
tmp_supervisor_data,
path_extern,
coresys: CoreSys, partial_backup: bool, exclude_db_setting: bool | None
):
"""Test exclude database option skips database in backup."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -1830,14 +1785,13 @@ async def test_backup_remove_error(
"error_path,healthy_expected",
[(Path("/data/backup"), False), (Path("/data/mounts/backup_test"), True)],
)
@pytest.mark.usefixtures("path_extern", "mount_propagation")
async def test_reload_error(
coresys: CoreSys,
caplog: pytest.LogCaptureFixture,
error_path: Path,
healthy_expected: bool,
mock_is_mount: MagicMock,
path_extern,
mount_propagation,
):
"""Test error during reload."""
err = OSError()
@@ -1882,12 +1836,9 @@ async def test_reload_error(
assert coresys.core.healthy is healthy_expected
@pytest.mark.usefixtures("supervisor_internet", "install_addon_ssh")
async def test_monitoring_after_full_restore(
coresys: CoreSys,
supervisor_internet,
full_backup_mock,
install_addon_ssh,
container,
coresys: CoreSys, full_backup_mock: Backup
):
"""Test monitoring of addon state still works after full restore."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -1907,12 +1858,9 @@ async def test_monitoring_after_full_restore(
coresys.docker.unload.assert_not_called()
@pytest.mark.usefixtures("supervisor_internet", "install_addon_ssh")
async def test_monitoring_after_partial_restore(
coresys: CoreSys,
supervisor_internet,
partial_backup_mock,
install_addon_ssh,
container,
coresys: CoreSys, partial_backup_mock: Backup
):
"""Test monitoring of addon state still works after full restore."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -1939,13 +1887,12 @@ async def test_monitoring_after_partial_restore(
{"code": "unknown_command", "message": "Unknown command."},
],
)
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_core_pre_backup_actions_failed(
coresys: CoreSys,
ha_ws_client: AsyncMock,
caplog: pytest.LogCaptureFixture,
pre_backup_error: dict[str, str],
tmp_supervisor_data,
path_extern,
):
"""Test pre-backup actions failed in HA core stops backup."""
await coresys.core.set_state(CoreState.RUNNING)
@@ -2111,10 +2058,8 @@ async def test_backup_remove_one_location_of_multiple(coresys: CoreSys):
}
@pytest.mark.usefixtures("tmp_supervisor_data")
async def test_addon_backup_excludes(
coresys: CoreSys, supervisor_internet, install_addon_example: Addon
):
@pytest.mark.usefixtures("tmp_supervisor_data", "supervisor_internet")
async def test_addon_backup_excludes(coresys: CoreSys, install_addon_example: Addon):
"""Test backup excludes option for addons."""
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -2140,9 +2085,7 @@ async def test_addon_backup_excludes(
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_remove_non_existing_backup_raises(
coresys: CoreSys,
):
async def test_remove_non_existing_backup_raises(coresys: CoreSys):
"""Test removing a backup error."""
location: LOCATION_TYPE = None
backup_base_path = coresys.backups._get_base_path(location) # pylint: disable=protected-access
@@ -2200,10 +2143,7 @@ async def test_backup_multiple_locations_oserror(
@pytest.mark.parametrize("same_mount", [True, False])
async def test_get_upload_path_for_backup_location(
coresys: CoreSys,
same_mount,
):
async def test_get_upload_path_for_backup_location(coresys: CoreSys, same_mount: bool):
"""Test get_upload_path_for_location with local backup location."""
manager = BackupManager(coresys)
@@ -2231,13 +2171,10 @@ async def test_get_upload_path_for_backup_location(
assert result == target_path
async def test_get_upload_path_for_mount_location(
coresys: CoreSys,
tmp_supervisor_data,
path_extern,
mount_propagation,
mock_is_mount,
):
@pytest.mark.usefixtures(
"tmp_supervisor_data", "path_extern", "mount_propagation", "mock_is_mount"
)
async def test_get_upload_path_for_mount_location(coresys: CoreSys):
"""Test get_upload_path_for_location with a Mount location."""
manager = BackupManager(coresys)
+34 -37
View File
@@ -11,6 +11,7 @@ from uuid import uuid4
from aiodocker.containers import DockerContainer, DockerContainers
from aiodocker.docker import DockerImages
from aiodocker.execs import Exec
from aiohttp import ClientSession, web
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
@@ -66,6 +67,10 @@ from .common import (
)
from .const import TEST_ADDON_SLUG
from .dbus_service_mocks.base import DBusServiceMock
from .dbus_service_mocks.network_active_connection import (
DEFAULT_OBJECT_PATH as DEFAULT_ACTIVE_CONNECTION_OBJECT_PATH,
ActiveConnection as ActiveConnectionService,
)
from .dbus_service_mocks.network_connection_settings import (
DEFAULT_OBJECT_PATH as DEFAULT_CONNECTION_SETTINGS_OBJECT_PATH,
ConnectionSettings as ConnectionSettingsService,
@@ -73,11 +78,6 @@ from .dbus_service_mocks.network_connection_settings import (
from .dbus_service_mocks.network_dns_manager import DnsManager as DnsManagerService
from .dbus_service_mocks.network_manager import NetworkManager as NetworkManagerService
from tests.dbus_service_mocks.network_active_connection import (
DEFAULT_OBJECT_PATH as DEFAULT_ACTIVE_CONNECTION_OBJECT_PATH,
ActiveConnection as ActiveConnectionService,
)
# pylint: disable=redefined-outer-name, protected-access
@@ -121,14 +121,13 @@ async def docker() -> DockerAPI:
"Id": "test123",
"RepoTags": ["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"],
}
container_inspect = image_inspect | {"State": {"ExitCode": 0}}
container_inspect = image_inspect | {
"State": {"ExitCode": 0, "Status": "stopped", "Running": False},
"Image": "abc123",
}
with (
patch("supervisor.docker.manager.DockerClient", return_value=MagicMock()),
patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
return_value=MagicMock(),
),
patch("supervisor.docker.manager.DockerAPI.api", return_value=MagicMock()),
patch("supervisor.docker.manager.DockerAPI.info", return_value=MagicMock()),
patch("supervisor.docker.manager.DockerAPI.unload"),
@@ -159,11 +158,17 @@ async def docker() -> DockerAPI:
docker_images.pull.return_value = AsyncIterator([{}])
docker_containers.get.return_value = docker_container = MagicMock(
spec=DockerContainer
spec=DockerContainer, id=container_inspect["Id"]
)
docker_containers.list.return_value = [docker_container]
docker_containers.create.return_value = docker_container
docker_container.show.return_value = container_inspect
docker_container.wait.return_value = {"StatusCode": 0}
docker_container.log = AsyncMock(return_value=[])
docker_container.exec.return_value = docker_exec = MagicMock(spec=Exec)
docker_exec.start = AsyncMock(return_value=b"")
docker_exec.inspect.return_value = {"ExitCode": 0}
docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
@@ -805,7 +810,7 @@ async def docker_logs(docker: DockerAPI, supervisor_name) -> MagicMock:
"""Mock log output for a container from docker."""
container_mock = MagicMock()
container_mock.logs.return_value = load_binary_fixture("logs_docker_container.txt")
docker.containers_legacy.get.return_value = container_mock
docker.dockerpy.containers.get.return_value = container_mock
yield container_mock.logs
@@ -837,34 +842,26 @@ async def os_available(request: pytest.FixtureRequest) -> None:
@pytest.fixture
async def mount_propagation(docker: DockerAPI, coresys: CoreSys) -> None:
"""Mock supervisor connected to container with propagation set."""
docker.containers_legacy.get.return_value = supervisor = MagicMock()
supervisor.attrs = {
"Mounts": [
{
"Type": "bind",
"Source": "/mnt/data/supervisor",
"Destination": "/data",
"Mode": "rw",
"RW": True,
"Propagation": "slave",
}
]
}
await coresys.supervisor.load()
yield
async def container(docker: DockerAPI) -> DockerContainer:
"""Mock attrs and status for container on attach."""
yield docker.containers.get.return_value
@pytest.fixture
async def container(docker: DockerAPI) -> MagicMock:
"""Mock attrs and status for container on attach."""
attrs = {"State": {"ExitCode": 0}}
docker.containers_legacy.get.return_value = addon = MagicMock(
status="stopped", attrs=attrs
)
docker.containers.create.return_value.show.return_value = attrs
yield addon
async def mount_propagation(container: DockerContainer, coresys: CoreSys) -> None:
"""Mock supervisor connected to container with propagation set."""
container.show.return_value["Mounts"] = [
{
"Type": "bind",
"Source": "/mnt/data/supervisor",
"Destination": "/data",
"Mode": "rw",
"RW": True,
"Propagation": "slave",
}
]
await coresys.supervisor.load()
yield
@pytest.fixture
+8 -3
View File
@@ -2,8 +2,9 @@
from ipaddress import IPv4Address
from pathlib import Path
from unittest.mock import MagicMock, patch
from unittest.mock import patch
from aiodocker.containers import DockerContainer
import pytest
from supervisor.coresys import CoreSys
@@ -14,12 +15,16 @@ from . import DEV_MOUNT
@pytest.mark.usefixtures("path_extern")
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: MagicMock):
async def test_start(
coresys: CoreSys, tmp_supervisor_data: Path, container: DockerContainer
):
"""Test starting audio plugin."""
config_file = tmp_supervisor_data / "audio" / "pulse_audio.json"
assert not config_file.exists()
with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
with patch.object(
DockerAPI, "run", return_value=container.show.return_value
) as run:
await coresys.plugins.audio.start()
run.assert_called_once()
+8 -3
View File
@@ -2,8 +2,9 @@
from ipaddress import IPv4Address
from pathlib import Path
from unittest.mock import MagicMock, patch
from unittest.mock import patch
from aiodocker.containers import DockerContainer
import pytest
from supervisor.coresys import CoreSys
@@ -12,12 +13,16 @@ from supervisor.docker.manager import DockerAPI
@pytest.mark.usefixtures("path_extern")
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: MagicMock):
async def test_start(
coresys: CoreSys, tmp_supervisor_data: Path, container: DockerContainer
):
"""Test starting dns plugin."""
config_file = tmp_supervisor_data / "dns" / "coredns.json"
assert not config_file.exists()
with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
with patch.object(
DockerAPI, "run", return_value=container.show.return_value
) as run:
await coresys.plugins.dns.start()
run.assert_called_once()
+10 -9
View File
@@ -1,8 +1,9 @@
"""Test Home Assistant container."""
from ipaddress import IPv4Address
from unittest.mock import ANY, MagicMock, patch
from unittest.mock import ANY, patch
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
import pytest
@@ -21,12 +22,12 @@ from . import DEV_MOUNT
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_homeassistant_start(coresys: CoreSys, container: MagicMock):
async def test_homeassistant_start(coresys: CoreSys, container: DockerContainer):
"""Test starting homeassistant."""
coresys.homeassistant.version = AwesomeVersion("2023.8.1")
with (
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(DockerAPI, "run", return_value=container.show.return_value) as run,
patch.object(
DockerHomeAssistant, "is_running", side_effect=[False, False, True]
),
@@ -122,14 +123,14 @@ async def test_homeassistant_start(coresys: CoreSys, container: MagicMock):
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_homeassistant_start_with_duplicate_log_file(
coresys: CoreSys, container: MagicMock
coresys: CoreSys, container: DockerContainer
):
"""Test starting homeassistant with duplicate_log_file enabled."""
coresys.homeassistant.version = AwesomeVersion("2025.12.0")
coresys.homeassistant.duplicate_log_file = True
with (
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(DockerAPI, "run", return_value=container.show.return_value) as run,
patch.object(
DockerHomeAssistant, "is_running", side_effect=[False, False, True]
),
@@ -144,12 +145,12 @@ async def test_homeassistant_start_with_duplicate_log_file(
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_landingpage_start(coresys: CoreSys, container: MagicMock):
async def test_landingpage_start(coresys: CoreSys, container: DockerContainer):
"""Test starting landingpage."""
coresys.homeassistant.version = LANDINGPAGE
with (
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(DockerAPI, "run", return_value=container.show.return_value) as run,
patch.object(DockerHomeAssistant, "is_running", return_value=False),
):
await coresys.homeassistant.core.start()
@@ -202,7 +203,7 @@ async def test_landingpage_start(coresys: CoreSys, container: MagicMock):
assert "volumes" not in run.call_args.kwargs
async def test_timeout(coresys: CoreSys, container: MagicMock):
async def test_timeout(coresys: CoreSys, container: DockerContainer):
"""Test timeout for set from S6_SERVICES_GRACETIME."""
assert coresys.homeassistant.core.instance.timeout == 260
@@ -211,7 +212,7 @@ async def test_timeout(coresys: CoreSys, container: MagicMock):
assert coresys.homeassistant.core.instance.timeout == 260
# Set a mock value for env in attrs, see that it changes
container.attrs["Config"] = {
container.show.return_value["Config"] = {
"Env": [
"SUPERVISOR=172.30.32.2",
"HASSIO=172.30.32.2",
+40 -65
View File
@@ -2,16 +2,13 @@
import asyncio
from http import HTTPStatus
from pathlib import Path
from typing import Any
from unittest.mock import ANY, AsyncMock, MagicMock, Mock, PropertyMock, call, patch
import aiodocker
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
from docker.errors import DockerException, NotFound
from docker.models.containers import Container
import pytest
from requests import RequestException
from supervisor.addons.manager import Addon
from supervisor.const import BusEvent, CoreState, CpuArch
@@ -25,7 +22,6 @@ from supervisor.exceptions import (
DockerError,
DockerNoSpaceOnDevice,
DockerNotFound,
DockerRequestError,
)
from supervisor.homeassistant.const import WSEvent, WSType
from supervisor.jobs import ChildJobSyncFilter, JobSchedulerOptions, SupervisorJob
@@ -143,38 +139,31 @@ async def test_private_registry_credentials_passed_to_pull(
],
)
async def test_current_state(
coresys: CoreSys, attrs: dict[str, Any], expected: ContainerState
coresys: CoreSys,
container: DockerContainer,
attrs: dict[str, Any],
expected: ContainerState,
):
"""Test current state for container."""
container_collection = MagicMock()
container_collection.get.return_value = Container(attrs)
with patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
new=PropertyMock(return_value=container_collection),
):
assert await coresys.homeassistant.core.instance.current_state() == expected
container.show.return_value = attrs
assert await coresys.homeassistant.core.instance.current_state() == expected
async def test_current_state_failures(coresys: CoreSys):
"""Test failure states for current state."""
container_collection = MagicMock()
with patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
new=PropertyMock(return_value=container_collection),
):
container_collection.get.side_effect = NotFound("dne")
assert (
await coresys.homeassistant.core.instance.current_state()
== ContainerState.UNKNOWN
)
coresys.docker.containers.get.side_effect = aiodocker.DockerError(
404, {"message": "dne"}
)
assert (
await coresys.homeassistant.core.instance.current_state()
== ContainerState.UNKNOWN
)
container_collection.get.side_effect = DockerException()
with pytest.raises(DockerAPIError):
await coresys.homeassistant.core.instance.current_state()
container_collection.get.side_effect = RequestException()
with pytest.raises(DockerRequestError):
await coresys.homeassistant.core.instance.current_state()
coresys.docker.containers.get.side_effect = aiodocker.DockerError(
500, {"message": "fail"}
)
with pytest.raises(DockerAPIError):
await coresys.homeassistant.core.instance.current_state()
@pytest.mark.parametrize(
@@ -201,20 +190,15 @@ async def test_current_state_failures(coresys: CoreSys):
)
async def test_attach_existing_container(
coresys: CoreSys,
container: DockerContainer,
attrs: dict[str, Any],
expected: ContainerState,
fired_when_skip_down: bool,
):
"""Test attaching to existing container."""
attrs["Id"] = "abc123"
attrs["Config"] = {}
container_collection = MagicMock()
container_collection.get.return_value = Container(attrs)
container.id = "abc123"
container.show.return_value = {"Id": "abc123", "Config": {}} | attrs
with (
patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
new=PropertyMock(return_value=container_collection),
),
patch.object(type(coresys.bus), "fire_event") as fire_event,
patch("supervisor.docker.interface.time", return_value=1),
):
@@ -254,7 +238,9 @@ async def test_attach_existing_container(
async def test_attach_container_failure(coresys: CoreSys):
"""Test attach fails to find container but finds image."""
coresys.docker.containers_legacy.get.side_effect = DockerException()
coresys.docker.containers.get.side_effect = aiodocker.DockerError(
500, {"message": "fail"}
)
coresys.docker.images.inspect.return_value.setdefault("Config", {})["Image"] = (
"sha256:abc123"
)
@@ -272,7 +258,9 @@ async def test_attach_container_failure(coresys: CoreSys):
async def test_attach_total_failure(coresys: CoreSys):
"""Test attach fails to find container or image."""
coresys.docker.containers_legacy.get.side_effect = DockerException
coresys.docker.containers.get.side_effect = aiodocker.DockerError(
500, {"message": "fail"}
)
coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
400, {"message": ""}
)
@@ -280,14 +268,11 @@ async def test_attach_total_failure(coresys: CoreSys):
await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))
@pytest.mark.parametrize(
"err", [aiodocker.DockerError(400, {"message": ""}), RequestException()]
)
async def test_image_pull_fail(
coresys: CoreSys, capture_exception: Mock, err: Exception
):
async def test_image_pull_fail(coresys: CoreSys, capture_exception: Mock):
"""Test failure to pull image."""
coresys.docker.images.inspect.side_effect = err
coresys.docker.images.inspect.side_effect = err = aiodocker.DockerError(
400, {"message": ""}
)
with pytest.raises(DockerError):
await coresys.homeassistant.core.instance.install(
AwesomeVersion("2022.7.3"), arch=CpuArch.AMD64
@@ -296,13 +281,9 @@ async def test_image_pull_fail(
capture_exception.assert_called_once_with(err)
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_run_missing_image(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
capture_exception: Mock,
path_extern,
tmp_supervisor_data: Path,
coresys: CoreSys, install_addon_ssh: Addon, capture_exception: Mock
):
"""Test run captures the exception when image is missing."""
coresys.docker.containers.create.side_effect = [
@@ -450,11 +431,9 @@ async def test_install_fires_progress_events(
]
@pytest.mark.usefixtures("ha_ws_client")
async def test_install_progress_rounding_does_not_cause_misses(
coresys: CoreSys,
test_docker_interface: DockerInterface,
ha_ws_client: AsyncMock,
capture_exception: Mock,
coresys: CoreSys, test_docker_interface: DockerInterface, capture_exception: Mock
):
"""Test extremely close progress events do not create rounding issues."""
coresys.core.set_state(CoreState.RUNNING)
@@ -574,11 +553,9 @@ async def test_install_raises_on_pull_error(
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
@pytest.mark.usefixtures("ha_ws_client")
async def test_install_progress_handles_download_restart(
coresys: CoreSys,
test_docker_interface: DockerInterface,
ha_ws_client: AsyncMock,
capture_exception: Mock,
coresys: CoreSys, test_docker_interface: DockerInterface, capture_exception: Mock
):
"""Test install handles docker progress events that include a download restart."""
coresys.core.set_state(CoreState.RUNNING)
@@ -739,11 +716,9 @@ async def test_install_progress_handles_layers_skipping_download(
capture_exception.assert_not_called()
@pytest.mark.usefixtures("ha_ws_client")
async def test_missing_total_handled_gracefully(
coresys: CoreSys,
test_docker_interface: DockerInterface,
ha_ws_client: AsyncMock,
capture_exception: Mock,
coresys: CoreSys, test_docker_interface: DockerInterface, capture_exception: Mock
):
"""Test missing 'total' fields in progress details handled gracefully."""
coresys.core.set_state(CoreState.RUNNING)
+185 -218
View File
@@ -1,146 +1,121 @@
"""Test Docker manager."""
import asyncio
from http import HTTPStatus
from pathlib import Path
import re
from unittest.mock import AsyncMock, MagicMock, patch
import aiodocker
from aiodocker.containers import DockerContainer
from docker.errors import APIError, DockerException, NotFound
from docker.errors import APIError, NotFound
import pytest
from requests import RequestException
from supervisor.const import DNS_SUFFIX
from supervisor.coresys import CoreSys
from supervisor.docker.const import DockerMount, MountBindOptions, MountType
from supervisor.docker.const import (
LABEL_MANAGED,
DockerMount,
MountBindOptions,
MountType,
)
from supervisor.docker.manager import CommandReturn, DockerAPI
from supervisor.exceptions import DockerError
async def test_run_command_success(docker: DockerAPI):
async def test_run_command_success(docker: DockerAPI, container: DockerContainer):
"""Test successful command execution."""
# Mock container and its methods
mock_container = MagicMock()
mock_container.wait.return_value = {"StatusCode": 0}
mock_container.logs.return_value = b"command output"
# Mock docker containers.run to return our mock container
docker.dockerpy.containers.run.return_value = mock_container
container.wait.return_value = {"StatusCode": 0}
container.log.return_value = ["command output"]
# Execute the command
result = docker.run_command(
image="alpine", version="3.18", command="echo hello", stdout=True, stderr=True
result = await docker.run_command(
image="alpine", tag="3.18", command=["echo", "hello"], stdout=True, stderr=True
)
# Verify the result
assert isinstance(result, CommandReturn)
assert result.exit_code == 0
assert result.output == b"command output"
assert result.log == ["command output"]
# Verify docker.containers.run was called correctly
docker.dockerpy.containers.run.assert_called_once_with(
"alpine:3.18",
command="echo hello",
detach=True,
network=docker.network.name,
use_config_proxy=False,
stdout=True,
stderr=True,
mounts=None,
docker.containers.create.assert_called_once_with(
{
"Image": "alpine:3.18",
"Labels": {"supervisor_managed": ""},
"OpenStdin": False,
"StdinOnce": False,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"HostConfig": {
"NetworkMode": "hassio",
"Init": False,
"Privileged": False,
"Dns": ["172.30.32.3"],
"DnsSearch": ["local.hass.io"],
"DnsOptions": ["timeout:10"],
},
"Cmd": ["echo", "hello"],
},
name=None,
)
container.start.assert_called_once()
# Verify container cleanup
mock_container.remove.assert_called_once_with(force=True, v=True)
async def test_run_command_with_defaults(docker: DockerAPI):
"""Test command execution with default parameters."""
# Mock container and its methods
mock_container = MagicMock()
mock_container.wait.return_value = {"StatusCode": 1}
mock_container.logs.return_value = b"error output"
# Mock docker containers.run to return our mock container
docker.dockerpy.containers.run.return_value = mock_container
# Execute the command with minimal parameters
result = docker.run_command(image="ubuntu")
# Verify the result
assert isinstance(result, CommandReturn)
assert result.exit_code == 1
assert result.output == b"error output"
# Verify docker.containers.run was called with defaults
docker.dockerpy.containers.run.assert_called_once_with(
"ubuntu:latest", # default tag
command=None, # default command
detach=True,
network=docker.network.name,
use_config_proxy=False,
mounts=None,
)
# Verify container.logs was called with default stdout/stderr
mock_container.logs.assert_called_once_with(stdout=True, stderr=True)
container.delete.assert_called_once_with(force=True, v=True)
async def test_run_command_docker_exception(docker: DockerAPI):
"""Test command execution when Docker raises an exception."""
# Mock docker containers.run to raise DockerException
docker.dockerpy.containers.run.side_effect = DockerException("Docker error")
# Mock docker containers.run to raise aiodocker.DockerError
docker.containers.create.side_effect = aiodocker.DockerError(
HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "Docker error"}
)
# Execute the command and expect DockerError
with pytest.raises(DockerError, match="Can't execute command: Docker error"):
docker.run_command(image="alpine", command="test")
with pytest.raises(
DockerError,
match=re.escape(
"Can't execute command: Can't create container from alpine:latest: DockerError(500, 'Docker error')"
),
):
await docker.run_command(image="alpine", command="test")
async def test_run_command_request_exception(docker: DockerAPI):
"""Test command execution when requests raises an exception."""
# Mock docker containers.run to raise RequestException
docker.dockerpy.containers.run.side_effect = RequestException("Connection error")
# Execute the command and expect DockerError
with pytest.raises(DockerError, match="Can't execute command: Connection error"):
docker.run_command(image="alpine", command="test")
async def test_run_command_cleanup_on_exception(docker: DockerAPI):
async def test_run_command_cleanup_on_exception(
docker: DockerAPI, container: DockerContainer
):
"""Test that container cleanup happens even when an exception occurs."""
# Mock container
mock_container = MagicMock()
# Mock docker.containers.run to return container, but container.wait to raise exception
docker.dockerpy.containers.run.return_value = mock_container
mock_container.wait.side_effect = DockerException("Wait failed")
container.wait.side_effect = aiodocker.DockerError(500, {"message": "Wait failed"})
# Execute the command and expect DockerError
with pytest.raises(DockerError):
docker.run_command(image="alpine", command="test")
await docker.run_command(image="alpine", command="test")
# Verify container cleanup still happened
mock_container.remove.assert_called_once_with(force=True, v=True)
container.delete.assert_called_once_with(force=True, v=True)
async def test_run_command_custom_stdout_stderr(docker: DockerAPI):
async def test_run_command_custom_stdout_stderr(
docker: DockerAPI, container: DockerContainer
):
"""Test command execution with custom stdout/stderr settings."""
# Mock container and its methods
mock_container = MagicMock()
mock_container.wait.return_value = {"StatusCode": 0}
mock_container.logs.return_value = b"output"
# Mock docker containers.run to return our mock container
docker.dockerpy.containers.run.return_value = mock_container
container.wait.return_value = {"StatusCode": 0}
container.log.return_value = ["output"]
# Execute the command with custom stdout/stderr
result = docker.run_command(
result = await docker.run_command(
image="alpine", command="test", stdout=False, stderr=True
)
# Verify container.logs was called with the correct parameters
mock_container.logs.assert_called_once_with(stdout=False, stderr=True)
container.log.assert_called_once_with(stdout=False, stderr=True, follow=False)
# Verify the result
assert result.exit_code == 0
assert result.output == b"output"
assert result.log == ["output"]
async def test_run_command_with_mounts(docker: DockerAPI):
@@ -148,7 +123,7 @@ async def test_run_command_with_mounts(docker: DockerAPI):
# Mock container and its methods
mock_container = MagicMock()
mock_container.wait.return_value = {"StatusCode": 0}
mock_container.logs.return_value = b"output"
mock_container.logs.return_value = ["output"]
# Mock docker containers.run to return our mock container
docker.dockerpy.containers.run.return_value = mock_container
@@ -171,85 +146,95 @@ async def test_run_command_with_mounts(docker: DockerAPI):
]
# Execute the command with mounts
result = docker.run_command(image="alpine", command="test", mounts=mounts)
result = await docker.run_command(image="alpine", command="test", mounts=mounts)
# Verify the result
assert result.exit_code == 0
# Check that mounts were converted correctly
docker.dockerpy.containers.run.assert_called_once_with(
"alpine:latest",
command="test",
detach=True,
network=docker.network.name,
use_config_proxy=False,
mounts=[
{
"Type": "bind",
"Source": "/dev",
"Target": "/dev",
"ReadOnly": True,
"BindOptions": {"ReadOnlyNonRecursive": True},
docker.containers.create.assert_called_once_with(
{
"Image": "alpine:latest",
"Labels": {LABEL_MANAGED: ""},
"OpenStdin": False,
"StdinOnce": False,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"Cmd": "test",
"HostConfig": {
"NetworkMode": docker.network.name,
"Init": False,
"Privileged": False,
"Dns": [str(docker.network.dns)],
"DnsSearch": [DNS_SUFFIX],
"DnsOptions": ["timeout:10"],
"Mounts": [
{
"Type": "bind",
"Source": "/dev",
"Target": "/dev",
"ReadOnly": True,
"BindOptions": {"ReadOnlyNonRecursive": True},
},
{
"Type": "volume",
"Source": "my_volume",
"Target": "/data",
"ReadOnly": False,
},
],
},
{
"Type": "volume",
"Source": "my_volume",
"Target": "/data",
"ReadOnly": False,
},
],
},
name=None,
)
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_run_container_with_cidfile(coresys: CoreSys, docker: DockerAPI):
async def test_run_container_with_cidfile(
coresys: CoreSys, docker: DockerAPI, container: DockerContainer
):
"""Test container creation with cidfile and bind mount."""
# Mock container
mock_container = MagicMock(spec=DockerContainer, id="test_container_id_12345")
mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}
container.id = "test_container_id_12345"
container.show.return_value = mock_metadata = {"Id": container.id}
container_name = "test_container"
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
extern_cidfile_path = coresys.config.path_extern_cid_files / f"{container_name}.cid"
docker.containers.create.return_value = mock_container
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
# Mock container creation
with patch.object(
docker.containers, "create", return_value=mock_container
) as create_mock:
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
# Check the container creation parameters
docker.containers.create.assert_called_once()
create_config = docker.containers.create.call_args.args[0]
# Check the container creation parameters
create_mock.assert_called_once()
create_config = create_mock.call_args.args[0]
assert "HostConfig" in create_config
assert "Mounts" in create_config["HostConfig"]
assert {
"Type": "bind",
"Source": str(extern_cidfile_path),
"Target": "/run/cid",
"ReadOnly": True,
} in create_config["HostConfig"]["Mounts"]
assert "HostConfig" in create_config
assert "Mounts" in create_config["HostConfig"]
assert {
"Type": "bind",
"Source": str(extern_cidfile_path),
"Target": "/run/cid",
"ReadOnly": True,
} in create_config["HostConfig"]["Mounts"]
# Verify container start was called
container.start.assert_called_once()
# Verify container start was called
mock_container.start.assert_called_once()
# Verify cidfile was written with container ID
assert cidfile_path.exists()
assert cidfile_path.read_text() == container.id
# Verify cidfile was written with container ID
assert cidfile_path.exists()
assert cidfile_path.read_text() == mock_container.id
assert result == mock_metadata
assert result == mock_metadata
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_run_container_with_leftover_cidfile(coresys: CoreSys, docker: DockerAPI):
async def test_run_container_with_leftover_cidfile(
coresys: CoreSys, docker: DockerAPI, container: DockerContainer
):
"""Test container creation removes leftover cidfile before creating new one."""
# Mock container
mock_container = MagicMock(spec=DockerContainer, id="test_container_id_new")
mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}
container.id = "test_container_id_12345"
container.show.return_value = mock_metadata = {"Id": container.id}
container_name = "test_container"
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
@@ -257,30 +242,26 @@ async def test_run_container_with_leftover_cidfile(coresys: CoreSys, docker: Doc
# Create a leftover cidfile
cidfile_path.touch()
# Mock container creation
with patch.object(
docker.containers, "create", return_value=mock_container
) as create_mock:
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
# Verify container was created
create_mock.assert_called_once()
# Verify container was created
docker.containers.create.assert_called_once()
# Verify new cidfile was written with container ID
assert cidfile_path.exists()
assert cidfile_path.read_text() == mock_container.id
# Verify new cidfile was written with container ID
assert cidfile_path.exists()
assert cidfile_path.read_text() == container.id
assert result == mock_metadata
assert result == mock_metadata
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_stop_container_with_cidfile_cleanup(
coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
coresys: CoreSys, docker: DockerAPI, container: DockerContainer
):
"""Test container stop with cidfile cleanup."""
# Mock container
mock_container = MagicMock()
mock_container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
container_name = "test_container"
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
@@ -288,55 +269,45 @@ async def test_stop_container_with_cidfile_cleanup(
# Create a cidfile
cidfile_path.touch()
# Mock the containers.get method and cidfile cleanup
with (
patch.object(docker.containers_legacy, "get", return_value=mock_container),
):
# Call stop_container with remove_container=True
loop = asyncio.get_event_loop()
await loop.run_in_executor(
None,
lambda kwrgs: docker.stop_container(**kwrgs),
{"timeout": 10, "remove_container": True, "name": container_name},
)
# Call stop_container with remove_container=True
await docker.stop_container(timeout=10, remove_container=True, name=container_name)
# Verify container operations
mock_container.stop.assert_called_once_with(timeout=10)
mock_container.remove.assert_called_once_with(force=True, v=True)
# Verify container operations
container.stop.assert_called_once_with(timeout=10)
container.delete.assert_called_once_with(force=True, v=True)
assert not cidfile_path.exists()
assert not cidfile_path.exists()
async def test_stop_container_without_removal_no_cidfile_cleanup(docker: DockerAPI):
async def test_stop_container_without_removal_no_cidfile_cleanup(
docker: DockerAPI, container: DockerContainer
):
"""Test container stop without removal doesn't clean up cidfile."""
# Mock container
mock_container = MagicMock()
mock_container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
container_name = "test_container"
# Mock the containers.get method and cidfile cleanup
with (
patch.object(docker.containers_legacy, "get", return_value=mock_container),
patch("pathlib.Path.unlink") as mock_unlink,
):
with patch("pathlib.Path.unlink") as mock_unlink:
# Call stop_container with remove_container=False
docker.stop_container(container_name, timeout=10, remove_container=False)
await docker.stop_container(container_name, timeout=10, remove_container=False)
# Verify container operations
mock_container.stop.assert_called_once_with(timeout=10)
mock_container.remove.assert_not_called()
container.stop.assert_called_once_with(timeout=10)
container.delete.assert_not_called()
# Verify cidfile cleanup was NOT called
mock_unlink.assert_not_called()
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerAPI):
async def test_cidfile_cleanup_handles_oserror(
coresys: CoreSys, docker: DockerAPI, container: DockerContainer
):
"""Test that cidfile cleanup handles OSError gracefully."""
# Mock container
mock_container = MagicMock()
mock_container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
container_name = "test_container"
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
@@ -346,7 +317,6 @@ async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerA
# Mock the containers.get method and cidfile cleanup to raise OSError
with (
patch.object(docker.containers_legacy, "get", return_value=mock_container),
patch("pathlib.Path.is_dir", return_value=False),
patch("pathlib.Path.is_file", return_value=True),
patch(
@@ -354,11 +324,11 @@ async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerA
) as mock_unlink,
):
# Call stop_container - should not raise exception
docker.stop_container(container_name, timeout=10, remove_container=True)
await docker.stop_container(container_name, timeout=10, remove_container=True)
# Verify container operations completed
mock_container.stop.assert_called_once_with(timeout=10)
mock_container.remove.assert_called_once_with(force=True, v=True)
container.stop.assert_called_once_with(timeout=10)
container.delete.assert_called_once_with(force=True, v=True)
# Verify cidfile cleanup was attempted
mock_unlink.assert_called_once_with(missing_ok=True)
@@ -366,7 +336,7 @@ async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerA
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_run_container_with_leftover_cidfile_directory(
coresys: CoreSys, docker: DockerAPI
coresys: CoreSys, docker: DockerAPI, container: DockerContainer
):
"""Test container creation removes leftover cidfile directory before creating new one.
@@ -374,9 +344,8 @@ async def test_run_container_with_leftover_cidfile_directory(
before Supervisor could write the CID file, causing Docker to create
the bind mount source as a directory.
"""
# Mock container
mock_container = MagicMock(spec=DockerContainer, id="test_container_id_new")
mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}
container.id = "test_container_id_12345"
container.show.return_value = mock_metadata = {"Id": container.id}
container_name = "test_container"
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
@@ -385,25 +354,23 @@ async def test_run_container_with_leftover_cidfile_directory(
cidfile_path.mkdir()
assert cidfile_path.is_dir()
# Mock container creation
with patch.object(
docker.containers, "create", return_value=mock_container
) as create_mock:
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
# Verify container was created
create_mock.assert_called_once()
# Verify container was created
docker.containers.create.assert_called_once()
# Verify new cidfile was written as a file (not directory)
assert cidfile_path.exists()
assert cidfile_path.is_file()
assert cidfile_path.read_text() == mock_container.id
# Verify new cidfile was written as a file (not directory)
assert cidfile_path.exists()
assert cidfile_path.is_file()
assert cidfile_path.read_text() == container.id
assert result == mock_metadata
assert result == mock_metadata
async def test_repair(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
async def test_repair(
coresys: CoreSys, caplog: pytest.LogCaptureFixture, container: DockerContainer
):
"""Test repair API."""
coresys.docker.dockerpy.networks.get.side_effect = [
hassio := MagicMock(
@@ -417,13 +384,13 @@ async def test_repair(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
),
host := MagicMock(attrs={"Containers": {}}),
]
coresys.docker.dockerpy.containers.get.side_effect = [
MagicMock(),
NotFound("corrupt"),
DockerException("fail"),
coresys.docker.containers.get.side_effect = [
container,
aiodocker.DockerError(HTTPStatus.NOT_FOUND, {"message": "corrupt"}),
aiodocker.DockerError(HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "fail"}),
]
await coresys.run_in_executor(coresys.docker.repair)
await coresys.docker.repair()
coresys.docker.dockerpy.api.prune_containers.assert_called_once()
coresys.docker.dockerpy.api.prune_images.assert_called_once_with(
@@ -446,7 +413,7 @@ async def test_repair_failures(coresys: CoreSys, caplog: pytest.LogCaptureFixtur
coresys.docker.dockerpy.api.prune_networks.side_effect = APIError("fail")
coresys.docker.dockerpy.networks.get.side_effect = NotFound("missing")
await coresys.run_in_executor(coresys.docker.repair)
await coresys.docker.repair()
assert "Error for containers prune: fail" in caplog.text
assert "Error for images prune: fail" in caplog.text
+11 -17
View File
@@ -2,10 +2,10 @@
import asyncio
from typing import Any
from unittest.mock import MagicMock, PropertyMock, patch
from unittest.mock import PropertyMock, patch
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
from docker.models.containers import Container
import pytest
from supervisor.const import BusEvent
@@ -108,22 +108,16 @@ async def test_events(
fire_event.assert_not_called()
async def test_unlabeled_container(coresys: CoreSys):
async def test_unlabeled_container(coresys: CoreSys, container: DockerContainer):
"""Test attaching to unlabeled container is still watched."""
container_collection = MagicMock()
container_collection.get.return_value = Container(
{
"Name": "homeassistant",
"Id": "abc123",
"State": {"Status": "running"},
"Config": {},
}
)
with patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
new=PropertyMock(return_value=container_collection),
):
await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))
container.id = "abc123"
container.show.return_value = {
"Name": "homeassistant",
"Id": "abc123",
"State": {"Status": "running"},
"Config": {},
}
await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))
with (
patch(
+7 -3
View File
@@ -1,16 +1,20 @@
"""Test Observer plugin container."""
from ipaddress import IPv4Address, ip_network
from unittest.mock import MagicMock, patch
from unittest.mock import patch
from aiodocker.containers import DockerContainer
from supervisor.coresys import CoreSys
from supervisor.docker.const import DockerMount, MountType
from supervisor.docker.manager import DockerAPI
async def test_start(coresys: CoreSys, container: MagicMock):
async def test_start(coresys: CoreSys, container: DockerContainer):
"""Test starting observer plugin."""
with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
with patch.object(
DockerAPI, "run", return_value=container.show.return_value
) as run:
await coresys.plugins.observer.start()
run.assert_called_once()
+104 -104
View File
@@ -5,10 +5,9 @@ from http import HTTPStatus
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, call, patch
import aiodocker
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
from docker.errors import APIError, DockerException, NotFound
import pytest
from requests import RequestException
from time_machine import travel
from supervisor.const import CpuArch
@@ -59,22 +58,15 @@ async def test_update_fails_if_out_of_date(coresys: CoreSys):
await coresys.homeassistant.core.update()
@pytest.mark.parametrize(
"err",
[
aiodocker.DockerError(HTTPStatus.TOO_MANY_REQUESTS, {"message": "ratelimit"}),
APIError("ratelimit", MagicMock(status_code=HTTPStatus.TOO_MANY_REQUESTS)),
],
)
async def test_install_landingpage_docker_ratelimit_error(
coresys: CoreSys,
capture_exception: Mock,
caplog: pytest.LogCaptureFixture,
err: Exception,
coresys: CoreSys, capture_exception: Mock, caplog: pytest.LogCaptureFixture
):
"""Test install landing page fails due to docker ratelimit error."""
coresys.security.force = True
coresys.docker.images.pull.side_effect = [err, AsyncIterator([{}])]
coresys.docker.images.pull.side_effect = [
aiodocker.DockerError(HTTPStatus.TOO_MANY_REQUESTS, {"message": "ratelimit"}),
AsyncIterator([{}]),
]
with (
patch.object(DockerHomeAssistant, "attach", side_effect=DockerError),
@@ -97,23 +89,21 @@ async def test_install_landingpage_docker_ratelimit_error(
Issue(IssueType.DOCKER_RATELIMIT, ContextType.SYSTEM)
in coresys.resolution.issues
)
assert "Unhandled exception:" not in caplog.text
@pytest.mark.parametrize(
"err",
[
aiodocker.DockerError(HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "fail"}),
APIError("fail"),
DockerException(),
RequestException(),
OSError(),
aiodocker.DockerError(HTTPStatus.NOT_FOUND, {"message": "missing"}),
],
)
async def test_install_landingpage_other_error(
coresys: CoreSys,
capture_exception: Mock,
caplog: pytest.LogCaptureFixture,
err: Exception,
err: aiodocker.DockerError,
):
"""Test install landing page fails due to other error."""
coresys.docker.images.inspect.side_effect = [err, MagicMock()]
@@ -135,24 +125,18 @@ async def test_install_landingpage_other_error(
assert "Failed to install landingpage, retrying after 30sec" in caplog.text
capture_exception.assert_called_once_with(err)
assert "Unhandled exception:" not in caplog.text
@pytest.mark.parametrize(
"err",
[
aiodocker.DockerError(HTTPStatus.TOO_MANY_REQUESTS, {"message": "ratelimit"}),
APIError("ratelimit", MagicMock(status_code=HTTPStatus.TOO_MANY_REQUESTS)),
],
)
async def test_install_docker_ratelimit_error(
coresys: CoreSys,
capture_exception: Mock,
caplog: pytest.LogCaptureFixture,
err: Exception,
coresys: CoreSys, capture_exception: Mock, caplog: pytest.LogCaptureFixture
):
"""Test install fails due to docker ratelimit error."""
coresys.security.force = True
coresys.docker.images.pull.side_effect = [err, AsyncIterator([{}])]
coresys.docker.images.pull.side_effect = [
aiodocker.DockerError(HTTPStatus.TOO_MANY_REQUESTS, {"message": "ratelimit"}),
AsyncIterator([{}]),
]
with (
patch.object(HomeAssistantCore, "start"),
@@ -179,23 +163,21 @@ async def test_install_docker_ratelimit_error(
Issue(IssueType.DOCKER_RATELIMIT, ContextType.SYSTEM)
in coresys.resolution.issues
)
assert "Unhandled exception:" not in caplog.text
@pytest.mark.parametrize(
"err",
[
aiodocker.DockerError(HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "fail"}),
APIError("fail"),
DockerException(),
RequestException(),
OSError(),
aiodocker.DockerError(HTTPStatus.NOT_FOUND, {"message": "missing"}),
],
)
async def test_install_other_error(
coresys: CoreSys,
capture_exception: Mock,
caplog: pytest.LogCaptureFixture,
err: Exception,
err: aiodocker.DockerError,
):
"""Test install fails due to other error."""
coresys.docker.images.inspect.side_effect = [err, MagicMock()]
@@ -221,12 +203,13 @@ async def test_install_other_error(
assert "Error on Home Assistant installation. Retrying in 30sec" in caplog.text
capture_exception.assert_called_once_with(err)
assert "Unhandled exception:" not in caplog.text
@pytest.mark.parametrize(
("container_exc", "image_exc", "remove_calls"),
("container_exc", "image_exc", "delete_calls"),
[
(NotFound("missing"), None, []),
(aiodocker.DockerError(404, {"message": "missing"}), None, []),
(
None,
aiodocker.DockerError(404, {"message": "missing"}),
@@ -238,16 +221,16 @@ async def test_install_other_error(
@pytest.mark.usefixtures("path_extern")
async def test_start(
coresys: CoreSys,
container: MagicMock,
container_exc: DockerException | None,
container: DockerContainer,
container_exc: aiodocker.DockerError | None,
image_exc: aiodocker.DockerError | None,
remove_calls: list[call],
delete_calls: list[call],
):
"""Test starting Home Assistant."""
coresys.docker.images.inspect.return_value = {"Id": "123"}
coresys.docker.images.inspect.side_effect = image_exc
coresys.docker.containers_legacy.get.return_value.id = "123"
coresys.docker.containers_legacy.get.side_effect = container_exc
container.id = "123"
coresys.docker.containers.get.side_effect = container_exc
with (
patch.object(
@@ -255,7 +238,7 @@ async def test_start(
"version",
new=PropertyMock(return_value=AwesomeVersion("2023.7.0")),
),
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(DockerAPI, "run", return_value=container.show.return_value) as run,
patch.object(HomeAssistantCore, "_block_till_run") as block_till_run,
):
await coresys.homeassistant.core.start()
@@ -269,18 +252,17 @@ async def test_start(
assert run.call_args.kwargs["name"] == "homeassistant"
assert run.call_args.kwargs["hostname"] == "homeassistant"
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
assert (
coresys.docker.containers_legacy.get.return_value.remove.call_args_list
== remove_calls
)
container.stop.assert_not_called()
assert container.delete.call_args_list == delete_calls
async def test_start_existing_container(coresys: CoreSys, path_extern):
@pytest.mark.usefixtures("path_extern")
async def test_start_existing_container(coresys: CoreSys, container: DockerContainer):
"""Test starting Home Assistant when container exists and is viable."""
coresys.docker.images.inspect.return_value = {"Id": "123"}
coresys.docker.containers_legacy.get.return_value.image.id = "123"
coresys.docker.containers_legacy.get.return_value.status = "exited"
container.show.return_value["Image"] = "123"
container.show.return_value["State"]["Status"] = "exited"
container.show.return_value["State"]["Running"] = False
with (
patch.object(
@@ -293,90 +275,102 @@ async def test_start_existing_container(coresys: CoreSys, path_extern):
await coresys.homeassistant.core.start()
block_till_run.assert_called_once()
coresys.docker.containers_legacy.get.return_value.start.assert_called_once()
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
coresys.docker.containers_legacy.get.return_value.remove.assert_not_called()
coresys.docker.containers_legacy.get.return_value.run.assert_not_called()
container.start.assert_called_once()
container.stop.assert_not_called()
container.delete.assert_not_called()
coresys.docker.containers.create.assert_not_called()
@pytest.mark.parametrize("exists", [True, False])
async def test_stop(coresys: CoreSys, exists: bool):
async def test_stop(coresys: CoreSys, container: DockerContainer, exists: bool):
"""Test stoppping Home Assistant."""
if exists:
coresys.docker.containers_legacy.get.return_value.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
else:
coresys.docker.containers_legacy.get.side_effect = NotFound("missing")
coresys.docker.containers.get.side_effect = aiodocker.DockerError(
404, {"message": "missing"}
)
await coresys.homeassistant.core.stop()
coresys.docker.containers_legacy.get.return_value.remove.assert_not_called()
container.delete.assert_not_called()
if exists:
coresys.docker.containers_legacy.get.return_value.stop.assert_called_once_with(
timeout=260
)
container.stop.assert_called_once_with(timeout=260)
else:
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
container.stop.assert_not_called()
async def test_restart(coresys: CoreSys):
async def test_restart(coresys: CoreSys, container: DockerContainer):
"""Test restarting Home Assistant."""
with patch.object(HomeAssistantCore, "_block_till_run") as block_till_run:
await coresys.homeassistant.core.restart()
block_till_run.assert_called_once()
coresys.docker.containers_legacy.get.return_value.restart.assert_called_once_with(
timeout=260
)
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
container.restart.assert_called_once_with(timeout=260)
container.stop.assert_not_called()
@pytest.mark.parametrize("get_error", [NotFound("missing"), DockerException(), None])
async def test_restart_failures(coresys: CoreSys, get_error: DockerException | None):
@pytest.mark.parametrize(
"get_error",
[
aiodocker.DockerError(404, {"message": "missing"}),
aiodocker.DockerError(500, {"message": "fail"}),
None,
],
)
async def test_restart_failures(
coresys: CoreSys,
container: DockerContainer,
get_error: aiodocker.DockerError | None,
):
"""Test restart fails when container missing or can't be restarted."""
coresys.docker.containers_legacy.get.return_value.restart.side_effect = (
DockerException()
)
container.restart.side_effect = aiodocker.DockerError(500, {"message": "fail"})
if get_error:
coresys.docker.containers_legacy.get.side_effect = get_error
coresys.docker.containers.get.side_effect = get_error
with pytest.raises(HomeAssistantError):
await coresys.homeassistant.core.restart()
@pytest.mark.parametrize(
"get_error,status",
"get_error,running",
[
(NotFound("missing"), ""),
(DockerException(), ""),
(None, "stopped"),
(None, "running"),
(aiodocker.DockerError(404, {"message": "missing"}), False),
(aiodocker.DockerError(500, {"message": "fail"}), False),
(None, False),
(None, True),
],
)
async def test_stats_failures(
coresys: CoreSys, get_error: DockerException | None, status: str
coresys: CoreSys,
container: DockerContainer,
get_error: aiodocker.DockerError | None,
running: bool,
):
"""Test errors when getting stats."""
coresys.docker.containers_legacy.get.return_value.status = status
coresys.docker.containers_legacy.get.return_value.stats.side_effect = (
DockerException()
)
container.show.return_value["State"]["Status"] = "running" if running else "stopped"
container.show.return_value["State"]["Running"] = running
container.stats.side_effect = aiodocker.DockerError(500, {"message": "fail"})
if get_error:
coresys.docker.containers_legacy.get.side_effect = get_error
coresys.docker.containers.get.side_effect = get_error
with pytest.raises(HomeAssistantError):
await coresys.homeassistant.core.stats()
async def test_api_check_timeout(
coresys: CoreSys, container: MagicMock, caplog: pytest.LogCaptureFixture
coresys: CoreSys, container: DockerContainer, caplog: pytest.LogCaptureFixture
):
"""Test attempts to contact the API timeout."""
container.status = "stopped"
container.show.return_value["State"]["Status"] = "stopped"
container.show.return_value["State"]["Running"] = False
coresys.homeassistant.version = AwesomeVersion("2023.9.0")
coresys.homeassistant.api.get_api_state.return_value = None
async def mock_instance_start(*_):
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
with (
patch.object(DockerHomeAssistant, "start", new=mock_instance_start),
@@ -400,14 +394,16 @@ async def test_api_check_timeout(
async def test_api_check_success(
coresys: CoreSys, container: MagicMock, caplog: pytest.LogCaptureFixture
coresys: CoreSys, container: DockerContainer, caplog: pytest.LogCaptureFixture
):
"""Test attempts to contact the API timeout."""
container.status = "stopped"
container.show.return_value["State"]["Status"] = "stopped"
container.show.return_value["State"]["Running"] = False
coresys.homeassistant.version = AwesomeVersion("2023.9.0")
async def mock_instance_start(*_):
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
with (
patch.object(DockerHomeAssistant, "start", new=mock_instance_start),
@@ -426,7 +422,7 @@ async def test_api_check_success(
async def test_api_check_database_migration(
coresys: CoreSys, container: MagicMock, caplog: pytest.LogCaptureFixture
coresys: CoreSys, container: DockerContainer, caplog: pytest.LogCaptureFixture
):
"""Test attempts to contact the API timeout."""
calls = []
@@ -438,12 +434,14 @@ async def test_api_check_database_migration(
else:
return APIState("NOT_RUNNING", True)
container.status = "stopped"
container.show.return_value["State"]["Status"] = "stopped"
container.show.return_value["State"]["Running"] = False
coresys.homeassistant.version = AwesomeVersion("2023.9.0")
coresys.homeassistant.api.get_api_state.side_effect = mock_api_state
async def mock_instance_start(*_):
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
with (
patch.object(DockerHomeAssistant, "start", new=mock_instance_start),
@@ -462,7 +460,7 @@ async def test_api_check_database_migration(
async def test_core_loads_wrong_image_for_machine(
coresys: CoreSys, container: MagicMock
coresys: CoreSys, container: DockerContainer
):
"""Test core is loaded with wrong image for machine."""
coresys.homeassistant.set_image("ghcr.io/home-assistant/odroid-n2-homeassistant")
@@ -476,7 +474,7 @@ async def test_core_loads_wrong_image_for_machine(
"Config": {"Labels": {"io.hass.version": "2024.4.0"}},
},
) as pull_image:
container.attrs |= pull_image.return_value
container.show.return_value |= pull_image.return_value
await coresys.homeassistant.core.load()
pull_image.assert_called_once_with(
ANY,
@@ -486,7 +484,7 @@ async def test_core_loads_wrong_image_for_machine(
auth=None,
)
container.remove.assert_called_once_with(force=True, v=True)
container.delete.assert_called_once_with(force=True, v=True)
assert coresys.docker.images.delete.call_args_list[0] == call(
"ghcr.io/home-assistant/odroid-n2-homeassistant:latest",
force=True,
@@ -500,16 +498,18 @@ async def test_core_loads_wrong_image_for_machine(
)
async def test_core_load_allows_image_override(coresys: CoreSys, container: MagicMock):
async def test_core_load_allows_image_override(
coresys: CoreSys, container: DockerContainer
):
"""Test core does not change image if user overrode it."""
coresys.homeassistant.set_image("ghcr.io/home-assistant/odroid-n2-homeassistant")
coresys.homeassistant.version = AwesomeVersion("2024.4.0")
container.attrs["Config"] = {"Labels": {"io.hass.version": "2024.4.0"}}
container.show.return_value["Config"] = {"Labels": {"io.hass.version": "2024.4.0"}}
coresys.homeassistant.override_image = True
await coresys.homeassistant.core.load()
container.remove.assert_not_called()
container.delete.assert_not_called()
coresys.docker.images.delete.assert_not_called()
coresys.docker.images.inspect.assert_not_called()
assert (
@@ -518,7 +518,7 @@ async def test_core_load_allows_image_override(coresys: CoreSys, container: Magi
async def test_core_loads_wrong_image_for_architecture(
coresys: CoreSys, container: MagicMock
coresys: CoreSys, container: DockerContainer
):
"""Test core is loaded with wrong image for architecture."""
coresys.homeassistant.version = AwesomeVersion("2024.4.0")
@@ -529,7 +529,7 @@ async def test_core_loads_wrong_image_for_architecture(
"Config": {"Labels": {"io.hass.version": "2024.4.0"}},
}
)
container.attrs |= img_data
container.show.return_value |= img_data
with patch.object(
DockerAPI,
@@ -545,7 +545,7 @@ async def test_core_loads_wrong_image_for_architecture(
auth=None,
)
container.remove.assert_called_once_with(force=True, v=True)
container.delete.assert_called_once_with(force=True, v=True)
assert coresys.docker.images.delete.call_args_list[0] == call(
"ghcr.io/home-assistant/qemux86-64-homeassistant:latest",
force=True,
@@ -1,8 +1,9 @@
"""Test Home Assistant watchdog."""
import asyncio
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from unittest.mock import AsyncMock, PropertyMock, patch
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
from supervisor.const import BusEvent
@@ -147,11 +148,12 @@ async def test_home_assistant_watchdog_rebuild_on_failure(coresys: CoreSys) -> N
async def test_home_assistant_watchdog_skip_on_load(
coresys: CoreSys, container: MagicMock
coresys: CoreSys, container: DockerContainer
) -> None:
"""Test home assistant watchdog skips a crash event on load."""
container.status = "stopped"
container.attrs["State"]["ExitCode"] = 1
container.show.return_value["State"]["Status"] = "stopped"
container.show.return_value["State"]["Running"] = False
container.show.return_value["State"]["ExitCode"] = 1
coresys.homeassistant.core.watchdog = True
events = AsyncMock()
+13 -16
View File
@@ -1,10 +1,10 @@
"""Test scheduled tasks."""
from collections.abc import AsyncGenerator
from pathlib import Path
from shutil import copy
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
from unittest.mock import AsyncMock, Mock, PropertyMock, patch
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
import pytest
@@ -25,12 +25,13 @@ from tests.common import MockResponse, get_fixture_path
@pytest.fixture(name="tasks")
async def fixture_tasks(
coresys: CoreSys, container: MagicMock
coresys: CoreSys, container: DockerContainer
) -> AsyncGenerator[Tasks]:
"""Return task manager."""
coresys.homeassistant.watchdog = True
coresys.homeassistant.version = AwesomeVersion("2023.12.0")
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
yield Tasks(coresys)
@@ -102,10 +103,11 @@ async def test_watchdog_homeassistant_api_landing_page(tasks: Tasks, coresys: Co
async def test_watchdog_homeassistant_api_not_running(
tasks: Tasks, container: MagicMock
tasks: Tasks, container: DockerContainer
):
"""Test watchdog of homeassistant api does not monitor when home assistant not running."""
container.status = "stopped"
container.show.return_value["State"]["Status"] = "stopped"
container.show.return_value["State"]["Running"] = False
with (
patch.object(HomeAssistantAPI, "check_api_state", return_value=False),
@@ -171,12 +173,9 @@ async def test_watchdog_homeassistant_api_reanimation_limit(
rebuild.assert_not_called()
@pytest.mark.usefixtures("no_job_throttle")
@pytest.mark.usefixtures("no_job_throttle", "supervisor_internet")
async def test_reload_updater_triggers_supervisor_update(
tasks: Tasks,
coresys: CoreSys,
mock_update_data: MockResponse,
supervisor_internet: AsyncMock,
tasks: Tasks, coresys: CoreSys, mock_update_data: MockResponse
):
"""Test an updater reload triggers a supervisor update if there is one."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -205,10 +204,8 @@ async def test_reload_updater_triggers_supervisor_update(
update.assert_called_once()
@pytest.mark.usefixtures("path_extern")
async def test_core_backup_cleanup(
tasks: Tasks, coresys: CoreSys, tmp_supervisor_data: Path
):
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_core_backup_cleanup(tasks: Tasks, coresys: CoreSys):
"""Test core backup task cleans up old backup files."""
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
@@ -232,10 +229,10 @@ async def test_core_backup_cleanup(
assert not old_tar.exists()
@pytest.mark.usefixtures("tmp_supervisor_data")
async def test_update_addons_auto_update_success(
tasks: Tasks,
coresys: CoreSys,
tmp_supervisor_data: Path,
ha_ws_client: AsyncMock,
install_addon_example: Addon,
):
+14 -14
View File
@@ -1,8 +1,9 @@
"""Test base plugin functionality."""
import asyncio
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, call, patch
from unittest.mock import ANY, Mock, PropertyMock, call, patch
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
import pytest
@@ -163,15 +164,16 @@ async def test_plugin_watchdog_max_failed_attempts(
capture_exception: Mock,
plugin: PluginBase,
error: PluginError,
container: MagicMock,
container: DockerContainer,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test plugin watchdog gives up after max failed attempts."""
with patch.object(type(plugin.instance), "attach"):
await plugin.load()
container.status = "stopped"
container.attrs = {"State": {"ExitCode": 1}}
container.show.return_value["State"]["Status"] = "stopped"
container.show.return_value["State"]["Running"] = False
container.show.return_value["State"]["ExitCode"] = 1
with (
patch("supervisor.plugins.base.WATCHDOG_RETRY_SECONDS", 0),
patch.object(type(plugin), "start", side_effect=error) as start,
@@ -325,9 +327,8 @@ async def test_update_fails_if_out_of_date(
[PluginAudio, PluginCli, PluginDns, PluginMulticast, PluginObserver],
indirect=True,
)
async def test_repair_failed(
coresys: CoreSys, capture_exception: Mock, plugin: PluginBase
):
@pytest.mark.usefixtures("coresys")
async def test_repair_failed(capture_exception: Mock, plugin: PluginBase):
"""Test repair failed."""
with (
patch.object(DockerInterface, "exists", return_value=False),
@@ -348,7 +349,7 @@ async def test_repair_failed(
indirect=True,
)
async def test_load_with_incorrect_image(
coresys: CoreSys, container: MagicMock, plugin: PluginBase
coresys: CoreSys, container: DockerContainer, plugin: PluginBase
):
"""Test plugin loads with the incorrect image."""
plugin.image = old_image = f"ghcr.io/home-assistant/aarch64-hassio-{plugin.slug}"
@@ -356,12 +357,13 @@ async def test_load_with_incorrect_image(
coresys.updater._data["image"][plugin.slug] = correct_image # pylint: disable=protected-access
plugin.version = AwesomeVersion("2024.4.0")
container.status = "running"
container.show.return_value["State"]["Status"] = "running"
container.show.return_value["State"]["Running"] = True
coresys.docker.images.inspect.return_value = img_data = (
coresys.docker.images.inspect.return_value
| {"Config": {"Labels": {"io.hass.version": "2024.4.0"}}}
)
container.attrs |= img_data
container.show.return_value |= img_data
with patch.object(DockerAPI, "pull_image", return_value=img_data) as pull_image:
await plugin.load()
@@ -369,7 +371,7 @@ async def test_load_with_incorrect_image(
ANY, correct_image, "2024.4.0", platform="linux/amd64", auth=None
)
container.remove.assert_called_once_with(force=True, v=True)
container.delete.assert_called_once_with(force=True, v=True)
assert coresys.docker.images.delete.call_args_list[0] == call(
f"{old_image}:latest",
force=True,
@@ -386,9 +388,7 @@ async def test_load_with_incorrect_image(
[PluginAudio, PluginCli, PluginDns, PluginMulticast, PluginObserver],
indirect=True,
)
async def test_default_image_fallback(
coresys: CoreSys, container: MagicMock, plugin: PluginBase
):
async def test_default_image_fallback(coresys: CoreSys, plugin: PluginBase):
"""Test default image falls back to hard-coded constant if we fail to fetch version file."""
assert getattr(coresys.updater, f"image_{plugin.slug}") is None
assert plugin.default_image == f"ghcr.io/home-assistant/amd64-hassio-{plugin.slug}"
@@ -2,6 +2,7 @@
from unittest.mock import MagicMock, patch
from aiodocker.containers import DockerContainer
import pytest
from supervisor.addons.addon import Addon
@@ -25,12 +26,17 @@ def _make_mock_container_get(bad_config_names: list[str], folder: str = "media")
"Propagation": "rprivate",
}
def mock_container_get(name):
out = MagicMock()
out.status = "running"
out.attrs = {"State": {}, "Mounts": []}
async def mock_container_get(name):
out = MagicMock(spec=DockerContainer)
out.show.return_value = {
"State": {
"Status": "running",
"Running": True,
},
"Mounts": [],
}
if name in bad_config_names:
out.attrs["Mounts"].append(mount)
out.show.return_value["Mounts"].append(mount)
return out
@@ -52,12 +58,17 @@ def _make_mock_container_get_with_volume_mount(
"Propagation": "rprivate", # Wrong propagation, but not our mount
}
def mock_container_get(name):
out = MagicMock()
out.status = "running"
out.attrs = {"State": {}, "Mounts": []}
async def mock_container_get(name):
out = MagicMock(spec=DockerContainer)
out.show.return_value = {
"State": {
"Status": "running",
"Running": True,
},
"Mounts": [],
}
if name in bad_config_names:
out.attrs["Mounts"].append(mount)
out.show.return_value["Mounts"].append(mount)
return out
@@ -72,11 +83,10 @@ async def test_base(coresys: CoreSys):
@pytest.mark.parametrize("folder", ["media", "share"])
async def test_check(
docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon, folder: str
):
@pytest.mark.usefixtures("install_addon_ssh")
async def test_check(docker: DockerAPI, coresys: CoreSys, folder: str):
"""Test check reports issue when containers have incorrect config."""
docker.containers_legacy.get = _make_mock_container_get(
docker.containers.get = _make_mock_container_get(
["homeassistant", "hassio_audio", "addon_local_ssh"], folder
)
# Use state used in setup()
@@ -132,7 +142,7 @@ async def test_check(
assert await docker_config.approve_check()
# IF config issue is resolved, all issues are removed except the main one. Which will be removed if check isn't approved
docker.containers_legacy.get = _make_mock_container_get([])
docker.containers.get = _make_mock_container_get([])
with patch.object(DockerInterface, "is_running", return_value=True):
await coresys.plugins.load()
await coresys.homeassistant.load()
@@ -159,7 +169,7 @@ async def test_addon_volume_mount_not_flagged(
] # No media/share
# Mock container that has VOLUME mount to media/share with wrong propagation
docker.containers_legacy.get = _make_mock_container_get_with_volume_mount(
docker.containers.get = _make_mock_container_get_with_volume_mount(
["addon_local_ssh"], folder
)
@@ -196,8 +206,9 @@ async def test_addon_volume_mount_not_flagged(
@pytest.mark.parametrize("folder", ["media", "share"])
@pytest.mark.usefixtures("install_addon_ssh")
async def test_addon_configured_mount_still_flagged(
docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon, folder: str
docker: DockerAPI, coresys: CoreSys, folder: str
):
"""Test that add-on with configured media/share mount is still flagged when propagation wrong."""
# Keep the original configuration which includes media/share
@@ -213,15 +224,20 @@ async def test_addon_configured_mount_still_flagged(
"Propagation": "rprivate", # Wrong propagation
}
def mock_container_get(name):
out = MagicMock()
out.status = "running"
out.attrs = {"State": {}, "Mounts": []}
async def mock_container_get(name):
out = MagicMock(spec=DockerContainer)
out.show.return_value = {
"State": {
"Status": "running",
"Running": True,
},
"Mounts": [],
}
if name == "addon_local_ssh":
out.attrs["Mounts"].append(mount)
out.show.return_value["Mounts"].append(mount)
return out
docker.containers_legacy.get = mock_container_get
docker.containers.get = mock_container_get
await coresys.core.set_state(CoreState.SETUP)
with patch.object(DockerInterface, "is_running", return_value=True):
@@ -258,11 +274,16 @@ async def test_addon_custom_target_path_flagged(
{"type": mapping_type, "read_only": False, "path": custom_path},
]
def mock_container_get(name: str) -> MagicMock:
async def mock_container_get(name: str) -> MagicMock:
"""Mock container get with custom target path mount."""
out = MagicMock()
out.status = "running"
out.attrs = {"State": {}, "Mounts": []}
out = MagicMock(spec=DockerContainer)
out.show.return_value = {
"State": {
"Status": "running",
"Running": True,
},
"Mounts": [],
}
# Add mount with custom target path and wrong propagation
mount = {
@@ -272,10 +293,10 @@ async def test_addon_custom_target_path_flagged(
}
if name == "addon_local_ssh":
out.attrs["Mounts"].append(mount)
out.show.return_value["Mounts"].append(mount)
return out
docker.containers_legacy.get = mock_container_get
docker.containers.get = mock_container_get
await coresys.core.set_state(CoreState.SETUP)
with patch.object(DockerInterface, "is_running", return_value=True):
@@ -3,7 +3,8 @@
# pylint: disable=import-error,protected-access
from unittest.mock import MagicMock, patch
from docker.errors import DockerException
import aiodocker
from aiodocker.containers import DockerContainer
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
@@ -12,9 +13,9 @@ from supervisor.resolution.data import Issue
from supervisor.resolution.evaluations.container import EvaluateContainer
def _make_image_attr(image: str) -> MagicMock:
out = MagicMock()
out.attrs = {
def _make_image_attr(image: str) -> DockerContainer:
out = MagicMock(spec=DockerContainer)
out.show.return_value = {
"Config": {
"Image": image,
},
@@ -30,7 +31,7 @@ async def test_evaluation(coresys: CoreSys):
assert container.reason not in coresys.resolution.unsupported
assert UnhealthyReason.DOCKER not in coresys.resolution.unhealthy
coresys.docker.containers_legacy.list.return_value = [
coresys.docker.containers.list.return_value = [
_make_image_attr("armhfbuild/watchtower:latest"),
_make_image_attr("concerco/watchtowerv6:10.0.2"),
_make_image_attr("containrrr/watchtower:1.1"),
@@ -47,7 +48,7 @@ async def test_evaluation(coresys: CoreSys):
"pyouroboros/ouroboros:1.4.3",
}
coresys.docker.containers_legacy.list.return_value = []
coresys.docker.containers.list.return_value = []
await container()
assert container.reason not in coresys.resolution.unsupported
@@ -62,7 +63,9 @@ async def test_corrupt_docker(coresys: CoreSys):
corrupt_docker = Issue(IssueType.CORRUPT_DOCKER, ContextType.SYSTEM)
assert corrupt_docker not in coresys.resolution.issues
coresys.docker.containers_legacy.list.side_effect = DockerException
coresys.docker.containers.list.side_effect = aiodocker.DockerError(
500, {"message": "fail"}
)
await container()
assert corrupt_docker in coresys.resolution.issues
@@ -2,6 +2,7 @@
from unittest.mock import MagicMock, patch
from aiodocker.containers import DockerContainer
from awesomeversion import AwesomeVersion
from supervisor.addons.addon import Addon
@@ -28,12 +29,14 @@ async def test_evaluation(coresys: CoreSys, install_addon_ssh: Addon):
addon_attrs = no_restart_attrs
observer_attrs = always_restart_attrs
def get_container(name: str):
meta = MagicMock()
meta.attrs = observer_attrs if name == "hassio_observer" else addon_attrs
async def get_container(name: str) -> DockerContainer:
meta = MagicMock(spec=DockerContainer)
meta.show.return_value = (
observer_attrs if name == "hassio_observer" else addon_attrs
)
return meta
coresys.docker.containers_legacy.get = get_container
coresys.docker.containers.get = get_container
await coresys.plugins.observer.instance.attach(TEST_VERSION)
await install_addon_ssh.instance.attach(TEST_VERSION)
@@ -1,8 +1,12 @@
"""Test fixup core execute rebuild."""
import asyncio
from collections.abc import Callable, Coroutine
from typing import Any
from unittest.mock import MagicMock, patch
from docker.errors import NotFound
import aiodocker
from aiodocker.containers import DockerContainer
import pytest
from supervisor.addons.addon import Addon
@@ -13,25 +17,24 @@ from supervisor.resolution.const import ContextType, IssueType, SuggestionType
from supervisor.resolution.fixups.addon_execute_rebuild import FixupAddonExecuteRebuild
def make_mock_container_get(status: str):
def make_mock_container_get(
status: str,
) -> Callable[[str], Coroutine[Any, Any, DockerContainer]]:
"""Make mock of container get."""
out = MagicMock()
out = MagicMock(spec=DockerContainer)
out.status = status
out.attrs = {"State": {"ExitCode": 0}, "Mounts": []}
out.show.return_value = {"State": {"Status": status, "ExitCode": 0}, "Mounts": []}
def mock_container_get(name):
async def mock_container_get(name) -> DockerContainer:
return out
return mock_container_get
async def _mock_wait_for_container() -> None:
"""Mock of wait for container."""
async def test_fixup(docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon):
@pytest.mark.usefixtures("install_addon_ssh")
async def test_fixup(docker: DockerAPI, coresys: CoreSys):
"""Test fixup rebuilds addon's container."""
docker.containers_legacy.get = make_mock_container_get("running")
docker.containers.get = make_mock_container_get("running")
addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)
@@ -43,9 +46,7 @@ async def test_fixup(docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Add
reference="local_ssh",
suggestions=[SuggestionType.EXECUTE_REBUILD],
)
with patch.object(
Addon, "restart", return_value=_mock_wait_for_container()
) as restart:
with patch.object(Addon, "restart", return_value=asyncio.sleep(0)) as restart:
await addon_execute_rebuild()
restart.assert_called_once()
@@ -53,15 +54,13 @@ async def test_fixup(docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Add
assert not coresys.resolution.suggestions
@pytest.mark.usefixtures("install_addon_ssh")
async def test_fixup_stopped_core(
docker: DockerAPI,
coresys: CoreSys,
install_addon_ssh: Addon,
caplog: pytest.LogCaptureFixture,
docker: DockerAPI, coresys: CoreSys, caplog: pytest.LogCaptureFixture
):
"""Test fixup just removes addon's container when it is stopped."""
caplog.clear()
docker.containers_legacy.get = make_mock_container_get("stopped")
docker.containers.get = make_mock_container_get("stopped")
addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)
coresys.resolution.create_issue(
@@ -76,21 +75,21 @@ async def test_fixup_stopped_core(
assert not coresys.resolution.issues
assert not coresys.resolution.suggestions
docker.containers_legacy.get("addon_local_ssh").remove.assert_called_once_with(
(await docker.containers.get("addon_local_ssh")).delete.assert_called_once_with(
force=True, v=True
)
assert "Addon local_ssh is stopped" in caplog.text
@pytest.mark.usefixtures("install_addon_ssh")
async def test_fixup_unknown_core(
docker: DockerAPI,
coresys: CoreSys,
install_addon_ssh: Addon,
caplog: pytest.LogCaptureFixture,
docker: DockerAPI, coresys: CoreSys, caplog: pytest.LogCaptureFixture
):
"""Test fixup does nothing if addon's container has already been removed."""
caplog.clear()
docker.containers_legacy.get.side_effect = NotFound("")
docker.containers.get.side_effect = aiodocker.DockerError(
404, {"message": "missing"}
)
addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)
coresys.resolution.create_issue(
@@ -1,8 +1,11 @@
"""Test fixup core execute rebuild."""
from collections.abc import Callable, Coroutine
from typing import Any
from unittest.mock import MagicMock, patch
from docker.errors import NotFound
import aiodocker
from aiodocker.containers import DockerContainer
import pytest
from supervisor.coresys import CoreSys
@@ -13,13 +16,15 @@ from supervisor.resolution.const import ContextType, IssueType, SuggestionType
from supervisor.resolution.fixups.core_execute_rebuild import FixupCoreExecuteRebuild
def make_mock_container_get(status: str):
def make_mock_container_get(
status: str,
) -> Callable[[str], Coroutine[Any, Any, DockerContainer]]:
"""Make mock of container get."""
out = MagicMock()
out = MagicMock(spec=DockerContainer)
out.status = status
out.attrs = {"State": {"ExitCode": 0}, "Mounts": []}
out.show.return_value = {"State": {"Status": status, "ExitCode": 0}, "Mounts": []}
def mock_container_get(name):
async def mock_container_get(name) -> DockerContainer:
return out
return mock_container_get
@@ -27,7 +32,7 @@ def make_mock_container_get(status: str):
async def test_fixup(docker: DockerAPI, coresys: CoreSys):
"""Test fixup rebuilds core's container."""
docker.containers_legacy.get = make_mock_container_get("running")
docker.containers.get = make_mock_container_get("running")
core_execute_rebuild = FixupCoreExecuteRebuild(coresys)
@@ -51,7 +56,7 @@ async def test_fixup_stopped_core(
):
"""Test fixup just removes HA's container when it is stopped."""
caplog.clear()
docker.containers_legacy.get = make_mock_container_get("stopped")
docker.containers.get = make_mock_container_get("stopped")
core_execute_rebuild = FixupCoreExecuteRebuild(coresys)
coresys.resolution.create_issue(
@@ -65,7 +70,7 @@ async def test_fixup_stopped_core(
assert not coresys.resolution.issues
assert not coresys.resolution.suggestions
docker.containers_legacy.get("homeassistant").remove.assert_called_once_with(
(await docker.containers.get("homeassistant")).delete.assert_called_once_with(
force=True, v=True
)
assert "Home Assistant is stopped" in caplog.text
@@ -76,7 +81,9 @@ async def test_fixup_unknown_core(
):
"""Test fixup does nothing if core's container has already been removed."""
caplog.clear()
docker.containers_legacy.get.side_effect = NotFound("")
docker.containers.get.side_effect = aiodocker.DockerError(
404, {"message": "missing"}
)
core_execute_rebuild = FixupCoreExecuteRebuild(coresys)
coresys.resolution.create_issue(
@@ -1,7 +1,10 @@
"""Test fixup plugin execute rebuild."""
from collections.abc import Callable, Coroutine
from typing import Any
from unittest.mock import MagicMock, patch
from aiodocker.containers import DockerContainer
import pytest
from supervisor.coresys import CoreSys
@@ -13,13 +16,15 @@ from supervisor.resolution.fixups.plugin_execute_rebuild import (
)
def make_mock_container_get(status: str):
def make_mock_container_get(
status: str,
) -> Callable[[str], Coroutine[Any, Any, DockerContainer]]:
"""Make mock of container get."""
out = MagicMock()
out = MagicMock(spec=DockerContainer)
out.status = status
out.attrs = {"State": {"ExitCode": 0}, "Mounts": []}
out.show.return_value = {"State": {"Status": status, "ExitCode": 0}, "Mounts": []}
def mock_container_get(name):
async def mock_container_get(name) -> DockerContainer:
return out
return mock_container_get
@@ -28,7 +33,7 @@ def make_mock_container_get(status: str):
@pytest.mark.parametrize("status", ["running", "stopped"])
async def test_fixup(docker: DockerAPI, coresys: CoreSys, status: str):
"""Test fixup rebuilds plugin's container regardless of current state."""
docker.containers_legacy.get = make_mock_container_get(status)
docker.containers.get = make_mock_container_get(status)
plugin_execute_rebuild = FixupPluginExecuteRebuild(coresys)