diff --git a/supervisor/api/supervisor.py b/supervisor/api/supervisor.py index ac8a48a09..43a5da26b 100644 --- a/supervisor/api/supervisor.py +++ b/supervisor/api/supervisor.py @@ -248,6 +248,7 @@ class APISupervisor(CoreSysAttributes): return asyncio.shield(self.sys_supervisor.restart()) @api_process_raw(CONTENT_TYPE_TEXT, error_type=CONTENT_TYPE_TEXT) - def logs(self, request: web.Request) -> Awaitable[bytes]: + async def logs(self, request: web.Request) -> bytes: """Return supervisor Docker logs.""" - return self.sys_supervisor.logs() + logs = await self.sys_supervisor.logs() + return "\n".join(logs).encode(errors="replace") diff --git a/supervisor/coresys.py b/supervisor/coresys.py index d896eb7fd..fdfce35e9 100644 --- a/supervisor/coresys.py +++ b/supervisor/coresys.py @@ -628,9 +628,17 @@ class CoreSys: context = callback(context) return context - def create_task(self, coroutine: Coroutine) -> asyncio.Task: + def create_task( + self, coroutine: Coroutine, *, eager_start: bool | None = None + ) -> asyncio.Task: """Create an async task.""" - return self.loop.create_task(coroutine, context=self._create_context()) + # eager_start kwarg works but wasn't added for mypy visibility until 3.14 + # can remove the type ignore then + return self.loop.create_task( + coroutine, + context=self._create_context(), + eager_start=eager_start, # type: ignore + ) def call_later( self, @@ -847,9 +855,11 @@ class CoreSysAttributes: """Add a job to the executor pool.""" return self.coresys.run_in_executor(funct, *args, **kwargs) - def sys_create_task(self, coroutine: Coroutine) -> asyncio.Task: + def sys_create_task( + self, coroutine: Coroutine, *, eager_start: bool | None = None + ) -> asyncio.Task: """Create an async task.""" - return self.coresys.create_task(coroutine) + return self.coresys.create_task(coroutine, eager_start=eager_start) def sys_call_later( self, diff --git a/supervisor/docker/interface.py b/supervisor/docker/interface.py index 5a8651184..80028723c 100644 --- 
a/supervisor/docker/interface.py +++ b/supervisor/docker/interface.py @@ -533,14 +533,11 @@ class DockerInterface(JobGroup, ABC): with suppress(DockerError): await self.stop() - async def logs(self) -> bytes: + async def logs(self) -> list[str]: """Return Docker logs of container.""" with suppress(DockerError): - return await self.sys_run_in_executor( - self.sys_docker.container_logs, self.name - ) - - return b"" + return await self.sys_docker.container_logs(self.name) + return [] @Job(name="docker_interface_cleanup", concurrency=JobConcurrency.GROUP_QUEUE) async def cleanup( diff --git a/supervisor/docker/manager.py b/supervisor/docker/manager.py index d6af2867d..a887fa263 100644 --- a/supervisor/docker/manager.py +++ b/supervisor/docker/manager.py @@ -3,6 +3,7 @@ from __future__ import annotations import asyncio +from collections.abc import Mapping from contextlib import suppress from dataclasses import dataclass from functools import partial @@ -21,12 +22,9 @@ from aiodocker.images import DockerImages from aiodocker.stream import Stream from aiodocker.types import JSONObject from aiohttp import ClientTimeout, UnixConnector -import attr from awesomeversion import AwesomeVersion, AwesomeVersionCompareException from docker import errors as docker_errors from docker.client import DockerClient -from docker.models.containers import Container -from docker.types.daemon import CancellableStream import requests from ..const import ( @@ -92,18 +90,18 @@ class CommandReturn: log: list[str] -@attr.s(frozen=True) +@dataclass(slots=True, frozen=True) class DockerInfo: """Return docker information.""" - version: AwesomeVersion = attr.ib() - storage: str = attr.ib() - logging: str = attr.ib() - cgroup: str = attr.ib() - support_cpu_realtime: bool = attr.ib() + version: AwesomeVersion + storage: str + logging: str + cgroup: str + support_cpu_realtime: bool @staticmethod - async def new(data: dict[str, Any]) -> DockerInfo: + async def new(data: Mapping[str, Any]) -> DockerInfo: 
"""Create a object from docker info.""" # Check if CONFIG_RT_GROUP_SCHED is loaded (blocking I/O in executor) cpu_rt_file_exists = await asyncio.get_running_loop().run_in_executor( @@ -280,7 +278,7 @@ class DockerAPI(CoreSysAttributes): self._network: DockerNetwork | None = None self._info: DockerInfo | None = None self.config: DockerConfig = DockerConfig() - self._monitor: DockerMonitor = DockerMonitor(coresys) + self._monitor: DockerMonitor = DockerMonitor(coresys, self.docker) self._manifest_fetcher: RegistryManifestFetcher = RegistryManifestFetcher( coresys ) @@ -296,7 +294,7 @@ class DockerAPI(CoreSysAttributes): timeout=900, ), ) - self._info = await DockerInfo.new(self.dockerpy.info()) + self._info = await DockerInfo.new(await self.docker.system.info()) await self.config.read_data() self._network = await DockerNetwork(self.docker).post_init( self.config.enable_ipv6, self.config.mtu @@ -334,11 +332,6 @@ class DockerAPI(CoreSysAttributes): raise RuntimeError("Docker Info not initialized!") return self._info - @property - def events(self) -> CancellableStream: - """Return docker event stream.""" - return self.dockerpy.events(decode=True) - @property def monitor(self) -> DockerMonitor: """Return docker events monitor.""" @@ -901,27 +894,24 @@ class DockerAPI(CoreSysAttributes): except aiodocker.DockerError as err: raise DockerError(f"Can't restart {name}: {err}", _LOGGER.warning) from err - def container_logs(self, name: str, tail: int = 100) -> bytes: - """Return Docker logs of container. - - Must be run in executor. - """ - # Remains on docker py for now because aiodocker doesn't seem to have a way to get - # the raw binary of the logs. Only provides list[str] or AsyncIterator[str] options. 
+ async def container_logs(self, name: str, tail: int = 100) -> list[str]: + """Return Docker logs of container.""" try: - docker_container: Container = self.dockerpy.containers.get(name) - except docker_errors.NotFound: - raise DockerNotFound( - f"Container {name} not found for logs", _LOGGER.warning - ) from None - except (docker_errors.DockerException, requests.RequestException) as err: + container = await self.containers.get(name) + except aiodocker.DockerError as err: + if err.status == HTTPStatus.NOT_FOUND: + raise DockerNotFound( + f"Container {name} not found for logs", _LOGGER.warning + ) from None raise DockerError( f"Could not get container {name} for logs: {err!s}", _LOGGER.error ) from err try: - return docker_container.logs(tail=tail, stdout=True, stderr=True) - except (docker_errors.DockerException, requests.RequestException) as err: + return await container.log( + follow=False, stdout=True, stderr=True, tail=tail + ) + except aiodocker.DockerError as err: raise DockerError( f"Can't grep logs from {name}: {err}", _LOGGER.warning ) from err diff --git a/supervisor/docker/monitor.py b/supervisor/docker/monitor.py index 7eb5ccc90..441c3d252 100644 --- a/supervisor/docker/monitor.py +++ b/supervisor/docker/monitor.py @@ -1,21 +1,25 @@ """Supervisor docker monitor based on events.""" -from contextlib import suppress +import asyncio from dataclasses import dataclass import logging -from threading import Thread from typing import Any -from docker.types.daemon import CancellableStream +import aiodocker +from aiodocker.channel import ChannelSubscriber from ..const import BusEvent from ..coresys import CoreSys, CoreSysAttributes +from ..exceptions import HassioError +from ..utils.sentry import async_capture_exception, capture_exception from .const import LABEL_MANAGED, ContainerState _LOGGER: logging.Logger = logging.getLogger(__name__) +STOP_MONITOR_TIMEOUT = 5.0 -@dataclass + +@dataclass(slots=True, frozen=True) class DockerContainerStateEvent: """Event for 
docker container state change.""" @@ -25,15 +29,26 @@ class DockerContainerStateEvent: time: int -class DockerMonitor(CoreSysAttributes, Thread): +@dataclass(slots=True, frozen=True) +class DockerEventCallbackTask: + """Docker event and task spawned for it.""" + + data: DockerContainerStateEvent + task: asyncio.Task + + +class DockerMonitor(CoreSysAttributes): """Docker monitor for supervisor.""" - def __init__(self, coresys: CoreSys): + def __init__(self, coresys: CoreSys, docker_client: aiodocker.Docker): """Initialize Docker monitor object.""" super().__init__() self.coresys = coresys - self._events: CancellableStream | None = None + self.docker = docker_client self._unlabeled_managed_containers: list[str] = [] + self._monitor_task: asyncio.Task | None = None + self._await_task: asyncio.Task | None = None + self._event_tasks: asyncio.Queue[DockerEventCallbackTask | None] def watch_container(self, container_metadata: dict[str, Any]): """If container is missing the managed label, add name to list.""" @@ -47,54 +62,124 @@ class DockerMonitor(CoreSysAttributes, Thread): async def load(self): """Start docker events monitor.""" - self._events = self.sys_docker.events - Thread.start(self) + events = self.docker.events.subscribe() + self._event_tasks = asyncio.Queue() + self._monitor_task = self.sys_create_task(self._run(events), eager_start=True) + self._await_task = self.sys_create_task( + self._await_event_tasks(), eager_start=True + ) _LOGGER.info("Started docker events monitor") async def unload(self): """Stop docker events monitor.""" - self._events.close() - with suppress(RuntimeError): - self.join(timeout=5) + await self.docker.events.stop() + + tasks = [task for task in (self._monitor_task, self._await_task) if task] + if tasks: + _, pending = await asyncio.wait(tasks, timeout=STOP_MONITOR_TIMEOUT) + if pending: + _LOGGER.warning( + "Timeout stopping docker events monitor, cancelling %s pending task(s)", + len(pending), + ) + for task in pending: + 
task.cancel() + await asyncio.gather(*pending, return_exceptions=True) + self._event_tasks.shutdown(immediate=True) + self._monitor_task = None + self._await_task = None _LOGGER.info("Stopped docker events monitor") - def run(self) -> None: + async def _run(self, events: ChannelSubscriber) -> None: """Monitor and process docker events.""" - if not self._events: - raise RuntimeError("Monitor has not been loaded!") + try: + while True: + event: dict[str, Any] | None = await events.get() + if event is None: + break - for event in self._events: - attributes: dict[str, str] = event.get("Actor", {}).get("Attributes", {}) - - if event["Type"] == "container" and ( - LABEL_MANAGED in attributes - or attributes.get("name") in self._unlabeled_managed_containers - ): - container_state: ContainerState | None = None - action: str = event["Action"] - - if action == "start": - container_state = ContainerState.RUNNING - elif action == "die": - container_state = ( - ContainerState.STOPPED - if int(event["Actor"]["Attributes"]["exitCode"]) == 0 - else ContainerState.FAILED + try: + attributes: dict[str, str] = event.get("Actor", {}).get( + "Attributes", {} ) - elif action == "health_status: healthy": - container_state = ContainerState.HEALTHY - elif action == "health_status: unhealthy": - container_state = ContainerState.UNHEALTHY - if container_state: - self.sys_loop.call_soon_threadsafe( - self.sys_bus.fire_event, - BusEvent.DOCKER_CONTAINER_STATE_CHANGE, - DockerContainerStateEvent( - name=attributes["name"], - state=container_state, - id=event["Actor"]["ID"], - time=event["time"], - ), + if event["Type"] == "container" and ( + LABEL_MANAGED in attributes + or attributes.get("name") in self._unlabeled_managed_containers + ): + container_state: ContainerState | None = None + action: str = event["Action"] + + if action == "start": + container_state = ContainerState.RUNNING + elif action == "die": + container_state = ( + ContainerState.STOPPED + if 
int(event["Actor"]["Attributes"]["exitCode"]) == 0 + else ContainerState.FAILED + ) + elif action == "health_status: healthy": + container_state = ContainerState.HEALTHY + elif action == "health_status: unhealthy": + container_state = ContainerState.UNHEALTHY + + if container_state: + state_event = DockerContainerStateEvent( + name=attributes["name"], + state=container_state, + id=event["Actor"]["ID"], + time=event["time"], + ) + tasks = self.sys_bus.fire_event( + BusEvent.DOCKER_CONTAINER_STATE_CHANGE, state_event + ) + await asyncio.gather( + *[ + self._event_tasks.put( + DockerEventCallbackTask(state_event, task) + ) + for task in tasks + ] + ) + + # Broad exception here because one bad event cannot stop the monitor + # Log what went wrong and send it to sentry but continue monitoring + except Exception as err: # pylint: disable=broad-exception-caught + await async_capture_exception(err) + _LOGGER.error( + "Could not process docker event, container state may be inaccurate: %s %s", + event, + err, ) + + # Can only get to this except if an error raised while getting events from queue + # Shouldn't really happen but any errors raised there are catastrophic and end the monitor + # Log that the monitor broke and send the details to sentry to review + except Exception as err: # pylint: disable=broad-exception-caught + await async_capture_exception(err) + _LOGGER.error( + "Cannot get events from docker, monitor has crashed. 
Container " + "state information will be inaccurate: %s", + err, + ) + finally: + await self._event_tasks.put(None) + + async def _await_event_tasks(self): + """Await event callback tasks to clean up and capture output.""" + while (event := await self._event_tasks.get()) is not None: + try: + await event.task + # Exceptions which inherit from HassioError are already handled + # We can safely ignore these, we only track the unhandled ones here + except HassioError: + pass + except Exception as err: # pylint: disable=broad-exception-caught + capture_exception(err) + _LOGGER.error( + "Error encountered while processing docker container state event: %s %s %s", + event.task.get_name(), + event.data, + err, + ) diff --git a/supervisor/plugins/dns.py b/supervisor/plugins/dns.py index 6da0f77c5..64d67b963 100644 --- a/supervisor/plugins/dns.py +++ b/supervisor/plugins/dns.py @@ -368,7 +368,7 @@ class PluginDns(PluginBase): log = await self.instance.logs() # Check the log for loop plugin output - if b"plugin/loop: Loop" in log: + if any("plugin/loop: Loop" in line for line in log): _LOGGER.error("Detected a DNS loop in local Network!") self._loop = True self.sys_resolution.create_issue( diff --git a/supervisor/supervisor.py b/supervisor/supervisor.py index 48acc31ff..b741884fe 100644 --- a/supervisor/supervisor.py +++ b/supervisor/supervisor.py @@ -248,7 +248,7 @@ class Supervisor(CoreSysAttributes): """Return True if a task is in progress.""" return self.instance.in_progress - def logs(self) -> Awaitable[bytes]: + def logs(self) -> Awaitable[list[str]]: """Get Supervisor docker logs. Return Coroutine. 
diff --git a/tests/backups/test_manager.py b/tests/backups/test_manager.py index e93906356..0d257a354 100644 --- a/tests/backups/test_manager.py +++ b/tests/backups/test_manager.py @@ -1836,47 +1836,6 @@ async def test_reload_error( assert coresys.core.healthy is healthy_expected -@pytest.mark.usefixtures("supervisor_internet", "install_addon_ssh") -async def test_monitoring_after_full_restore( - coresys: CoreSys, full_backup_mock: Backup -): - """Test monitoring of addon state still works after full restore.""" - await coresys.core.set_state(CoreState.RUNNING) - coresys.hardware.disk.get_disk_free_space = lambda x: 5000 - coresys.homeassistant.core.start = AsyncMock(return_value=None) - coresys.homeassistant.core.stop = AsyncMock(return_value=None) - coresys.homeassistant.core.update = AsyncMock(return_value=None) - - manager = await BackupManager(coresys).load_config() - - backup_instance = full_backup_mock.return_value - backup_instance.protected = False - assert await manager.do_restore_full(backup_instance) - - backup_instance.restore_addons.assert_called_once_with([TEST_ADDON_SLUG]) - assert coresys.core.state == CoreState.RUNNING - coresys.docker.unload.assert_not_called() - - -@pytest.mark.usefixtures("supervisor_internet", "install_addon_ssh") -async def test_monitoring_after_partial_restore( - coresys: CoreSys, partial_backup_mock: Backup -): - """Test monitoring of addon state still works after full restore.""" - await coresys.core.set_state(CoreState.RUNNING) - coresys.hardware.disk.get_disk_free_space = lambda x: 5000 - - manager = await BackupManager(coresys).load_config() - - backup_instance = partial_backup_mock.return_value - backup_instance.protected = False - assert await manager.do_restore_partial(backup_instance, addons=[TEST_ADDON_SLUG]) - - backup_instance.restore_addons.assert_called_once_with([TEST_ADDON_SLUG]) - assert coresys.core.state == CoreState.RUNNING - coresys.docker.unload.assert_not_called() - - @pytest.mark.parametrize( 
"pre_backup_error", [ diff --git a/tests/conftest.py b/tests/conftest.py index 7f344356b..24edc21e0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,16 +3,18 @@ import asyncio from collections.abc import AsyncGenerator, Generator from datetime import datetime -import os from pathlib import Path import subprocess from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch from uuid import uuid4 +from aiodocker.channel import Channel, ChannelSubscriber from aiodocker.containers import DockerContainer, DockerContainers from aiodocker.docker import DockerImages +from aiodocker.events import DockerEvents from aiodocker.execs import Exec from aiodocker.networks import DockerNetwork, DockerNetworks +from aiodocker.system import DockerSystem from aiohttp import ClientSession, web from aiohttp.test_utils import TestClient from awesomeversion import AwesomeVersion @@ -49,7 +51,6 @@ from supervisor.const import ( from supervisor.coresys import CoreSys from supervisor.dbus.network import NetworkManager from supervisor.docker.manager import DockerAPI -from supervisor.docker.monitor import DockerMonitor from supervisor.exceptions import HostLogError from supervisor.homeassistant.api import APIState from supervisor.host.logs import LogsControl @@ -61,7 +62,6 @@ from supervisor.utils.dt import utcnow from .common import ( AsyncIterator, MockResponse, - load_binary_fixture, load_fixture, load_json_fixture, mock_dbus_services, @@ -100,16 +100,16 @@ def blockbuster(request: pytest.FixtureRequest) -> BlockBuster | None: @pytest.fixture -async def path_extern() -> None: +async def path_extern(monkeypatch: pytest.MonkeyPatch) -> None: """Set external path env for tests.""" - os.environ["SUPERVISOR_SHARE"] = "/mnt/data/supervisor" + monkeypatch.setenv("SUPERVISOR_SHARE", "/mnt/data/supervisor") yield @pytest.fixture -async def supervisor_name() -> None: +async def supervisor_name(monkeypatch: pytest.MonkeyPatch) -> None: """Set env for supervisor name.""" - 
os.environ["SUPERVISOR_NAME"] = "hassio_supervisor" + monkeypatch.setenv("SUPERVISOR_NAME", "hassio_supervisor") yield @@ -144,18 +144,24 @@ async def docker() -> DockerAPI: }, "Containers": {}, } + system_info = { + "ServerVersion": "1.0.0", + "Driver": "overlay2", + "LoggingDriver": "journald", + "CgroupVersion": "1", + } with ( patch("supervisor.docker.manager.DockerClient", return_value=MagicMock()), - patch("supervisor.docker.manager.DockerAPI.info", return_value=MagicMock()), - patch("supervisor.docker.manager.DockerAPI.unload"), patch( "supervisor.docker.manager.aiodocker.Docker", return_value=( - docker_client := MagicMock( - networks=MagicMock(spec=DockerNetworks), + MagicMock( + networks=(docker_networks := MagicMock(spec=DockerNetworks)), images=(docker_images := MagicMock(spec=DockerImages)), containers=(docker_containers := MagicMock(spec=DockerContainers)), + events=(docker_events := MagicMock(spec=DockerEvents)), + system=(docker_system := MagicMock(spec=DockerSystem)), ) ), ), @@ -168,16 +174,21 @@ async def docker() -> DockerAPI: new=PropertyMock(return_value=docker_containers), ), ): - docker_client.networks.get.return_value = docker_network = MagicMock( + # Info mocking + docker_system.info.return_value = system_info + + # Network mocking + docker_networks.get.return_value = docker_network = MagicMock( spec=DockerNetwork ) docker_network.show.return_value = network_inspect - docker_obj = await DockerAPI(MagicMock()).post_init() - docker_obj.config._data = {"registries": {}} - with patch("supervisor.docker.monitor.DockerMonitor.load"): - await docker_obj.load() + # Events mocking + docker_events.channel = channel = Channel() + docker_events.subscribe.return_value = ChannelSubscriber(channel) + docker_events.stop = lambda *_: channel.publish(None) + # Images mocking docker_images.inspect.return_value = image_inspect docker_images.list.return_value = [image_inspect] docker_images.import_image = AsyncMock( @@ -185,6 +196,7 @@ async def docker() -> 
DockerAPI: ) docker_images.pull.return_value = AsyncIterator([{}]) + # Containers mocking docker_containers.get.return_value = docker_container = MagicMock( spec=DockerContainer, id=container_inspect["Id"] ) @@ -200,15 +212,21 @@ async def docker() -> DockerAPI: docker_exec.start.return_value = create_mock_exec_stream(output=b"") docker_exec.inspect.return_value = {"ExitCode": 0} - docker_obj.info.logging = "journald" - docker_obj.info.storage = "overlay2" - docker_obj.info.version = AwesomeVersion("1.0.0") + # Load Docker manager + docker_obj = await DockerAPI( + MagicMock(create_task=asyncio.get_running_loop().create_task) + ).post_init() + docker_obj.config._data = {"registries": {}} + await docker_obj.load() # Mock manifest fetcher to return None (falls back to count-based progress) docker_obj._manifest_fetcher.get_manifest = AsyncMock(return_value=None) yield docker_obj + # Clean up + await docker_obj.unload() + @pytest.fixture(scope="session") def dbus_session() -> Generator[str]: @@ -432,8 +450,8 @@ async def fixture_all_dbus_services( @pytest.fixture async def coresys( - docker, - dbus_session_bus, + docker: DockerAPI, + dbus_session_bus: MessageBus, all_dbus_services, aiohttp_client, run_supervisor_state, @@ -480,7 +498,7 @@ async def coresys( # Mock docker coresys_obj._docker = docker coresys_obj.docker.coresys = coresys_obj - coresys_obj.docker._monitor = DockerMonitor(coresys_obj) + docker.monitor.coresys = coresys_obj # Set internet state coresys_obj.supervisor._connectivity = True @@ -839,12 +857,11 @@ async def journald_logs(coresys: CoreSys) -> MagicMock: @pytest.fixture -async def docker_logs(docker: DockerAPI, supervisor_name) -> MagicMock: +async def docker_logs(container: DockerContainer, supervisor_name) -> AsyncMock: """Mock log output for a container from docker.""" - container_mock = MagicMock() - container_mock.logs.return_value = load_binary_fixture("logs_docker_container.txt") - docker.dockerpy.containers.get.return_value = container_mock 
- yield container_mock.logs + logs = load_fixture("logs_docker_container.txt") + container.log.return_value = logs.splitlines() + yield container.log @pytest.fixture diff --git a/tests/docker/test_addon.py b/tests/docker/test_addon.py index 1c5f31980..c293e16d5 100644 --- a/tests/docker/test_addon.py +++ b/tests/docker/test_addon.py @@ -1,6 +1,7 @@ """Test docker addon setup.""" import asyncio +from dataclasses import replace from http import HTTPStatus from ipaddress import IPv4Address from pathlib import Path @@ -73,9 +74,8 @@ def get_docker_addon( return docker_addon -def test_base_volumes_included( - coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern -): +@pytest.mark.usefixtures("path_extern") +def test_base_volumes_included(coresys: CoreSys, addonsdata_system: dict[str, Data]): """Dev and data volumes always included.""" docker_addon = get_docker_addon( coresys, addonsdata_system, "basic-addon-config.json" @@ -96,8 +96,9 @@ def test_base_volumes_included( ) +@pytest.mark.usefixtures("path_extern") def test_addon_map_folder_defaults( - coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern + coresys: CoreSys, addonsdata_system: dict[str, Data] ): """Validate defaults for mapped folders in addons.""" docker_addon = get_docker_addon( @@ -153,8 +154,9 @@ def test_addon_map_folder_defaults( assert "/backup" not in [mount.target for mount in docker_addon.mounts] +@pytest.mark.usefixtures("path_extern") def test_addon_map_homeassistant_folder( - coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern + coresys: CoreSys, addonsdata_system: dict[str, Data] ): """Test mounts for addon which maps homeassistant folder.""" config = load_json_fixture("addon-config-map-addon_config.json") @@ -173,8 +175,9 @@ def test_addon_map_homeassistant_folder( ) +@pytest.mark.usefixtures("path_extern") def test_addon_map_addon_configs_folder( - coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern + coresys: CoreSys, addonsdata_system: 
dict[str, Data] ): """Test mounts for addon which maps addon configs folder.""" config = load_json_fixture("addon-config-map-addon_config.json") @@ -193,8 +196,9 @@ def test_addon_map_addon_configs_folder( ) +@pytest.mark.usefixtures("path_extern") def test_addon_map_addon_config_folder( - coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern + coresys: CoreSys, addonsdata_system: dict[str, Data] ): """Test mounts for addon which maps its own config folder.""" docker_addon = get_docker_addon( @@ -213,8 +217,9 @@ def test_addon_map_addon_config_folder( ) +@pytest.mark.usefixtures("path_extern") def test_addon_map_addon_config_folder_with_custom_target( - coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern + coresys: CoreSys, addonsdata_system: dict[str, Data] ): """Test mounts for addon which maps its own config folder and sets target path.""" config = load_json_fixture("addon-config-map-addon_config.json") @@ -236,8 +241,9 @@ def test_addon_map_addon_config_folder_with_custom_target( ) +@pytest.mark.usefixtures("path_extern") def test_addon_map_data_folder_with_custom_target( - coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern + coresys: CoreSys, addonsdata_system: dict[str, Data] ): """Test mounts for addon which sets target path for data folder.""" config = load_json_fixture("addon-config-map-addon_config.json") @@ -256,8 +262,9 @@ def test_addon_map_data_folder_with_custom_target( ) +@pytest.mark.usefixtures("path_extern") def test_addon_ignore_on_config_map( - coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern + coresys: CoreSys, addonsdata_system: dict[str, Data] ): """Test mounts for addon don't include addon config or homeassistant when config included.""" config = load_json_fixture("basic-addon-config.json") @@ -283,9 +290,8 @@ def test_addon_ignore_on_config_map( assert "/homeassistant" not in [mount.target for mount in docker_addon.mounts] -def test_journald_addon( - coresys: CoreSys, 
addonsdata_system: dict[str, Data], path_extern -): +@pytest.mark.usefixtures("path_extern") +def test_journald_addon(coresys: CoreSys, addonsdata_system: dict[str, Data]): """Validate volume for journald option.""" docker_addon = get_docker_addon( coresys, addonsdata_system, "journald-addon-config.json" @@ -311,9 +317,8 @@ def test_journald_addon( ) -def test_not_journald_addon( - coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern -): +@pytest.mark.usefixtures("path_extern") +def test_not_journald_addon(coresys: CoreSys, addonsdata_system: dict[str, Data]): """Validate journald option defaults off.""" docker_addon = get_docker_addon( coresys, addonsdata_system, "basic-addon-config.json" @@ -322,11 +327,9 @@ def test_not_journald_addon( assert "/var/log/journal" not in [mount.target for mount in docker_addon.mounts] +@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data") async def test_addon_run_docker_error( - coresys: CoreSys, - addonsdata_system: dict[str, Data], - path_extern, - tmp_supervisor_data: Path, + coresys: CoreSys, addonsdata_system: dict[str, Data] ): """Test docker error when addon is run.""" await coresys.dbus.timedate.connect(coresys.dbus.bus) @@ -352,12 +355,9 @@ async def test_addon_run_docker_error( ) +@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data") async def test_addon_run_add_host_error( - coresys: CoreSys, - addonsdata_system: dict[str, Data], - capture_exception: Mock, - path_extern, - tmp_supervisor_data: Path, + coresys: CoreSys, addonsdata_system: dict[str, Data], capture_exception: Mock ): """Test error adding host when addon is run.""" await coresys.dbus.timedate.connect(coresys.dbus.bus) @@ -378,9 +378,7 @@ async def test_addon_run_add_host_error( async def test_addon_stop_delete_host_error( - coresys: CoreSys, - addonsdata_system: dict[str, Data], - capture_exception: Mock, + coresys: CoreSys, addonsdata_system: dict[str, Data], capture_exception: Mock ): """Test error deleting host when addon is 
stopped.""" docker_addon = get_docker_addon( @@ -424,7 +422,7 @@ TEST_HW_DEVICE = Device( ) -@pytest.mark.usefixtures("path_extern") +@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data") @pytest.mark.parametrize( ("dev_path", "cgroup", "is_os"), [ @@ -444,13 +442,12 @@ async def test_addon_new_device( dev_path: str, cgroup: str, is_os: bool, - tmp_supervisor_data: Path, ): """Test new device that is listed in static devices.""" coresys.hardware.disk.get_disk_free_space = lambda x: 5000 install_addon_ssh.data["devices"] = [dev_path] container.id = 123 - docker.info.cgroup = cgroup + docker._info = replace(docker.info, cgroup=cgroup) # pylint: disable=protected-access with ( patch.object(Addon, "write_options"), @@ -468,19 +465,15 @@ async def test_addon_new_device( add_devices.assert_called_once_with(123, "c 0:0 rwm") -@pytest.mark.usefixtures("path_extern") +@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data") @pytest.mark.parametrize("dev_path", [TEST_DEV_PATH, TEST_SYSFS_PATH]) async def test_addon_new_device_no_haos( - coresys: CoreSys, - install_addon_ssh: Addon, - docker: DockerAPI, - dev_path: str, - tmp_supervisor_data: Path, + coresys: CoreSys, install_addon_ssh: Addon, docker: DockerAPI, dev_path: str ): """Test new device that is listed in static devices on non HAOS system with CGroup V2.""" coresys.hardware.disk.get_disk_free_space = lambda x: 5000 install_addon_ssh.data["devices"] = [dev_path] - docker.info.cgroup = "2" + docker._info = replace(docker.info, cgroup="2") # pylint: disable=protected-access with ( patch.object(Addon, "write_options"), @@ -512,10 +505,7 @@ async def test_addon_new_device_no_haos( assert coresys.resolution.suggestions == [] -async def test_ulimits_integration( - coresys: CoreSys, - install_addon_ssh: Addon, -): +async def test_ulimits_integration(coresys: CoreSys, install_addon_ssh: Addon): """Test ulimits integration with Docker addon.""" docker_addon = DockerAddon(coresys, install_addon_ssh) diff --git 
a/tests/docker/test_manager.py b/tests/docker/test_manager.py index 3919179c2..dc5ba4440 100644 --- a/tests/docker/test_manager.py +++ b/tests/docker/test_manager.py @@ -8,10 +8,11 @@ from unittest.mock import AsyncMock, MagicMock, patch import aiodocker from aiodocker.containers import DockerContainer from aiodocker.networks import DockerNetwork +from awesomeversion import AwesomeVersion from docker.errors import APIError import pytest -from supervisor.const import DNS_SUFFIX +from supervisor.const import DNS_SUFFIX, ENV_SUPERVISOR_CPU_RT from supervisor.coresys import CoreSys from supervisor.docker.const import ( LABEL_MANAGED, @@ -533,3 +534,31 @@ async def test_import_multiple_images_in_tar( assert "Unexpected image count 2 while importing image from tar" in caplog.text coresys.docker.images.inspect.assert_not_called() + + +@pytest.mark.parametrize( + ("rt_file_exists", "rt_env", "rt_supported"), + [(False, "1", False), (True, "0", False), (True, "1", True)], +) +async def test_info( + monkeypatch: pytest.MonkeyPatch, + docker: DockerAPI, + rt_file_exists: bool, + rt_env: str, + rt_supported: bool, +): + """Test docker system info.""" + docker.docker.system.info.return_value = { + "ServerVersion": "2.0.0", + "Driver": "example", + "LoggingDriver": "example", + "CgroupVersion": "2", + } + monkeypatch.setenv(ENV_SUPERVISOR_CPU_RT, rt_env) + with patch("supervisor.docker.manager.Path.exists", return_value=rt_file_exists): + await docker.post_init() + assert docker.info.version == AwesomeVersion("2.0.0") + assert docker.info.storage == "example" + assert docker.info.logging == "example" + assert docker.info.cgroup == "2" + assert docker.info.support_cpu_realtime is rt_supported diff --git a/tests/docker/test_monitor.py b/tests/docker/test_monitor.py index f86326fa6..c6077a5b7 100644 --- a/tests/docker/test_monitor.py +++ b/tests/docker/test_monitor.py @@ -2,12 +2,13 @@ import asyncio from typing import Any -from unittest.mock import PropertyMock, patch +from 
unittest.mock import patch from aiodocker.containers import DockerContainer from awesomeversion import AwesomeVersion import pytest +from supervisor.bus import Bus from supervisor.const import BusEvent from supervisor.coresys import CoreSys from supervisor.docker.const import ContainerState @@ -90,15 +91,13 @@ async def test_events( event["Actor"]["Attributes"]["name"] = "some_container" event["Actor"]["ID"] = "abc123" event["time"] = 123 - with ( - patch( - "supervisor.docker.manager.DockerAPI.events", - new=PropertyMock(return_value=[event]), - ), - patch.object(type(coresys.bus), "fire_event") as fire_event, - ): - await coresys.docker.monitor.load() - await asyncio.sleep(0.1) + + with patch.object( + Bus, "fire_event", return_value=[coresys.create_task(asyncio.sleep(0))] + ) as fire_event: + await coresys.docker.docker.events.channel.publish(event) + await asyncio.sleep(0) + await coresys.docker.monitor.unload() if expected: fire_event.assert_called_once_with( BusEvent.DOCKER_CONTAINER_STATE_CHANGE, @@ -119,27 +118,21 @@ async def test_unlabeled_container(coresys: CoreSys, container: DockerContainer) } await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3")) - with ( - patch( - "supervisor.docker.manager.DockerAPI.events", - new=PropertyMock( - return_value=[ - { - "time": 123, - "Type": "container", - "Action": "die", - "Actor": { - "ID": "abc123", - "Attributes": {"name": "homeassistant", "exitCode": "137"}, - }, - } - ] - ), - ), - patch.object(type(coresys.bus), "fire_event") as fire_event, - ): - await coresys.docker.monitor.load() - await asyncio.sleep(0.1) + with patch.object( + Bus, "fire_event", return_value=[coresys.create_task(asyncio.sleep(0))] + ) as fire_event: + await coresys.docker.docker.events.channel.publish( + { + "time": 123, + "Type": "container", + "Action": "die", + "Actor": { + "ID": "abc123", + "Attributes": {"name": "homeassistant", "exitCode": "137"}, + }, + } + ) + await coresys.docker.monitor.unload() 
fire_event.assert_called_once_with( BusEvent.DOCKER_CONTAINER_STATE_CHANGE, DockerContainerStateEvent( diff --git a/tests/plugins/test_dns.py b/tests/plugins/test_dns.py index 2c3354f3b..f03e19104 100644 --- a/tests/plugins/test_dns.py +++ b/tests/plugins/test_dns.py @@ -6,6 +6,7 @@ from ipaddress import IPv4Address from pathlib import Path from unittest.mock import AsyncMock, Mock, patch +from aiodocker.containers import DockerContainer import pytest from supervisor.const import BusEvent, LogLevel @@ -13,7 +14,7 @@ from supervisor.coresys import CoreSys from supervisor.docker.const import ContainerState from supervisor.docker.dns import DockerDNS from supervisor.docker.monitor import DockerContainerStateEvent -from supervisor.plugins.dns import HostEntry +from supervisor.plugins.dns import HostEntry, PluginDns from supervisor.resolution.const import ContextType, IssueType, SuggestionType from supervisor.resolution.data import Issue, Suggestion @@ -145,34 +146,28 @@ async def test_reset(coresys: CoreSys): ] -async def test_loop_detection_on_failure(coresys: CoreSys): +async def test_loop_detection_on_failure(coresys: CoreSys, container: DockerContainer): """Test loop detection when coredns fails.""" assert len(coresys.resolution.issues) == 0 assert len(coresys.resolution.suggestions) == 0 with ( - patch.object(type(coresys.plugins.dns.instance), "attach"), - patch.object( - type(coresys.plugins.dns.instance), - "is_running", - return_value=True, - ), + patch.object(DockerDNS, "attach"), + patch.object(DockerDNS, "is_running", return_value=True), ): await coresys.plugins.dns.load() with ( - patch.object(type(coresys.plugins.dns), "rebuild") as rebuild, + patch.object(PluginDns, "rebuild") as rebuild, patch.object( - type(coresys.plugins.dns.instance), + DockerDNS, "current_state", side_effect=[ ContainerState.FAILED, ContainerState.FAILED, ], ), - patch.object(type(coresys.plugins.dns.instance), "logs") as logs, ): - logs.return_value = b"" coresys.bus.fire_event( 
BusEvent.DOCKER_CONTAINER_STATE_CHANGE, DockerContainerStateEvent( @@ -188,7 +183,7 @@ async def test_loop_detection_on_failure(coresys: CoreSys): rebuild.assert_called_once() rebuild.reset_mock() - logs.return_value = b"plugin/loop: Loop" + container.log.return_value = ["plugin/loop: Loop"] coresys.bus.fire_event( BusEvent.DOCKER_CONTAINER_STATE_CHANGE, DockerContainerStateEvent( diff --git a/tests/resolution/evaluation/test_evaluate_cgroup.py b/tests/resolution/evaluation/test_evaluate_cgroup.py index 59293450a..7aa250030 100644 --- a/tests/resolution/evaluation/test_evaluate_cgroup.py +++ b/tests/resolution/evaluation/test_evaluate_cgroup.py @@ -1,6 +1,7 @@ """Test evaluation base.""" -# pylint: disable=import-error +# pylint: disable=import-error,protected-access +from dataclasses import replace from unittest.mock import patch from supervisor.const import CoreState @@ -19,17 +20,17 @@ async def test_evaluation(coresys: CoreSys): assert cgroup_version.reason not in coresys.resolution.unsupported - coresys.docker.info.cgroup = "unsupported" + coresys.docker._info = replace(coresys.docker.info, cgroup="unsupported") await cgroup_version() assert cgroup_version.reason in coresys.resolution.unsupported coresys.resolution.unsupported.clear() - coresys.docker.info.cgroup = CGROUP_V2_VERSION + coresys.docker._info = replace(coresys.docker.info, cgroup=CGROUP_V2_VERSION) await cgroup_version() assert cgroup_version.reason not in coresys.resolution.unsupported coresys.resolution.unsupported.clear() - coresys.docker.info.cgroup = CGROUP_V1_VERSION + coresys.docker._info = replace(coresys.docker.info, cgroup=CGROUP_V1_VERSION) await cgroup_version() assert cgroup_version.reason not in coresys.resolution.unsupported @@ -39,11 +40,11 @@ async def test_evaluation_os_available(coresys: CoreSys, os_available): cgroup_version = EvaluateCGroupVersion(coresys) await coresys.core.set_state(CoreState.SETUP) - coresys.docker.info.cgroup = CGROUP_V2_VERSION + coresys.docker._info = 
replace(coresys.docker.info, cgroup=CGROUP_V2_VERSION) await cgroup_version() assert cgroup_version.reason not in coresys.resolution.unsupported - coresys.docker.info.cgroup = CGROUP_V1_VERSION + coresys.docker._info = replace(coresys.docker.info, cgroup=CGROUP_V1_VERSION) await cgroup_version() assert cgroup_version.reason not in coresys.resolution.unsupported diff --git a/tests/resolution/evaluation/test_evaluate_docker_configuration.py b/tests/resolution/evaluation/test_evaluate_docker_configuration.py index 20f011b10..8041cf669 100644 --- a/tests/resolution/evaluation/test_evaluate_docker_configuration.py +++ b/tests/resolution/evaluation/test_evaluate_docker_configuration.py @@ -1,6 +1,7 @@ """Test evaluation base.""" # pylint: disable=import-error,protected-access +from dataclasses import replace from unittest.mock import patch from supervisor.const import CoreState @@ -19,25 +20,29 @@ async def test_evaluation(coresys: CoreSys): assert docker_configuration.reason not in coresys.resolution.unsupported - coresys.docker.info.storage = "unsupported" - coresys.docker.info.logging = EXPECTED_LOGGING + coresys.docker._info = replace( + coresys.docker.info, storage="unsupported", logging=EXPECTED_LOGGING + ) await docker_configuration() assert docker_configuration.reason in coresys.resolution.unsupported coresys.resolution.unsupported.clear() - coresys.docker.info.storage = EXPECTED_STORAGE[0] - coresys.docker.info.logging = "unsupported" + coresys.docker._info = replace( + coresys.docker.info, storage=EXPECTED_STORAGE[0], logging="unsupported" + ) await docker_configuration() assert docker_configuration.reason in coresys.resolution.unsupported coresys.resolution.unsupported.clear() - coresys.docker.info.storage = "overlay2" - coresys.docker.info.logging = EXPECTED_LOGGING + coresys.docker._info = replace( + coresys.docker.info, storage="overlay2", logging=EXPECTED_LOGGING + ) await docker_configuration() assert docker_configuration.reason not in 
coresys.resolution.unsupported - coresys.docker.info.storage = "overlayfs" - coresys.docker.info.logging = EXPECTED_LOGGING + coresys.docker._info = replace( + coresys.docker.info, storage="overlayfs", logging=EXPECTED_LOGGING + ) await docker_configuration() assert docker_configuration.reason not in coresys.resolution.unsupported diff --git a/tests/resolution/evaluation/test_evaluate_docker_version.py b/tests/resolution/evaluation/test_evaluate_docker_version.py index 980603454..afffdb04e 100644 --- a/tests/resolution/evaluation/test_evaluate_docker_version.py +++ b/tests/resolution/evaluation/test_evaluate_docker_version.py @@ -1,8 +1,11 @@ """Test evaluation base.""" # pylint: disable=import-error,protected-access +from dataclasses import replace from unittest.mock import patch +from awesomeversion import AwesomeVersion + from supervisor.const import CoreState from supervisor.coresys import CoreSys from supervisor.resolution.evaluations.docker_version import EvaluateDockerVersion @@ -15,11 +18,22 @@ async def test_evaluation(coresys: CoreSys): assert docker_version.reason not in coresys.resolution.unsupported - coresys.docker.info.supported_version = False + coresys.docker._info = replace( + coresys.docker.info, version=AwesomeVersion("23.0.0") + ) + await docker_version() + assert docker_version.reason in coresys.resolution.unsupported + coresys.resolution.unsupported.clear() + + coresys.docker._info = replace( + coresys.docker.info, version=AwesomeVersion("nonsense") + ) await docker_version() assert docker_version.reason in coresys.resolution.unsupported - coresys.docker.info.supported_version = True + coresys.docker._info = replace( + coresys.docker.info, version=AwesomeVersion("24.0.0") + ) await docker_version() assert docker_version.reason not in coresys.resolution.unsupported