diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..f23e614 --- /dev/null +++ b/.env.example @@ -0,0 +1,14 @@ +PROXMOX_URL=https://proxmox.example.invalid:8006 +PROXMOX_REALM=pam +PROXMOX_USER=root +PROXMOX_PASSWORD=replace-me +PROXMOX_VERIFY_TLS=false +PROXMOX_API_BASE=/api2/json +PROXMOX_PREVENT_CREATE=false +PROXMOX_ENABLE_TEST_MODE=false +PROXMOX_TEST_NODE= +PROXMOX_TEST_POOL= +PROXMOX_TEST_TAG=codex-e2e +PROXMOX_TEST_VM_NAME_PREFIX=codex-e2e- +PROXMOX_KEEP_FAILED_VM=true +PROXMOX_REQUEST_TIMEOUT_SECONDS=15 diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index 4bee49d..aefff38 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -108,7 +108,7 @@ Expected responsibilities: - provide shared app context to screens - coordinate back/next/confirm navigation -The current run command placeholder in `README.md` and `TASKS.md` is `uv run python -m your_app`, so the real package/module name is still unresolved. +The current run command in `README.md` and `TASKS.md` is `uv run python -m pve_vm_setup`. ### 2. Screens @@ -357,7 +357,7 @@ The create-then-configure request sequence is especially important to cover in s `TASKS.md` does not prescribe exact paths, but it does require a separation of concerns. A structure consistent with the current requirements would be: ```text -your_app/ +pve_vm_setup/ __main__.py app.py screens/ @@ -392,7 +392,6 @@ These defaults are central enough to architecture because they belong in domain/ The available resources leave several architectural details unresolved: -- What concrete Python package/module name should replace `your_app`? - Which Proxmox authentication mechanism should be used under the hood: ticket/cookie, API token, or both? - How should session persistence work across screens and retries? - Does the app target a single Proxmox node/cluster endpoint or support multiple saved endpoints? 
diff --git a/README.md b/README.md index 3a987f3..7218a67 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,26 @@ ## Commands - Install: `uv sync` -- Run app: `uv run python -m your_app` +- Run app: `uv run python -m pve_vm_setup` +- Run live diagnostics: `uv run python -m pve_vm_setup --doctor-live` - Run tests: `uv run pytest` +- Run read-only live tests: `uv run pytest -m live` +- Run create-gated live tests: `uv run pytest -m live_create` - Lint: `uv run ruff check .` - Format: `uv run ruff format .` +## Live configuration + +Start from `.env.example` and provide the Proxmox credentials in `.env`. + +Additional live-access controls: + +- `PROXMOX_VERIFY_TLS=false` disables certificate verification for internal/self-signed installs +- `PROXMOX_API_BASE=/api2/json` makes the API base explicit +- `PROXMOX_PREVENT_CREATE=false` allows VM creation by default; set it to `true` to block creates +- `PROXMOX_ENABLE_TEST_MODE=true` enables scoped test mode for live creates +- When test mode is enabled, `PROXMOX_TEST_NODE`, `PROXMOX_TEST_POOL`, `PROXMOX_TEST_TAG`, and `PROXMOX_TEST_VM_NAME_PREFIX` are required and are used to constrain and mark created VMs + ## Engineering rules - Write tests before implementation diff --git a/TASKS.md b/TASKS.md index 7b07c83..1c694f1 100644 --- a/TASKS.md +++ b/TASKS.md @@ -22,7 +22,7 @@ Use these rules for every implementation task in this repository: Codex should use these commands: - Install dependencies: `uv sync` -- Run app: `uv run python -m your_app` +- Run app: `uv run python -m pve_vm_setup` - Run tests: `uv run pytest` - Run lint checks: `uv run ruff check .` - Format code: `uv run ruff format .` @@ -41,7 +41,7 @@ Create the initial Textual application structure and make the repository runnabl Requirements: -- Create the application entrypoint used by `uv run python -m your_app`. +- Create the application entrypoint used by `uv run python -m pve_vm_setup`. 
- Set up a project structure that separates app shell, screens, widgets, models, and services. - Add the initial test setup for unit tests, Textual interaction tests, and snapshot tests. - Add a central state or domain module for the VM configuration workflow. diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..485ff91 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,40 @@ +[project] +name = "pve-vm-setup" +version = "0.1.0" +description = "Textual TUI for creating Proxmox VMs with live diagnostics." +readme = "README.md" +requires-python = ">=3.11" +dependencies = [ + "httpx>=0.27,<0.29", + "python-dotenv>=1.0,<2.0", + "textual>=0.63,<0.90", +] + +[dependency-groups] +dev = [ + "pytest>=8.3,<9.0", + "pytest-asyncio>=0.24,<1.0", + "ruff>=0.9,<1.0", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/pve_vm_setup"] + +[tool.pytest.ini_options] +asyncio_mode = "auto" +markers = [ + "live: hit a real Proxmox API endpoint", + "live_create: create real Proxmox resources and clean them up", +] +testpaths = ["tests"] + +[tool.ruff] +line-length = 100 +target-version = "py311" + +[tool.ruff.lint] +select = ["E", "F", "I", "B", "UP"] diff --git a/src/pve_vm_setup/__init__.py b/src/pve_vm_setup/__init__.py new file mode 100644 index 0000000..d4de65e --- /dev/null +++ b/src/pve_vm_setup/__init__.py @@ -0,0 +1,3 @@ +"""Proxmox VM setup TUI.""" + +__all__ = [] diff --git a/src/pve_vm_setup/__main__.py b/src/pve_vm_setup/__main__.py new file mode 100644 index 0000000..bfdcd0c --- /dev/null +++ b/src/pve_vm_setup/__main__.py @@ -0,0 +1,4 @@ +from .cli import main + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/src/pve_vm_setup/__pycache__/__init__.cpython-313.pyc b/src/pve_vm_setup/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000..2c42cf8 Binary files /dev/null and b/src/pve_vm_setup/__pycache__/__init__.cpython-313.pyc differ 
diff --git a/src/pve_vm_setup/__pycache__/__main__.cpython-313.pyc b/src/pve_vm_setup/__pycache__/__main__.cpython-313.pyc new file mode 100644 index 0000000..1abeffb Binary files /dev/null and b/src/pve_vm_setup/__pycache__/__main__.cpython-313.pyc differ diff --git a/src/pve_vm_setup/__pycache__/app.cpython-313.pyc b/src/pve_vm_setup/__pycache__/app.cpython-313.pyc new file mode 100644 index 0000000..5997183 Binary files /dev/null and b/src/pve_vm_setup/__pycache__/app.cpython-313.pyc differ diff --git a/src/pve_vm_setup/__pycache__/cli.cpython-313.pyc b/src/pve_vm_setup/__pycache__/cli.cpython-313.pyc new file mode 100644 index 0000000..213c6b8 Binary files /dev/null and b/src/pve_vm_setup/__pycache__/cli.cpython-313.pyc differ diff --git a/src/pve_vm_setup/__pycache__/doctor.cpython-313.pyc b/src/pve_vm_setup/__pycache__/doctor.cpython-313.pyc new file mode 100644 index 0000000..33a6363 Binary files /dev/null and b/src/pve_vm_setup/__pycache__/doctor.cpython-313.pyc differ diff --git a/src/pve_vm_setup/__pycache__/domain.cpython-313.pyc b/src/pve_vm_setup/__pycache__/domain.cpython-313.pyc new file mode 100644 index 0000000..dc4f1a3 Binary files /dev/null and b/src/pve_vm_setup/__pycache__/domain.cpython-313.pyc differ diff --git a/src/pve_vm_setup/__pycache__/errors.cpython-313.pyc b/src/pve_vm_setup/__pycache__/errors.cpython-313.pyc new file mode 100644 index 0000000..c1d0738 Binary files /dev/null and b/src/pve_vm_setup/__pycache__/errors.cpython-313.pyc differ diff --git a/src/pve_vm_setup/__pycache__/settings.cpython-313.pyc b/src/pve_vm_setup/__pycache__/settings.cpython-313.pyc new file mode 100644 index 0000000..eac1a8c Binary files /dev/null and b/src/pve_vm_setup/__pycache__/settings.cpython-313.pyc differ diff --git a/src/pve_vm_setup/__pycache__/terminal_compat.cpython-313.pyc b/src/pve_vm_setup/__pycache__/terminal_compat.cpython-313.pyc new file mode 100644 index 0000000..c0fd42c Binary files /dev/null and 
b/src/pve_vm_setup/__pycache__/terminal_compat.cpython-313.pyc differ diff --git a/src/pve_vm_setup/app.py b/src/pve_vm_setup/app.py new file mode 100644 index 0000000..1e22cb9 --- /dev/null +++ b/src/pve_vm_setup/app.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from textual.app import App, ComposeResult +from textual.containers import Container +from textual.widgets import Footer, Header + +from .models.workflow import WorkflowState +from .screens.login import LoginView +from .screens.wizard import WizardView +from .services.base import ProxmoxService +from .services.factory import ProxmoxServiceFactory +from .settings import AppSettings +from .terminal_compat import build_driver_class + + +class PveVmSetupApp(App[None]): + TITLE = "Proxmox VM Setup" + SUB_TITLE = "Live-access foundation" + + def __init__( + self, + settings: AppSettings, + *, + service: ProxmoxService | None = None, + ) -> None: + super().__init__(driver_class=build_driver_class()) + self.settings = settings + self.workflow = WorkflowState() + self.service = service or ProxmoxServiceFactory.create(settings) + + def compose(self) -> ComposeResult: + yield Header() + with Container(id="app-body"): + yield LoginView(self.settings, self.workflow, self.service) + yield Footer() + + def on_unmount(self) -> None: + close = getattr(self.service, "close", None) + if callable(close): + close() + + async def on_login_view_authenticated(self, _: LoginView.Authenticated) -> None: + self.query_one(LoginView).remove() + wizard = WizardView(self.settings, self.workflow, self.service) + await self.query_one("#app-body", Container).mount(wizard) + wizard.activate() diff --git a/src/pve_vm_setup/cli.py b/src/pve_vm_setup/cli.py new file mode 100644 index 0000000..2a25304 --- /dev/null +++ b/src/pve_vm_setup/cli.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +import argparse +import sys + +from .app import PveVmSetupApp +from .doctor import run_live_doctor +from .settings import AppSettings 
+from .terminal_compat import apply_runtime_compatibility + + +def build_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser(description="Proxmox VM setup TUI") + parser.add_argument( + "--doctor-live", + action="store_true", + help="Run live Proxmox connectivity and authentication diagnostics.", + ) + return parser + + +def main(argv: list[str] | None = None) -> int: + parser = build_parser() + args = parser.parse_args(argv) + apply_runtime_compatibility() + settings = AppSettings.from_env() + + if args.doctor_live: + return run_live_doctor(settings, stream=sys.stdout) + + app = PveVmSetupApp(settings) + app.run(mouse=False) + return 0 diff --git a/src/pve_vm_setup/doctor.py b/src/pve_vm_setup/doctor.py new file mode 100644 index 0000000..6584fba --- /dev/null +++ b/src/pve_vm_setup/doctor.py @@ -0,0 +1,80 @@ +from __future__ import annotations + +from typing import TextIO + +from .errors import ProxmoxError, SettingsError +from .services.factory import ProxmoxServiceFactory +from .settings import AppSettings + + +def run_live_doctor( + settings: AppSettings, + *, + stream: TextIO, + service_factory: type[ProxmoxServiceFactory] = ProxmoxServiceFactory, +) -> int: + try: + settings.validate_live_requirements() + settings.safety_policy.validate() + except SettingsError as exc: + stream.write(f"FAIL configuration: {exc}\n") + return 1 + + stream.write("Target\n") + stream.write(f" host: {settings.sanitized_host}\n") + stream.write(f" api_base: {settings.proxmox_api_base}\n") + stream.write(f" realm: {settings.proxmox_realm}\n") + stream.write(f" verify_tls: {settings.proxmox_verify_tls}\n") + stream.write(f" prevent_create: {settings.safety_policy.prevent_create}\n") + stream.write(f" enable_test_mode: {settings.safety_policy.enable_test_mode}\n") + + service = service_factory.create(settings) + + try: + stream.write("1. 
Checking HTTPS reachability...\n") + transport_status = service.check_connectivity() + stream.write(f" OK {transport_status}\n") + + stream.write("2. Checking API base path...\n") + release = service.check_api_base() + stream.write(f" OK release={release}\n") + + stream.write("3. Loading realms...\n") + realms = service.load_realms() + stream.write(f" OK realms={','.join(realm.name for realm in realms)}\n") + + stream.write("4. Attempting login...\n") + session = service.login( + settings.proxmox_user or "", + settings.proxmox_password or "", + settings.proxmox_realm or "", + ) + stream.write(f" OK authenticated_as={session.username}\n") + + if settings.safety_policy.enable_test_mode: + stream.write("5. Validating test mode create scope...\n") + nodes = {node.name for node in service.load_nodes()} + if settings.safety_policy.test_node not in nodes: + raise SettingsError( + f"Configured test node {settings.safety_policy.test_node!r} was not found." + ) + stream.write(f" OK node={settings.safety_policy.test_node}\n") + if settings.safety_policy.test_pool: + pools = {pool.poolid for pool in service.load_pools()} + if settings.safety_policy.test_pool not in pools: + raise SettingsError( + f"Configured test pool {settings.safety_policy.test_pool!r} was not found." 
+ ) + stream.write(f" OK pool={settings.safety_policy.test_pool}\n") + stream.write(f" tag={settings.safety_policy.test_tag}\n") + stream.write(f" name_prefix={settings.safety_policy.test_vm_name_prefix}\n") + except (ProxmoxError, SettingsError, ValueError) as exc: + stream.write(f"FAIL {exc}\n") + return 1 + finally: + close = getattr(service, "close", None) + if callable(close): + close() + + stream.write("Doctor finished successfully.\n") + return 0 diff --git a/src/pve_vm_setup/domain.py b/src/pve_vm_setup/domain.py new file mode 100644 index 0000000..507f4a7 --- /dev/null +++ b/src/pve_vm_setup/domain.py @@ -0,0 +1,320 @@ +from __future__ import annotations + +import re +from dataclasses import replace + +from .errors import SettingsError +from .models.workflow import DiskConfig, ReferenceData, VmConfig +from .settings import AppSettings + +_NIXOS_ISO_PATTERN = re.compile( + r"nixos-minimal-(?P<year>\d{2})[.-](?P<month>\d{2})\.[A-Za-z0-9]+-[A-Za-z0-9_]+-linux\.iso$" +) + + +def select_latest_nixos_iso(isos: list[str]) -> str | None: + candidates: list[tuple[int, int, str]] = [] + for iso in isos: + match = _NIXOS_ISO_PATTERN.search(iso) + if match: + candidates.append((int(match.group("year")), int(match.group("month")), iso)) + if not candidates: + return None + return max(candidates)[2] + + +def build_startup_value(order: str, up: str, down: str) -> str: + parts: list[str] = [] + if order.strip(): + parts.append(f"order={order.strip()}") + if up.strip(): + parts.append(f"up={up.strip()}") + if down.strip(): + parts.append(f"down={down.strip()}") + return ",".join(parts) + + +def effective_vm_config(config: VmConfig, settings: AppSettings) -> VmConfig: + result = replace(config) + result.general = replace(config.general) + result.general.name = settings.safety_policy.effective_vm_name(config.general.name.strip()) + + if not settings.safety_policy.enable_test_mode: + result.general.tags = [tag for tag in config.general.tags if tag] + return result + + tags = [tag for tag 
in config.general.tags if tag] + if settings.safety_policy.test_tag not in tags: + tags.append(settings.safety_policy.test_tag) + result.general.tags = sorted(dict.fromkeys(tags)) + return result + + +def validate_step( + step: str, + config: VmConfig, + settings: AppSettings, + references: ReferenceData, +) -> list[str]: + errors: list[str] = [] + + if step == "general": + if not config.general.node: + errors.append("Node is required.") + if config.general.vmid < 100: + errors.append("VM ID must be at least 100.") + if not config.general.name.strip(): + errors.append("Name is required.") + for label, value in [ + ("Startup order", config.general.startup_order), + ("Startup delay", config.general.startup_delay), + ("Shutdown timeout", config.general.shutdown_timeout), + ]: + if value.strip() and not value.strip().isdigit(): + errors.append(f"{label} must be an integer.") + if settings.safety_policy.enable_test_mode and settings.safety_policy.test_node: + if config.general.node != settings.safety_policy.test_node: + errors.append( + f"Live create mode is restricted to node {settings.safety_policy.test_node}." + ) + if settings.safety_policy.enable_test_mode and settings.safety_policy.test_pool: + if config.general.pool != settings.safety_policy.test_pool: + errors.append( + f"Live create mode is restricted to pool {settings.safety_policy.test_pool}." 
+ ) + + if step == "os": + if config.os.media_choice == "iso": + if not config.os.storage: + errors.append("ISO storage is required.") + if not config.os.iso: + errors.append("ISO selection is required.") + if config.os.media_choice == "physical" and not config.os.physical_drive_path.strip(): + errors.append("Physical disc drive path is required.") + + if step == "system": + if config.system.add_efi_disk and not config.system.efi_storage: + errors.append("EFI storage is required when EFI disk is enabled.") + + if step == "disks": + slots: set[str] = set() + for disk in config.disks: + if disk.slot_name in slots: + errors.append(f"Duplicate disk slot {disk.slot_name}.") + slots.add(disk.slot_name) + if disk.size_gib <= 0: + errors.append(f"Disk {disk.slot_name} size must be greater than zero.") + if not disk.storage: + errors.append(f"Disk {disk.slot_name} storage is required.") + + if step == "cpu": + if config.cpu.cores <= 0: + errors.append("CPU cores must be greater than zero.") + if config.cpu.sockets <= 0: + errors.append("CPU sockets must be greater than zero.") + + if step == "memory": + if config.memory.memory_mib <= 0: + errors.append("Memory must be greater than zero.") + if config.memory.ballooning: + if config.memory.min_memory_mib <= 0: + errors.append("Min memory must be greater than zero when ballooning is enabled.") + if config.memory.min_memory_mib > config.memory.memory_mib: + errors.append("Min memory cannot exceed memory size.") + + if step == "network" and not config.network.no_network_device: + if not config.network.bridge: + errors.append("Bridge is required unless networking is disabled.") + for label, value in [ + ("VLAN tag", config.network.vlan_tag), + ("MTU", config.network.mtu), + ("Multiqueue", config.network.multiqueue), + ]: + if value.strip() and not value.strip().isdigit(): + errors.append(f"{label} must be an integer.") + + if step == "confirm" and not settings.safety_policy.allow_create: + errors.append("Set 
PROXMOX_PREVENT_CREATE=false to enable VM creation.") + + return errors + + +def validate_all_steps( + config: VmConfig, settings: AppSettings, references: ReferenceData +) -> list[str]: + all_errors: list[str] = [] + for step in ["general", "os", "system", "disks", "cpu", "memory", "network", "confirm"]: + all_errors.extend(validate_step(step, config, settings, references)) + return all_errors + + +def _bool_int(value: bool) -> int: + return 1 if value else 0 + + +def build_disk_value(disk: DiskConfig) -> str: + options = [ + f"{disk.storage}:{disk.size_gib}", + f"format={disk.format}", + f"cache={disk.cache}", + f"discard={'on' if disk.discard else 'ignore'}", + f"iothread={_bool_int(disk.io_thread)}", + f"ssd={_bool_int(disk.ssd_emulation)}", + f"backup={_bool_int(disk.backup)}", + f"replicate={_bool_int(not disk.skip_replication)}", + f"aio={disk.async_io}", + ] + return ",".join(options) + + +def build_network_value(config: VmConfig) -> str | None: + if config.network.no_network_device: + return None + + parts: list[str] = [] + if config.network.mac_address.strip(): + parts.append(f"{config.network.model}={config.network.mac_address.strip()}") + else: + parts.append(f"model={config.network.model}") + parts.append(f"bridge={config.network.bridge}") + parts.append(f"firewall={_bool_int(config.network.firewall)}") + parts.append(f"link_down={_bool_int(config.network.disconnected)}") + if config.network.vlan_tag.strip(): + parts.append(f"tag={int(config.network.vlan_tag)}") + if config.network.mtu.strip(): + parts.append(f"mtu={int(config.network.mtu)}") + if config.network.rate_limit.strip(): + parts.append(f"rate={config.network.rate_limit.strip()}") + if config.network.multiqueue.strip(): + parts.append(f"queues={int(config.network.multiqueue)}") + return ",".join(parts) + + +def build_media_value(config: VmConfig) -> str | None: + if config.os.media_choice == "none": + return None + if config.os.media_choice == "iso" and config.os.iso: + return 
f"{config.os.iso},media=cdrom" + if config.os.media_choice == "physical": + return f"{config.os.physical_drive_path.strip()},media=cdrom" + return None + + +def build_create_payload(config: VmConfig, settings: AppSettings) -> dict[str, str | int]: + if not settings.safety_policy.allow_create: + raise SettingsError("PROXMOX_PREVENT_CREATE=false is required before creating VMs.") + + effective = effective_vm_config(config, settings) + payload: dict[str, str | int] = { + "vmid": effective.general.vmid, + "name": effective.general.name, + "ostype": effective.os.guest_version, + "bios": effective.system.bios, + "machine": effective.system.machine, + "scsihw": effective.system.scsi_controller, + "agent": _bool_int(effective.system.qemu_agent), + "cores": effective.cpu.cores, + "sockets": effective.cpu.sockets, + "cpu": effective.cpu.cpu_type, + "memory": effective.memory.memory_mib, + "balloon": effective.memory.min_memory_mib if effective.memory.ballooning else 0, + "allow-ksm": _bool_int(effective.memory.allow_ksm), + "onboot": _bool_int(effective.general.onboot), + "tags": ";".join(effective.general.tags), + } + if effective.general.pool: + payload["pool"] = effective.general.pool + startup = build_startup_value( + effective.general.startup_order, + effective.general.startup_delay, + effective.general.shutdown_timeout, + ) + if startup: + payload["startup"] = startup + if effective.system.graphic_card != "default": + payload["vga"] = effective.system.graphic_card + if effective.system.add_efi_disk: + payload["efidisk0"] = ( + f"{effective.system.efi_storage}:1,efitype=4m," + f"pre-enrolled-keys={_bool_int(effective.system.pre_enrolled_keys)}" + ) + if effective.system.tpm_enabled: + payload["tpmstate0"] = f"{effective.system.efi_storage}:4,version=v2.0" + media = build_media_value(effective) + if media: + payload["ide2"] = media + network = build_network_value(effective) + if network: + payload["net0"] = network + for disk in effective.disks: + payload[disk.slot_name] 
= build_disk_value(disk) + return payload + + +def build_confirmation_text(config: VmConfig, settings: AppSettings) -> str: + effective = ( + effective_vm_config(config, settings) if settings.safety_policy.allow_create else config + ) + startup = build_startup_value( + effective.general.startup_order, + effective.general.startup_delay, + effective.general.shutdown_timeout, + ) + system_line = ( + f"System: machine={effective.system.machine}, " + f"bios={effective.system.bios}, scsi={effective.system.scsi_controller}" + ) + efi_line = ( + "EFI disk: " + f"{'enabled' if effective.system.add_efi_disk else 'disabled'} " + f"({effective.system.efi_storage or '-'})" + ) + cpu_line = ( + f"CPU: {effective.cpu.sockets} socket(s), " + f"{effective.cpu.cores} core(s), type={effective.cpu.cpu_type}" + ) + memory_line = ( + f"Memory: {effective.memory.memory_mib}MiB / balloon minimum " + f"{effective.memory.min_memory_mib if effective.memory.ballooning else 0}MiB" + ) + lines = [ + f"Node: {effective.general.node}", + f"VM ID: {effective.general.vmid}", + f"Name: {effective.general.name}", + f"Pool: {effective.general.pool or '-'}", + f"Tags: {', '.join(effective.general.tags) or '-'}", + f"HA: {'enabled' if effective.general.ha_enabled else 'disabled'}", + f"On boot: {'enabled' if effective.general.onboot else 'disabled'}", + f"Startup: {startup or '-'}", + "", + f"Media: {effective.os.media_choice}", + f"ISO storage: {effective.os.storage or '-'}", + f"ISO: {effective.os.iso or '-'}", + f"Guest: {effective.os.guest_type} / {effective.os.guest_version}", + "", + system_line, + efi_line, + f"TPM: {'enabled' if effective.system.tpm_enabled else 'disabled'}", + f"Qemu agent: {'enabled' if effective.system.qemu_agent else 'disabled'}", + "", + "Disks:", + ] + for disk in effective.disks: + lines.append( + f" - {disk.slot_name}: {disk.storage} {disk.size_gib}GiB " + f"cache={disk.cache} discard={'on' if disk.discard else 'ignore'}" + ) + lines.extend( + [ + "", + cpu_line, + 
memory_line, + "", + ( + "Network: disabled" + if effective.network.no_network_device + else f"Network: {effective.network.model} on {effective.network.bridge}" + ), + ] + ) + return "\n".join(lines) diff --git a/src/pve_vm_setup/errors.py b/src/pve_vm_setup/errors.py new file mode 100644 index 0000000..023a305 --- /dev/null +++ b/src/pve_vm_setup/errors.py @@ -0,0 +1,48 @@ +class AppError(Exception): + """Base application error.""" + + +class SettingsError(AppError): + """Configuration is missing or invalid.""" + + +class ProxmoxError(AppError): + """Base error raised while talking to Proxmox.""" + + +class ProxmoxTransportError(ProxmoxError): + """Transport-level failure while talking to Proxmox.""" + + +class ProxmoxConnectError(ProxmoxTransportError): + """DNS or TCP connection failure.""" + + +class ProxmoxTlsError(ProxmoxTransportError): + """TLS handshake or certificate verification failure.""" + + +class ProxmoxAuthError(ProxmoxError): + """Authentication failure.""" + + +class ProxmoxApiError(ProxmoxError): + """Unexpected HTTP response from the API.""" + + def __init__(self, message: str, status_code: int | None = None) -> None: + super().__init__(message) + self.status_code = status_code + + +class ProxmoxUnexpectedResponseError(ProxmoxError): + """The API returned an unexpected payload shape.""" + + +class ProxmoxPostCreateError(ProxmoxError): + """A follow-up step failed after the VM already existed.""" + + def __init__(self, node: str, vmid: int, step: str, message: str) -> None: + super().__init__(message) + self.node = node + self.vmid = vmid + self.step = step diff --git a/src/pve_vm_setup/models/__init__.py b/src/pve_vm_setup/models/__init__.py new file mode 100644 index 0000000..5c7ad5c --- /dev/null +++ b/src/pve_vm_setup/models/__init__.py @@ -0,0 +1 @@ +"""Application models.""" diff --git a/src/pve_vm_setup/models/__pycache__/__init__.cpython-313.pyc b/src/pve_vm_setup/models/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 
0000000..a1359b6 Binary files /dev/null and b/src/pve_vm_setup/models/__pycache__/__init__.cpython-313.pyc differ diff --git a/src/pve_vm_setup/models/__pycache__/workflow.cpython-313.pyc b/src/pve_vm_setup/models/__pycache__/workflow.cpython-313.pyc new file mode 100644 index 0000000..78df459 Binary files /dev/null and b/src/pve_vm_setup/models/__pycache__/workflow.cpython-313.pyc differ diff --git a/src/pve_vm_setup/models/workflow.py b/src/pve_vm_setup/models/workflow.py new file mode 100644 index 0000000..afee98c --- /dev/null +++ b/src/pve_vm_setup/models/workflow.py @@ -0,0 +1,157 @@ +from __future__ import annotations + +from dataclasses import dataclass, field + +WIZARD_STEPS = [ + "general", + "os", + "system", + "disks", + "cpu", + "memory", + "network", + "confirm", +] + + +@dataclass +class AuthenticationState: + username: str | None = None + realm: str | None = None + authenticated: bool = False + + +@dataclass +class GeneralConfig: + node: str = "" + vmid: int = 101 + name: str = "" + pool: str = "" + tags: list[str] = field(default_factory=list) + ha_enabled: bool = True + onboot: bool = False + startup_order: str = "" + startup_delay: str = "" + shutdown_timeout: str = "" + + +@dataclass +class OsConfig: + media_choice: str = "iso" + storage: str = "" + iso: str = "" + physical_drive_path: str = "/dev/sr0" + guest_type: str = "linux" + guest_version: str = "l26" + + +@dataclass +class SystemConfig: + graphic_card: str = "default" + machine: str = "q35" + bios: str = "ovmf" + add_efi_disk: bool = True + efi_storage: str = "ceph-pool" + pre_enrolled_keys: bool = False + scsi_controller: str = "virtio-scsi-single" + qemu_agent: bool = True + tpm_enabled: bool = False + + +@dataclass +class DiskConfig: + bus: str = "scsi" + device: int = 0 + storage: str = "ceph-pool" + size_gib: int = 32 + format: str = "raw" + cache: str = "none" + discard: bool = False + io_thread: bool = True + ssd_emulation: bool = True + backup: bool = True + skip_replication: 
bool = False + async_io: str = "io_uring" + + @property + def slot_name(self) -> str: + return f"{self.bus}{self.device}" + + +@dataclass +class CpuConfig: + cores: int = 2 + sockets: int = 1 + cpu_type: str = "host" + + +@dataclass +class MemoryConfig: + memory_mib: int = 2048 + min_memory_mib: int = 2048 + ballooning: bool = True + allow_ksm: bool = True + + +@dataclass +class NetworkConfig: + no_network_device: bool = False + bridge: str = "vmbr9" + vlan_tag: str = "" + model: str = "virtio" + mac_address: str = "" + firewall: bool = True + disconnected: bool = False + mtu: str = "" + rate_limit: str = "" + multiqueue: str = "" + + +@dataclass +class VmConfig: + general: GeneralConfig = field(default_factory=GeneralConfig) + os: OsConfig = field(default_factory=OsConfig) + system: SystemConfig = field(default_factory=SystemConfig) + disks: list[DiskConfig] = field(default_factory=lambda: [DiskConfig()]) + cpu: CpuConfig = field(default_factory=CpuConfig) + memory: MemoryConfig = field(default_factory=MemoryConfig) + network: NetworkConfig = field(default_factory=NetworkConfig) + + +@dataclass +class ReferenceData: + nodes: list[str] = field(default_factory=list) + pools: list[str] = field(default_factory=list) + existing_tags: list[str] = field(default_factory=list) + bridges: list[str] = field(default_factory=list) + iso_storages: list[str] = field(default_factory=list) + disk_storages: list[str] = field(default_factory=list) + all_storages: list[str] = field(default_factory=list) + isos: list[str] = field(default_factory=list) + + +@dataclass +class SubmissionState: + phase: str = "idle" + message: str = "" + node: str | None = None + vmid: int | None = None + partial_success: bool = False + + +@dataclass +class WorkflowState: + current_step_index: int = 0 + available_realms: list[str] = field(default_factory=list) + authentication: AuthenticationState = field(default_factory=AuthenticationState) + config: VmConfig = field(default_factory=VmConfig) + 
"""Login screen: collects credentials, loads realms, and authenticates."""

from __future__ import annotations

from textual import on
from textual.containers import Vertical
from textual.message import Message
from textual.widgets import Button, Input, Select, Static

from ..errors import ProxmoxError
from ..models.workflow import WorkflowState
from ..services.base import ProxmoxService, Realm
from ..settings import AppSettings


class LoginView(Vertical):
    """Credential form that authenticates against the Proxmox service.

    Blocking service calls (realm loading, login) run on worker threads and
    report back to the UI thread via ``app.call_from_thread``. On success the
    view records the identity on the shared workflow state and posts
    ``Authenticated`` so the parent can switch to the wizard.
    """

    class Authenticated(Message):
        """Posted after a successful login; carries the identity used."""

        def __init__(self, username: str, realm: str) -> None:
            self.username = username
            self.realm = realm
            super().__init__()

    DEFAULT_CSS = """
    LoginView {
        width: 1fr;
        height: 1fr;
        padding: 1 2;
        align-horizontal: center;
    }

    #login-card {
        width: 80;
        max-width: 100%;
        border: round $accent;
        padding: 1 2;
    }

    #title {
        text-style: bold;
        margin-bottom: 1;
    }

    Input, Select, Button {
        margin-top: 1;
    }

    #status {
        margin-top: 1;
        color: $text-muted;
    }
    """

    def __init__(
        self,
        settings: AppSettings,
        workflow: WorkflowState,
        service: ProxmoxService,
    ) -> None:
        """Store collaborators; widgets are created later in ``compose``."""
        super().__init__()
        self._settings = settings
        self._workflow = workflow
        self._service = service

    def compose(self):
        """Build the login card, pre-filling credentials from settings."""
        with Vertical(id="login-card"):
            yield Static("Proxmox Login", id="title")
            yield Static(
                f"Mode: {self._service.mode} on {self._settings.sanitized_host}",
                id="mode",
            )
            yield Input(
                value=self._settings.proxmox_user or "",
                placeholder="Username",
                id="username",
            )
            yield Input(
                value=self._settings.proxmox_password or "",
                password=True,
                placeholder="Password",
                id="password",
            )
            # Realm options arrive asynchronously; see _load_realms.
            yield Select[str](options=[], prompt="Realm", id="realm")
            yield Button("Connect", id="connect", variant="primary")
            yield Static("Loading realms...", id="status")

    def on_mount(self) -> None:
        """Focus the username field and start loading realms in a worker."""
        username_input = self.query_one("#username", Input)
        self.call_after_refresh(self.app.set_focus, username_input)
        self.run_worker(self._load_realms, thread=True, exclusive=True)

    def _load_realms(self) -> None:
        """Worker: fetch realms from the service and hand them to the UI thread."""
        try:
            realms = self._service.load_realms()
        # NOTE(review): broad catch keeps any worker failure out of the UI;
        # a narrower ProxmoxError may suffice — confirm against the service.
        except Exception as exc:
            self.app.call_from_thread(self._show_status, f"Failed to load realms: {exc}")
            return
        self.app.call_from_thread(self._set_realms, realms)

    def _set_realms(self, realms: list[Realm]) -> None:
        """UI thread: populate the realm select and pick a sensible default."""
        self._workflow.available_realms = [realm.name for realm in realms]
        options = [(realm.title, realm.name) for realm in realms]
        select = self.query_one("#realm", Select)
        select.set_options(options)

        # Prefer the realm from settings, then the server's default, then first.
        preferred_realm = self._settings.proxmox_realm
        if preferred_realm and preferred_realm in self._workflow.available_realms:
            select.value = preferred_realm
        elif realms:
            default_realm = next((realm.name for realm in realms if realm.default), realms[0].name)
            select.value = default_realm

        self._show_status(f"Loaded {len(realms)} realm(s).")

    def _show_status(self, message: str) -> None:
        """Update the status line at the bottom of the card."""
        self.query_one("#status", Static).update(message)

    @on(Button.Pressed, "#connect")
    def on_connect_pressed(self) -> None:
        self._submit()

    @on(Input.Submitted, "#username")
    @on(Input.Submitted, "#password")
    def on_input_submitted(self) -> None:
        self._submit()

    @on(Select.Changed, "#realm")
    def on_realm_changed(self) -> None:
        # Keep the form keyboard friendly once realms have loaded.
        if self.app.focused is None:
            username_input = self.query_one("#username", Input)
            self.call_after_refresh(self.app.set_focus, username_input)

    def _submit(self) -> None:
        """Validate the form and kick off authentication on a worker thread."""
        username = self.query_one("#username", Input).value.strip()
        password = self.query_one("#password", Input).value
        realm = self.query_one("#realm", Select).value
        # Select.value may be a blank sentinel rather than str before realms load.
        if not username or not password or not isinstance(realm, str):
            self._show_status("Username, password, and realm are required.")
            return

        self._show_status("Authenticating...")
        self.run_worker(
            lambda: self._authenticate(username=username, password=password, realm=realm),
            thread=True,
            exclusive=True,
        )

    def _authenticate(self, *, username: str, password: str, realm: str) -> None:
        """Worker: perform the login call and report the outcome to the UI thread."""
        try:
            session = self._service.login(username, password, realm)
        except (ProxmoxError, ValueError) as exc:
            self.app.call_from_thread(self._show_status, f"Authentication failed: {exc}")
            return
        self.app.call_from_thread(self._mark_authenticated, session.username, realm)

    def _mark_authenticated(self, username: str, realm: str) -> None:
        """UI thread: record the identity on shared state and notify the parent."""
        self._workflow.authentication.username = username
        self._workflow.authentication.realm = realm
        self._workflow.authentication.authenticated = True
        self._show_status(f"Authenticated as {username}.")
        self.post_message(self.Authenticated(username, realm))
#auto-start-actions Button { + min-width: 8; + margin-right: 1; + } + """ + + BINDINGS = [("escape", "cancel", "Cancel")] + + def compose(self) -> ComposeResult: + with Vertical(id="auto-start-dialog"): + yield Static("Start VM Automatically?", id="auto-start-title") + yield Static("Should the VM be started automatically after creation?") + with HorizontalGroup(id="auto-start-actions"): + yield Button("Yes", id="auto-start-yes", variant="success") + yield Button("No", id="auto-start-no") + + def on_mount(self) -> None: + self.set_focus(self.query_one("#auto-start-yes", Button)) + + def action_cancel(self) -> None: + self.dismiss(None) + + @on(Button.Pressed, "#auto-start-yes") + def on_yes_pressed(self) -> None: + self.dismiss(True) + + @on(Button.Pressed, "#auto-start-no") + def on_no_pressed(self) -> None: + self.dismiss(False) + + +class WizardView(Vertical): + can_focus = False + + class SubmitFinished(Message): + def __init__(self, success: bool, message: str) -> None: + self.success = success + self.message = message + super().__init__() + + DEFAULT_CSS = """ + WizardView { + display: none; + height: 1fr; + layout: vertical; + padding: 1 2; + } + + .section { + border: round $accent; + height: 1fr; + layout: vertical; + padding: 1 2; + margin-top: 1; + } + + .field { + margin-top: 1; + } + + WizardView Input { + height: 1; + min-height: 1; + border: none; + padding: 0 1; + } + + WizardView Input:focus { + height: 1; + min-height: 1; + border: none; + background-tint: $foreground 5%; + } + + WizardView Select { + height: 1; + min-height: 1; + } + + WizardView Select > SelectCurrent { + height: 1; + min-height: 1; + border: none; + padding: 0 1; + } + + WizardView Select:focus > SelectCurrent { + border: none; + background-tint: $foreground 5%; + } + + WizardView Button { + height: 1; + min-height: 1; + width: auto; + min-width: 10; + padding: 0 1; + border: none; + color: $foreground; + } + + WizardView Button:focus, + WizardView Button:hover, + WizardView 
Button.-active { + height: 1; + min-height: 1; + border: none; + background-tint: $foreground 8%; + } + + WizardView Button.-primary, + WizardView Button.-primary:focus, + WizardView Button.-primary:hover, + WizardView Button.-primary.-active, + WizardView Button.-success, + WizardView Button.-success:focus, + WizardView Button.-success:hover, + WizardView Button.-success.-active { + color: $button-color-foreground; + border: none; + } + + WizardView Checkbox { + height: 1; + min-height: 1; + border: none; + padding: 0 1; + } + + WizardView Checkbox:focus { + height: 1; + min-height: 1; + border: none; + background-tint: $foreground 5%; + } + + #general-tag-add, + #general-tag-use, + #general-tag-remove { + width: auto; + margin-top: 1; + } + + #disks-add, + #disks-remove { + width: auto; + margin-right: 1; + } + + #disks-select { + width: 1fr; + } + + #wizard-actions { + margin-top: 1; + height: auto; + } + + #wizard-errors { + color: $error; + margin-top: 1; + } + + #wizard-status { + color: $text-muted; + margin-top: 1; + } + + #wizard-title { + text-style: bold; + } + """ + + def __init__( + self, settings: AppSettings, workflow: WorkflowState, service: ProxmoxService + ) -> None: + super().__init__() + self._settings = settings + self._workflow = workflow + self._service = service + self._selected_disk_index = 0 + self._node_load_in_flight = False + self._suppress_node_change = False + self._suppress_storage_change = False + self._suppress_disk_selection_change = False + self._initializing_reference_data = False + self._loaded_node_reference: str | None = None + self._loaded_iso_source: tuple[str, str] | None = None + + def _section(self, section_id: str, title: str) -> ScrollableContainer: + section = ScrollableContainer(id=section_id, classes="section") + section.border_title = f" {title} " + return section + + def compose(self): + yield Static("Wizard", id="wizard-title") + yield Static("", id="wizard-step") + yield Static("", id="wizard-errors") + yield 
Static("", id="wizard-status") + + with self._section("general-section", "General"): + yield Select[str]( + [], prompt="Node", id="general-node", allow_blank=True, classes="field" + ) + yield Input(id="general-vmid", placeholder="VM ID", classes="field") + yield Input(id="general-name", placeholder="VM Name", classes="field") + yield Select[str]( + [], + prompt="Ressource Pool", + id="general-pool", + allow_blank=True, + classes="field", + ) + yield Input(id="general-tag-input", placeholder="Add tag", classes="field") + yield Button("Add Tag", id="general-tag-add") + yield Select[str]( + [], + prompt="Existing tags", + id="general-tag-existing", + allow_blank=True, + classes="field", + ) + yield Button("Use Existing", id="general-tag-use") + yield Select[str]( + [], + prompt="Current tags", + id="general-tag-current", + allow_blank=True, + classes="field", + ) + yield Button("Remove Tag", id="general-tag-remove") + yield Checkbox("High availability", value=True, id="general-ha", classes="field") + yield Checkbox("Start at boot", value=False, id="general-onboot", classes="field") + yield Input(id="general-startup-order", placeholder="Startup order", classes="field") + yield Input( + id="general-startup-delay", placeholder="Startup delay (seconds)", classes="field" + ) + yield Input( + id="general-shutdown-timeout", + placeholder="Shutdown timeout (seconds)", + classes="field", + ) + + with self._section("os-section", "Operating System"): + yield Select[str]( + [("ISO", "iso"), ("Physical disc drive", "physical"), ("No media", "none")], + value="iso", + id="os-media-choice", + allow_blank=False, + classes="field", + ) + yield Select[str]( + [], prompt="ISO storage", id="os-storage", allow_blank=True, classes="field" + ) + yield Select[str]( + [], prompt="ISO image", id="os-iso", allow_blank=True, classes="field" + ) + yield Input( + id="os-physical-drive", + placeholder="Physical drive path", + value="/dev/sr0", + classes="field", + ) + yield Select[str]( + [ + 
("Linux", "linux"), + ("Windows", "windows"), + ("Solaris", "solaris"), + ("Other", "other"), + ], + value="linux", + id="os-guest-type", + allow_blank=False, + classes="field", + ) + yield Select[str]( + GUEST_VERSIONS["linux"], + value="l26", + prompt="Guest version", + id="os-guest-version", + allow_blank=False, + classes="field", + ) + + with self._section("system-section", "System"): + yield Select[str]( + [ + ("Default", "default"), + ("Standard", "std"), + ("VirtIO GPU", "virtio"), + ("VMware", "vmware"), + ("QXL", "qxl"), + ], + value="default", + id="system-graphic-card", + allow_blank=False, + classes="field", + ) + yield Select[str]( + [("q35", "q35"), ("i440fx", "pc")], + value="q35", + id="system-machine", + allow_blank=False, + classes="field", + ) + yield Select[str]( + [("OVMF (UEFI)", "ovmf"), ("SeaBIOS", "seabios")], + value="ovmf", + id="system-bios", + allow_blank=False, + classes="field", + ) + yield Checkbox("Add EFI disk", value=True, id="system-add-efi", classes="field") + yield Select[str]( + [], prompt="EFI storage", id="system-efi-storage", allow_blank=True, classes="field" + ) + yield Checkbox("Pre-enroll keys", value=False, id="system-pre-enroll", classes="field") + yield Select[str]( + [ + ("VirtIO SCSI single", "virtio-scsi-single"), + ("VirtIO SCSI", "virtio-scsi-pci"), + ("LSI", "lsi"), + ("MegaSAS", "megasas"), + ("PVSCSI", "pvscsi"), + ], + value="virtio-scsi-single", + id="system-scsi-controller", + allow_blank=False, + classes="field", + ) + yield Checkbox("Qemu agent", value=True, id="system-qemu-agent", classes="field") + yield Checkbox("Add TPM", value=False, id="system-tpm", classes="field") + + with self._section("disks-section", "Disks"): + with HorizontalGroup(classes="field"): + yield Button("Add Disk", id="disks-add") + yield Button("Remove Disk", id="disks-remove") + with HorizontalGroup(classes="field"): + yield Select[str]( + [("No disks configured", NO_DISK_SELECTED)], + value=NO_DISK_SELECTED, + prompt="Disk", + 
id="disks-select", + allow_blank=False, + ) + yield Static("", id="disks-summary", classes="field") + yield Select[str]( + [("SCSI", "scsi"), ("VirtIO", "virtio"), ("SATA", "sata"), ("IDE", "ide")], + value="scsi", + id="disk-bus", + allow_blank=False, + classes="field", + ) + yield Input(id="disk-device", placeholder="Device index", classes="field") + yield Select[str]( + [], prompt="Disk storage", id="disk-storage", allow_blank=True, classes="field" + ) + yield Input(id="disk-size", placeholder="Disk size GiB", classes="field") + yield Static("Format: raw", id="disk-format", classes="field") + yield Select[str]( + [ + ("No cache", "none"), + ("Write back", "writeback"), + ("Write through", "writethrough"), + ("Direct sync", "directsync"), + ("Unsafe", "unsafe"), + ], + value="none", + id="disk-cache", + allow_blank=False, + classes="field", + ) + yield Checkbox("Discard", value=False, id="disk-discard", classes="field") + yield Checkbox("IO thread", value=True, id="disk-io-thread", classes="field") + yield Checkbox("SSD emulation", value=True, id="disk-ssd", classes="field") + yield Checkbox("Include in backup", value=True, id="disk-backup", classes="field") + yield Checkbox( + "Skip replication", value=False, id="disk-skip-replication", classes="field" + ) + yield Select[str]( + [("io_uring", "io_uring"), ("native", "native"), ("threads", "threads")], + value="io_uring", + id="disk-aio", + allow_blank=False, + classes="field", + ) + + with self._section("cpu-section", "CPU"): + yield Input(id="cpu-cores", value="2", placeholder="Cores", classes="field") + yield Input(id="cpu-sockets", value="1", placeholder="Sockets", classes="field") + yield Select[str]( + [("host", "host"), ("kvm64", "kvm64"), ("x86-64-v2-AES", "x86-64-v2-AES")], + value="host", + id="cpu-type", + allow_blank=False, + classes="field", + ) + + with self._section("memory-section", "Memory"): + yield Input(id="memory-size", value="2048", placeholder="Memory MiB", classes="field") + yield Input( + 
id="memory-min-size", value="2048", placeholder="Min memory MiB", classes="field" + ) + yield Checkbox("Ballooning", value=True, id="memory-ballooning", classes="field") + yield Checkbox("Allow KSM", value=True, id="memory-ksm", classes="field") + + with self._section("network-section", "Network"): + yield Checkbox("No network device", value=False, id="network-none", classes="field") + yield Select[str]( + [], prompt="Bridge", id="network-bridge", allow_blank=True, classes="field" + ) + yield Input(id="network-vlan", placeholder="VLAN tag", classes="field") + yield Select[str]( + [ + ("VirtIO", "virtio"), + ("E1000", "e1000"), + ("VMXNET3", "vmxnet3"), + ("RTL8139", "rtl8139"), + ], + value="virtio", + id="network-model", + allow_blank=False, + classes="field", + ) + yield Input(id="network-mac", placeholder="MAC address (blank = auto)", classes="field") + yield Checkbox("Firewall", value=True, id="network-firewall", classes="field") + yield Checkbox("Disconnected", value=False, id="network-disconnected", classes="field") + yield Input(id="network-mtu", placeholder="MTU", classes="field") + yield Input(id="network-rate", placeholder="Rate limit MB/s", classes="field") + yield Input(id="network-queues", placeholder="Multiqueue", classes="field") + + with self._section("confirm-section", "Confirm"): + yield Static("", id="confirm-summary", classes="field") + yield Static("", id="confirm-result", classes="field") + + with Horizontal(id="wizard-actions"): + yield Button("Next", id="wizard-next", variant="primary") + yield Button("Back", id="wizard-back") + yield Button("Create VM", id="wizard-create", variant="success") + + def on_mount(self) -> None: + self._set_guest_version_options("linux") + self._refresh_disk_options() + self._load_selected_disk_into_widgets() + self._show_step() + self._sync_media_visibility() + self._sync_system_visibility() + self._sync_memory_visibility() + self._sync_network_visibility() + + def activate(self) -> None: + self.styles.display = 
    def _load_initial_data(self) -> None:
        """Worker: fetch cluster-wide reference data and seed the wizard.

        Runs on a thread (started via ``run_worker(..., thread=True)`` in
        ``activate``); all UI updates are marshalled back with
        ``app.call_from_thread``.
        """
        # Fetch the four independent cluster-level lists in parallel.
        with ThreadPoolExecutor(max_workers=4) as executor:
            nodes_future = executor.submit(self._service.load_nodes)
            pools_future = executor.submit(self._service.load_pools)
            tags_future = executor.submit(self._service.load_existing_tags)
            next_vmid_future = executor.submit(self._service.load_next_vmid)

            nodes = nodes_future.result()
            pools = pools_future.result()
            tags = tags_future.result()
            next_vmid = next_vmid_future.result()
        node_names = [node.name for node in nodes]
        # Node preference: test-mode node from safety policy first, then the
        # hard-coded site default "sbx0pve00", then the first reported node.
        # NOTE(review): "sbx0pve00" is a deployment-specific fallback — confirm
        # it belongs in code rather than configuration.
        preferred_node = (
            self._settings.safety_policy.test_node
            if self._settings.safety_policy.enable_test_mode
            else ""
        ) or (
            "sbx0pve00" if "sbx0pve00" in node_names else (node_names[0] if node_names else "")
        )
        references = ReferenceData(
            nodes=node_names,
            pools=[pool.poolid for pool in pools],
            existing_tags=tags,
        )
        storages: list[Storage] = []
        bridges: list[Bridge] = []
        isos: list[str] = []
        if preferred_node:
            # Node-specific lists (storages, bridges) also fetched in parallel.
            with ThreadPoolExecutor(max_workers=2) as executor:
                storages_future = executor.submit(self._service.load_storages, preferred_node)
                bridges_future = executor.submit(self._service.load_bridges, preferred_node)
                storages = storages_future.result()
                bridges = bridges_future.result()
            iso_storages = sorted(
                storage.storage for storage in storages if "iso" in storage.content
            )
            # Prefer the "cephfs" storage for ISOs when present.
            default_iso_storage = (
                "cephfs" if "cephfs" in iso_storages else (iso_storages[0] if iso_storages else "")
            )
            if default_iso_storage:
                isos = [
                    iso.volid
                    for iso in self._service.load_isos(preferred_node, default_iso_storage)
                ]
        # Hand everything to the UI thread in a single call.
        self.app.call_from_thread(
            self._apply_initial_reference_data,
            references,
            next_vmid,
            preferred_node,
            storages,
            bridges,
            isos,
        )
_load_node_dependent_data(self, node: str) -> None: + with ThreadPoolExecutor(max_workers=2) as executor: + storages_future = executor.submit(self._service.load_storages, node) + bridges_future = executor.submit(self._service.load_bridges, node) + storages = storages_future.result() + bridges = bridges_future.result() + self.app.call_from_thread(self._apply_node_reference_data, node, storages, bridges) + + def _apply_initial_reference_data( + self, + references: ReferenceData, + next_vmid: int, + preferred_node: str, + storages: list[Storage], + bridges: list[Bridge], + isos: list[str], + ) -> None: + self._workflow.reference_data.nodes = references.nodes + self._workflow.reference_data.pools = references.pools + self._workflow.reference_data.existing_tags = references.existing_tags + + self._set_select_options("general-node", references.nodes) + self._set_select_options("general-pool", references.pools) + self._set_select_options("general-tag-existing", references.existing_tags) + + if preferred_node: + self._suppress_node_change = True + self.query_one("#general-node", Select).value = preferred_node + self._suppress_node_change = False + self._workflow.config.general.node = preferred_node + self.query_one("#general-vmid", Input).value = str(next_vmid) + self._workflow.config.general.vmid = next_vmid + if self._settings.safety_policy.enable_test_mode and self._settings.safety_policy.test_pool: + self.query_one("#general-pool", Select).value = self._settings.safety_policy.test_pool + self._workflow.config.general.pool = self._settings.safety_policy.test_pool + if preferred_node: + self._apply_node_reference_data(preferred_node, storages, bridges) + if isos: + storage = self._select("os-storage") + self._apply_isos(preferred_node, storage, isos) + self._initializing_reference_data = False + self._show_status("Loaded general reference data.") + self._focus_current_step(force=True) + + def _apply_node_reference_data( + self, + node: str, + storages: list[Storage], + 
bridges: list[Bridge], + ) -> None: + all_storages = sorted(storage.storage for storage in storages) + iso_storages = sorted(storage.storage for storage in storages if "iso" in storage.content) + disk_storages = sorted( + storage.storage for storage in storages if "images" in storage.content + ) + bridge_names = [bridge.iface for bridge in bridges] + + refs = self._workflow.reference_data + refs.all_storages = all_storages + refs.iso_storages = iso_storages + refs.disk_storages = disk_storages + refs.bridges = bridge_names + self._loaded_node_reference = node + + self._set_select_options("os-storage", iso_storages) + self._set_select_options("system-efi-storage", disk_storages) + self._set_select_options("disk-storage", disk_storages) + self._set_select_options("network-bridge", bridge_names) + + default_iso_storage = ( + "cephfs" if "cephfs" in iso_storages else (iso_storages[0] if iso_storages else "") + ) + default_disk_storage = ( + "ceph-pool" + if "ceph-pool" in disk_storages + else (disk_storages[0] if disk_storages else "") + ) + default_bridge = ( + "vmbr9" if "vmbr9" in bridge_names else (bridge_names[0] if bridge_names else "") + ) + + if default_iso_storage: + self._suppress_storage_change = True + self.query_one("#os-storage", Select).value = default_iso_storage + self._suppress_storage_change = False + self._workflow.config.os.storage = default_iso_storage + if default_disk_storage: + self.query_one("#system-efi-storage", Select).value = default_disk_storage + self.query_one("#disk-storage", Select).value = default_disk_storage + self._workflow.config.system.efi_storage = default_disk_storage + if self._workflow.config.disks: + self._workflow.config.disks[0].storage = default_disk_storage + self._load_selected_disk_into_widgets() + if default_bridge: + self.query_one("#network-bridge", Select).value = default_bridge + self._workflow.config.network.bridge = default_bridge + self._node_load_in_flight = False + self._show_status(f"Loaded node-specific 
reference data for {node}.") + self._focus_current_step(force=True) + + def _load_isos(self, node: str, storage: str) -> None: + isos = self._service.load_isos(node, storage) + self.app.call_from_thread(self._apply_isos, node, storage, [iso.volid for iso in isos]) + + def _apply_isos(self, node: str, storage: str, iso_values: list[str]) -> None: + self._workflow.reference_data.isos = iso_values + self._loaded_iso_source = (node, storage) + self._set_select_options("os-iso", iso_values) + preferred = select_latest_nixos_iso(iso_values) or (iso_values[0] if iso_values else "") + if preferred: + self.query_one("#os-iso", Select).value = preferred + self._workflow.config.os.iso = preferred + self._show_status(f"Loaded {len(iso_values)} ISO image(s) from {storage}.") + self._focus_current_step(force=True) + + def _show_step(self) -> None: + for step in WIZARD_STEPS: + section = self.query_one(f"#{step}-section", ScrollableContainer) + is_current = step == self._workflow.current_step + section.display = is_current + if is_current: + section.scroll_to(y=0, animate=False, immediate=True, force=True) + self.query_one("#wizard-title", Static).update("Create Proxmox VM") + step_label = ( + f"Step {self._workflow.current_step_index + 1}/" + f"{len(WIZARD_STEPS)}: {self._workflow.step_title}" + ) + self.query_one("#wizard-step", Static).update(step_label) + self.query_one("#wizard-back", Button).disabled = self._workflow.current_step_index == 0 + is_confirm = self._workflow.current_step == "confirm" + self.query_one("#wizard-next", Button).styles.display = "none" if is_confirm else "block" + self.query_one("#wizard-create", Button).styles.display = "block" if is_confirm else "none" + if is_confirm: + self._update_confirmation() + self._update_confirm_actions() + self.call_after_refresh(self._focus_current_step, True) + + def _show_errors(self, errors: list[str]) -> None: + self.query_one("#wizard-errors", Static).update("\n".join(errors)) + + def _show_status(self, message: 
    def _focus_current_step(self, force: bool = False) -> None:
        """Move keyboard focus to the first usable widget of the active step.

        With ``force=False`` the focus is only moved when nothing (or this
        container itself) currently has focus, so user focus is not stolen.
        """
        focused = self.app.focused
        if not force and focused is not None and focused is not self:
            return

        # Per-step candidate widgets, in priority order; "wizard-next" acts
        # as the fallback for every data-entry step.
        target_ids = {
            "general": ["general-name", "general-vmid", "general-node", "wizard-next"],
            "os": ["os-media-choice", "os-storage", "os-iso", "os-physical-drive", "wizard-next"],
            "system": ["system-graphic-card", "system-machine", "wizard-next"],
            "disks": ["disks-select", "disk-size", "wizard-next"],
            "cpu": ["cpu-cores", "cpu-sockets", "wizard-next"],
            "memory": ["memory-size", "memory-min-size", "wizard-next"],
            "network": ["network-bridge", "network-model", "wizard-next"],
            "confirm": ["wizard-create", "wizard-back"],
        }[self._workflow.current_step]

        for target_id in target_ids:
            matches = self.query(f"#{target_id}")
            if not matches:
                continue
            widget = matches.first()
            # Skip hidden or disabled candidates and fall through to the next.
            if not widget.display or widget.disabled:
                continue
            self.app.set_focus(widget)
            return

    def _set_widget_visibility(self, widget_id: str, visible: bool) -> None:
        """Show/hide a widget, disabling it while hidden.

        An open Select dropdown is collapsed before hiding so it cannot stay
        expanded while its anchor is invisible.
        """
        widget = self.query_one(f"#{widget_id}", Widget)
        if not visible and isinstance(widget, Select):
            widget.expanded = False
        widget.display = visible
        widget.disabled = not visible

    def _set_select_options(self, widget_id: str, values: list[str]) -> None:
        """Replace a Select's options, using each value as its own label."""
        widget = self.query_one(f"#{widget_id}", Select)
        widget.set_options([(value, value) for value in values])

    def _input(self, widget_id: str) -> str:
        """Return the stripped text of the Input with the given id."""
        return self.query_one(f"#{widget_id}", Input).value.strip()

    def _select(self, widget_id: str) -> str:
        """Return the Select's value, or "" when nothing is selected."""
        value = self.query_one(f"#{widget_id}", Select).value
        # A blank Select reports a non-str sentinel; normalize to "".
        return value if isinstance(value, str) else ""

    def _checked(self, widget_id: str) -> bool:
        """Return the checked state of the Checkbox with the given id."""
        return self.query_one(f"#{widget_id}", Checkbox).value
config.general.vmid = int(self._input("general-vmid") or "0") + config.general.name = self._input("general-name") + config.general.pool = self._select("general-pool") + config.general.ha_enabled = self._checked("general-ha") + config.general.onboot = self._checked("general-onboot") + config.general.startup_order = self._input("general-startup-order") + config.general.startup_delay = self._input("general-startup-delay") + config.general.shutdown_timeout = self._input("general-shutdown-timeout") + + config.os.media_choice = self._select("os-media-choice") or "iso" + config.os.storage = self._select("os-storage") + config.os.iso = self._select("os-iso") + config.os.physical_drive_path = self._input("os-physical-drive") or "/dev/sr0" + config.os.guest_type = self._select("os-guest-type") or "linux" + config.os.guest_version = self._select("os-guest-version") or "l26" + + config.system.graphic_card = self._select("system-graphic-card") or "default" + config.system.machine = self._select("system-machine") or "q35" + config.system.bios = self._select("system-bios") or "ovmf" + config.system.add_efi_disk = self._checked("system-add-efi") + config.system.efi_storage = self._select("system-efi-storage") + config.system.pre_enrolled_keys = self._checked("system-pre-enroll") + config.system.scsi_controller = ( + self._select("system-scsi-controller") or "virtio-scsi-single" + ) + config.system.qemu_agent = self._checked("system-qemu-agent") + config.system.tpm_enabled = self._checked("system-tpm") + + self._sync_selected_disk_from_widgets() + + config.cpu.cores = int(self._input("cpu-cores") or "0") + config.cpu.sockets = int(self._input("cpu-sockets") or "0") + config.cpu.cpu_type = self._select("cpu-type") or "host" + + config.memory.memory_mib = int(self._input("memory-size") or "0") + config.memory.min_memory_mib = int(self._input("memory-min-size") or "0") + config.memory.ballooning = self._checked("memory-ballooning") + config.memory.allow_ksm = 
self._checked("memory-ksm") + + config.network.no_network_device = self._checked("network-none") + config.network.bridge = self._select("network-bridge") + config.network.vlan_tag = self._input("network-vlan") + config.network.model = self._select("network-model") or "virtio" + config.network.mac_address = self._input("network-mac") + config.network.firewall = self._checked("network-firewall") + config.network.disconnected = self._checked("network-disconnected") + config.network.mtu = self._input("network-mtu") + config.network.rate_limit = self._input("network-rate") + config.network.multiqueue = self._input("network-queues") + + def _set_guest_version_options(self, guest_type: str) -> None: + versions = GUEST_VERSIONS.get(guest_type, GUEST_VERSIONS["other"]) + widget = self.query_one("#os-guest-version", Select) + widget.set_options(versions) + widget.value = versions[0][1] + + def _update_confirmation(self) -> None: + self._sync_all_from_widgets() + summary = build_confirmation_text(self._workflow.config, self._settings) + self.query_one("#confirm-summary", Static).update(summary) + result = self._workflow.submission.message + self.query_one("#confirm-result", Static).update(result) + + def _confirm_action_is_exit(self) -> bool: + return self._workflow.submission.phase in {"success", "partial"} + + def _update_confirm_actions(self) -> None: + button = self.query_one("#wizard-create", Button) + if self._confirm_action_is_exit(): + button.label = "Exit" + button.variant = "primary" + button.disabled = False + return + button.label = "Create VM" + button.variant = "success" + button.disabled = self._workflow.submission.phase == "running" + + def _sync_media_visibility(self) -> None: + choice = self._select("os-media-choice") or "iso" + self._set_widget_visibility("os-storage", choice == "iso") + self._set_widget_visibility("os-iso", choice == "iso") + self._set_widget_visibility("os-physical-drive", choice == "physical") + + def _sync_system_visibility(self) -> 
None: + show_efi_storage = self._checked("system-add-efi") or self._checked("system-tpm") + self._set_widget_visibility("system-efi-storage", show_efi_storage) + self._set_widget_visibility("system-pre-enroll", self._checked("system-add-efi")) + + def _sync_memory_visibility(self) -> None: + show_ballooning_fields = self._checked("memory-ballooning") + self._set_widget_visibility("memory-min-size", show_ballooning_fields) + self._set_widget_visibility("memory-ksm", show_ballooning_fields) + + def _sync_network_visibility(self) -> None: + visible = not self._checked("network-none") + for widget_id in [ + "network-bridge", + "network-vlan", + "network-model", + "network-mac", + "network-firewall", + "network-disconnected", + "network-mtu", + "network-rate", + "network-queues", + ]: + self._set_widget_visibility(widget_id, visible) + + def _selected_disk(self) -> DiskConfig | None: + if not self._workflow.config.disks: + return None + self._selected_disk_index = min( + self._selected_disk_index, len(self._workflow.config.disks) - 1 + ) + return self._workflow.config.disks[self._selected_disk_index] + + def _sync_selected_disk_from_widgets(self, *, refresh_options: bool = True) -> None: + disk = self._selected_disk() + if disk is None: + return + disk.bus = self._select("disk-bus") or "scsi" + disk.device = int(self._input("disk-device") or "0") + disk.storage = self._select("disk-storage") + disk.size_gib = int(self._input("disk-size") or "0") + disk.cache = self._select("disk-cache") or "none" + disk.discard = self._checked("disk-discard") + disk.io_thread = self._checked("disk-io-thread") + disk.ssd_emulation = self._checked("disk-ssd") + disk.backup = self._checked("disk-backup") + disk.skip_replication = self._checked("disk-skip-replication") + disk.async_io = self._select("disk-aio") or "io_uring" + if refresh_options: + self._refresh_disk_options() + + def _load_selected_disk_into_widgets(self, *, refresh_options: bool = True) -> None: + disk = 
self._selected_disk() + disabled = disk is None + for widget_id in [ + "disk-bus", + "disk-device", + "disk-storage", + "disk-size", + "disk-cache", + "disk-discard", + "disk-io-thread", + "disk-ssd", + "disk-backup", + "disk-skip-replication", + "disk-aio", + ]: + self.query_one(f"#{widget_id}").disabled = disabled + if disk is None: + self.query_one("#disks-summary", Static).update("No disks configured.") + return + self.query_one("#disk-bus", Select).value = disk.bus + self.query_one("#disk-device", Input).value = str(disk.device) + if disk.storage and disk.storage in self._workflow.reference_data.disk_storages: + self.query_one("#disk-storage", Select).value = disk.storage + self.query_one("#disk-size", Input).value = str(disk.size_gib) + self.query_one("#disk-cache", Select).value = disk.cache + self.query_one("#disk-discard", Checkbox).value = disk.discard + self.query_one("#disk-io-thread", Checkbox).value = disk.io_thread + self.query_one("#disk-ssd", Checkbox).value = disk.ssd_emulation + self.query_one("#disk-backup", Checkbox).value = disk.backup + self.query_one("#disk-skip-replication", Checkbox).value = disk.skip_replication + self.query_one("#disk-aio", Select).value = disk.async_io + if refresh_options: + self._refresh_disk_options() + + def _switch_selected_disk(self, new_index: int) -> None: + self._sync_selected_disk_from_widgets(refresh_options=False) + self._update_disk_summary() + self._selected_disk_index = new_index + self._load_selected_disk_into_widgets(refresh_options=False) + self.query_one("#disks-select", Select).focus() + + def _refresh_disk_options(self) -> None: + select = self.query_one("#disks-select", Select) + self._suppress_disk_selection_change = True + try: + if self._workflow.config.disks: + disk_labels = [ + f"Disk {index + 1}: {disk.slot_name}" + for index, disk in enumerate(self._workflow.config.disks) + ] + select.set_options([(label, str(index)) for index, label in enumerate(disk_labels)]) + select.disabled = False + 
select.value = str(self._selected_disk_index) + else: + select.set_options([("No disks configured", NO_DISK_SELECTED)]) + select.value = NO_DISK_SELECTED + select.disabled = True + finally: + self._suppress_disk_selection_change = False + self._update_disk_summary() + + def _update_disk_summary(self) -> None: + if self._workflow.config.disks: + summary = "\n".join( + f"{index + 1}. {disk.slot_name} {disk.storage or '-'} {disk.size_gib}GiB" + for index, disk in enumerate(self._workflow.config.disks) + ) + else: + summary = "No disks configured." + self.query_one("#disks-summary", Static).update(summary) + + @on(Select.Changed, "#general-node") + def on_general_node_changed(self, event: Select.Changed) -> None: + if ( + isinstance(event.value, str) + and event.value + and not self._node_load_in_flight + and not self._suppress_node_change + and not self._initializing_reference_data + and self._loaded_node_reference != event.value + ): + self._node_load_in_flight = True + self.run_worker( + lambda: self._load_node_dependent_data(event.value), + thread=True, + exclusive=True, + ) + + @on(Select.Changed, "#os-storage") + def on_os_storage_changed(self, event: Select.Changed) -> None: + node = self._select("general-node") + if ( + isinstance(event.value, str) + and event.value + and node + and not self._suppress_storage_change + and not self._initializing_reference_data + and self._loaded_iso_source != (node, event.value) + ): + self.run_worker(lambda: self._load_isos(node, event.value), thread=True, exclusive=True) + + @on(Select.Changed, "#os-media-choice") + def on_os_media_changed(self) -> None: + self._sync_media_visibility() + + @on(Select.Changed, "#os-guest-type") + def on_os_guest_type_changed(self, event: Select.Changed) -> None: + if isinstance(event.value, str): + self._set_guest_version_options(event.value) + + @on(Checkbox.Changed, "#network-none") + def on_network_none_changed(self) -> None: + self._sync_network_visibility() + + @on(Checkbox.Changed, 
"#system-add-efi") + @on(Checkbox.Changed, "#system-tpm") + def on_system_dependency_changed(self) -> None: + self._sync_system_visibility() + + @on(Checkbox.Changed, "#memory-ballooning") + def on_memory_ballooning_changed(self) -> None: + self._sync_memory_visibility() + + @on(Select.Changed, "#disks-select") + def on_disk_selection_changed(self, event: Select.Changed) -> None: + if ( + not self._suppress_disk_selection_change + and isinstance(event.value, str) + and event.value != NO_DISK_SELECTED + and event.value.isdigit() + ): + new_index = int(event.value) + if new_index == self._selected_disk_index: + return + select = self.query_one("#disks-select", Select) + select.expanded = False + self.call_after_refresh(self._switch_selected_disk, new_index) + + @on(Button.Pressed, "#general-tag-add") + def on_add_tag_pressed(self) -> None: + tag = self._input("general-tag-input") + if tag and tag not in self._workflow.config.general.tags: + self._workflow.config.general.tags.append(tag) + self.query_one("#general-tag-input", Input).value = "" + self._set_select_options("general-tag-current", self._workflow.config.general.tags) + + @on(Button.Pressed, "#general-tag-use") + def on_use_existing_tag_pressed(self) -> None: + tag = self._select("general-tag-existing") + if tag and tag not in self._workflow.config.general.tags: + self._workflow.config.general.tags.append(tag) + self._set_select_options("general-tag-current", self._workflow.config.general.tags) + + @on(Button.Pressed, "#general-tag-remove") + def on_remove_tag_pressed(self) -> None: + tag = self._select("general-tag-current") + if tag in self._workflow.config.general.tags: + self._workflow.config.general.tags.remove(tag) + self._set_select_options("general-tag-current", self._workflow.config.general.tags) + + @on(Button.Pressed, "#disks-add") + def on_add_disk_pressed(self) -> None: + self._sync_selected_disk_from_widgets() + next_device = 0 + if self._workflow.config.disks: + next_device = max(disk.device 
for disk in self._workflow.config.disks) + 1 + storage = self._select("disk-storage") or self._workflow.config.system.efi_storage + disk = DiskConfig(device=next_device, storage=storage) + self._workflow.config.disks.append(disk) + self._selected_disk_index = len(self._workflow.config.disks) - 1 + self._load_selected_disk_into_widgets() + + @on(Button.Pressed, "#disks-remove") + def on_remove_disk_pressed(self) -> None: + if not self._workflow.config.disks: + return + self._workflow.config.disks.pop(self._selected_disk_index) + self._selected_disk_index = max(0, self._selected_disk_index - 1) + self._load_selected_disk_into_widgets() + + @on(Button.Pressed, "#wizard-back") + def on_back_pressed(self) -> None: + self._sync_all_from_widgets() + self._show_errors([]) + if self._workflow.current_step_index > 0: + self._workflow.current_step_index -= 1 + self._show_step() + + @on(Button.Pressed, "#wizard-next") + def on_next_pressed(self) -> None: + try: + self._sync_all_from_widgets() + except ValueError: + self._show_errors(["Numeric fields contain invalid values."]) + return + errors = validate_step( + self._workflow.current_step, + self._workflow.config, + self._settings, + self._workflow.reference_data, + ) + self._show_errors(errors) + if errors: + return + if self._workflow.current_step_index < len(WIZARD_STEPS) - 1: + self._workflow.current_step_index += 1 + self._show_step() + + @on(Button.Pressed, "#wizard-create") + def on_create_pressed(self) -> None: + if self._confirm_action_is_exit(): + self.app.exit() + return + try: + self._sync_all_from_widgets() + except ValueError: + self._show_errors(["Numeric fields contain invalid values."]) + return + errors = validate_all_steps( + self._workflow.config, self._settings, self._workflow.reference_data + ) + self._show_errors(errors) + if errors: + return + self.app.push_screen( + AutoStartConfirmModal(), + callback=self._handle_auto_start_choice, + ) + + def _handle_auto_start_choice(self, start_after_create: bool 
| None) -> None: + if start_after_create is None: + self._show_status("VM creation cancelled.") + self.call_after_refresh(self._focus_current_step, True) + return + self._workflow.submission.phase = "running" + self._workflow.submission.message = "Creating VM..." + self._update_confirmation() + self._update_confirm_actions() + self._show_status("Submitting VM creation request...") + self.run_worker( + lambda: self._submit_create(start_after_create), + thread=True, + exclusive=True, + ) + + def _submit_create(self, start_after_create: bool) -> None: + try: + result = self._service.create_vm( + self._workflow.config, + start_after_create=start_after_create, + ) + except ProxmoxPostCreateError as exc: + self._workflow.submission.phase = "partial" + self._workflow.submission.partial_success = True + self._workflow.submission.node = exc.node + self._workflow.submission.vmid = exc.vmid + self._workflow.submission.message = ( + f"VM {exc.vmid} on {exc.node} was created, but {exc.step} failed.\n{exc}" + ) + self.app.call_from_thread( + self._finalize_submit, False, self._workflow.submission.message + ) + return + except ProxmoxError as exc: + self._workflow.submission.phase = "error" + self._workflow.submission.message = f"VM creation failed: {exc}" + self.app.call_from_thread( + self._finalize_submit, False, self._workflow.submission.message + ) + return + + self._workflow.submission.phase = "success" + self._workflow.submission.node = result.node + self._workflow.submission.vmid = result.vmid + self._workflow.submission.message = ( + f"VM {result.vmid} ({result.name}) created on {result.node}." 
+ ) + self.app.call_from_thread(self._finalize_submit, True, self._workflow.submission.message) + + def _finalize_submit(self, success: bool, message: str) -> None: + self._update_confirmation() + self._update_confirm_actions() + self._show_status(message) + self.call_after_refresh(self._focus_current_step, True) + self.post_message(self.SubmitFinished(success, message)) diff --git a/src/pve_vm_setup/services/__init__.py b/src/pve_vm_setup/services/__init__.py new file mode 100644 index 0000000..1c25334 --- /dev/null +++ b/src/pve_vm_setup/services/__init__.py @@ -0,0 +1 @@ +"""Service layer for Proxmox access.""" diff --git a/src/pve_vm_setup/services/__pycache__/__init__.cpython-313.pyc b/src/pve_vm_setup/services/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000..01be44d Binary files /dev/null and b/src/pve_vm_setup/services/__pycache__/__init__.cpython-313.pyc differ diff --git a/src/pve_vm_setup/services/__pycache__/base.cpython-313.pyc b/src/pve_vm_setup/services/__pycache__/base.cpython-313.pyc new file mode 100644 index 0000000..3e06f20 Binary files /dev/null and b/src/pve_vm_setup/services/__pycache__/base.cpython-313.pyc differ diff --git a/src/pve_vm_setup/services/__pycache__/factory.cpython-313.pyc b/src/pve_vm_setup/services/__pycache__/factory.cpython-313.pyc new file mode 100644 index 0000000..82bab91 Binary files /dev/null and b/src/pve_vm_setup/services/__pycache__/factory.cpython-313.pyc differ diff --git a/src/pve_vm_setup/services/__pycache__/fake.cpython-313.pyc b/src/pve_vm_setup/services/__pycache__/fake.cpython-313.pyc new file mode 100644 index 0000000..bd8c7c8 Binary files /dev/null and b/src/pve_vm_setup/services/__pycache__/fake.cpython-313.pyc differ diff --git a/src/pve_vm_setup/services/__pycache__/proxmox.cpython-313.pyc b/src/pve_vm_setup/services/__pycache__/proxmox.cpython-313.pyc new file mode 100644 index 0000000..3ae6490 Binary files /dev/null and 
b/src/pve_vm_setup/services/__pycache__/proxmox.cpython-313.pyc differ diff --git a/src/pve_vm_setup/services/base.py b/src/pve_vm_setup/services/base.py new file mode 100644 index 0000000..c3f3575 --- /dev/null +++ b/src/pve_vm_setup/services/base.py @@ -0,0 +1,90 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING, Protocol + +if TYPE_CHECKING: + from ..models.workflow import VmConfig + + +@dataclass(frozen=True) +class Realm: + name: str + title: str + default: bool = False + + +@dataclass(frozen=True) +class AuthenticatedSession: + username: str + ticket: str + csrf_token: str | None = None + + +@dataclass(frozen=True) +class Node: + name: str + status: str | None = None + + +@dataclass(frozen=True) +class Pool: + poolid: str + comment: str | None = None + + +@dataclass(frozen=True) +class Storage: + storage: str + node: str + content: tuple[str, ...] + + +@dataclass(frozen=True) +class Bridge: + iface: str + active: bool = True + + +@dataclass(frozen=True) +class IsoImage: + volid: str + storage: str + node: str + + +@dataclass(frozen=True) +class VmCreationResult: + node: str + vmid: int + name: str + serial_console_configured: bool = True + ha_configured: bool = False + + +class ProxmoxService(Protocol): + mode: str + + def check_connectivity(self) -> str: ... + + def check_api_base(self) -> str: ... + + def load_realms(self) -> list[Realm]: ... + + def login(self, username: str, password: str, realm: str) -> AuthenticatedSession: ... + + def load_nodes(self) -> list[Node]: ... + + def load_next_vmid(self) -> int: ... + + def load_pools(self) -> list[Pool]: ... + + def load_existing_tags(self) -> list[str]: ... + + def load_bridges(self, node: str) -> list[Bridge]: ... + + def load_storages(self, node: str) -> list[Storage]: ... + + def load_isos(self, node: str, storage: str) -> list[IsoImage]: ... + + def create_vm(self, config: VmConfig, start_after_create: bool = False) -> VmCreationResult: ... 
diff --git a/src/pve_vm_setup/services/factory.py b/src/pve_vm_setup/services/factory.py new file mode 100644 index 0000000..1421d3e --- /dev/null +++ b/src/pve_vm_setup/services/factory.py @@ -0,0 +1,12 @@ +from ..settings import AppSettings +from .base import ProxmoxService +from .fake import FakeProxmoxService +from .proxmox import LiveProxmoxService, ProxmoxApiClient + + +class ProxmoxServiceFactory: + @staticmethod + def create(settings: AppSettings) -> ProxmoxService: + if settings.is_live_configured: + return LiveProxmoxService(ProxmoxApiClient(settings)) + return FakeProxmoxService() diff --git a/src/pve_vm_setup/services/fake.py b/src/pve_vm_setup/services/fake.py new file mode 100644 index 0000000..c499c26 --- /dev/null +++ b/src/pve_vm_setup/services/fake.py @@ -0,0 +1,82 @@ +from ..models.workflow import VmConfig +from .base import ( + AuthenticatedSession, + Bridge, + IsoImage, + Node, + Pool, + ProxmoxService, + Realm, + Storage, + VmCreationResult, +) + + +class FakeProxmoxService(ProxmoxService): + mode = "fake" + + def __init__(self) -> None: + self.created_vms: list[VmCreationResult] = [] + self.start_after_create_requests: list[bool] = [] + + def check_connectivity(self) -> str: + return "fake transport reachable" + + def check_api_base(self) -> str: + return "fake-api-base" + + def load_realms(self) -> list[Realm]: + return [ + Realm(name="pam", title="Linux PAM standard authentication", default=True), + Realm(name="pve", title="Proxmox VE authentication server"), + ] + + def login(self, username: str, password: str, realm: str) -> AuthenticatedSession: + if not username or not password: + raise ValueError("Username and password are required.") + return AuthenticatedSession(username=f"{username}@{realm}", ticket="fake-ticket") + + def load_nodes(self) -> list[Node]: + return [Node(name="fake-node-01", status="online")] + + def load_next_vmid(self) -> int: + return 123 + + def load_pools(self) -> list[Pool]: + return [Pool(poolid="lab"), 
Pool(poolid="sandbox")] + + def load_existing_tags(self) -> list[str]: + return ["codex-e2e", "linux", "test"] + + def load_bridges(self, node: str) -> list[Bridge]: + return [Bridge(iface="vmbr9"), Bridge(iface="vmbr0")] + + def load_storages(self, node: str) -> list[Storage]: + return [ + Storage(storage="cephfs", node=node, content=("iso", "backup")), + Storage(storage="ceph-pool", node=node, content=("images",)), + ] + + def load_isos(self, node: str, storage: str) -> list[IsoImage]: + return [ + IsoImage( + volid=f"{storage}:iso/nixos-minimal-24-11.1234abcd-x86_64-linux.iso", + storage=storage, + node=node, + ) + ] + + def create_vm(self, config: VmConfig, start_after_create: bool = False) -> VmCreationResult: + name = config.general.name + if not name.startswith("codex-e2e-"): + name = f"codex-e2e-{name}" + self.start_after_create_requests.append(start_after_create) + result = VmCreationResult( + node=config.general.node, + vmid=config.general.vmid, + name=name, + serial_console_configured=True, + ha_configured=config.general.ha_enabled, + ) + self.created_vms.append(result) + return result diff --git a/src/pve_vm_setup/services/proxmox.py b/src/pve_vm_setup/services/proxmox.py new file mode 100644 index 0000000..7c8a305 --- /dev/null +++ b/src/pve_vm_setup/services/proxmox.py @@ -0,0 +1,399 @@ +from __future__ import annotations + +import time +from collections.abc import Callable + +import httpx + +from ..domain import build_create_payload +from ..errors import ( + ProxmoxApiError, + ProxmoxAuthError, + ProxmoxConnectError, + ProxmoxError, + ProxmoxPostCreateError, + ProxmoxTlsError, + ProxmoxTransportError, + ProxmoxUnexpectedResponseError, +) +from ..models.workflow import VmConfig +from ..settings import AppSettings +from .base import ( + AuthenticatedSession, + Bridge, + IsoImage, + Node, + Pool, + ProxmoxService, + Realm, + Storage, + VmCreationResult, +) + + +def _looks_like_tls_error(message: str) -> bool: + upper = message.upper() + indicators = 
("SSL", "TLS", "CERTIFICATE", "WRONG_VERSION", "EOF") + return any(token in upper for token in indicators) + + +class ProxmoxApiClient: + def __init__( + self, + settings: AppSettings, + *, + transport: httpx.BaseTransport | None = None, + client_factory: Callable[..., httpx.Client] | None = None, + ) -> None: + settings.validate_live_requirements() + self._settings = settings + factory = client_factory or httpx.Client + self._client = factory( + base_url=settings.api_url, + verify=settings.proxmox_verify_tls, + timeout=settings.request_timeout_seconds, + follow_redirects=True, + transport=transport, + ) + self._ticket: str | None = None + self._csrf_token: str | None = None + + def close(self) -> None: + self._client.close() + + def probe_transport(self) -> str: + try: + response = self._client.get(self._settings.proxmox_url or "/") + return f"HTTP {response.status_code}" + except httpx.ConnectError as exc: + raise ProxmoxConnectError("Unable to connect to the Proxmox host.") from exc + except httpx.TransportError as exc: + message = str(exc) + if _looks_like_tls_error(message): + raise ProxmoxTlsError("TLS handshake or verification failed.") from exc + raise ProxmoxTransportError("Transport error while probing Proxmox.") from exc + + def check_api_base(self) -> str: + payload = self._request_json("GET", "/access/domains") + if not isinstance(payload, list): + raise ProxmoxUnexpectedResponseError("API base check did not return a realms list.") + return "access/domains" + + def login(self, username: str, password: str, realm: str) -> AuthenticatedSession: + full_username = username if "@" in username else f"{username}@{realm}" + payload = self._request_json( + "POST", + "/access/ticket", + data={"username": full_username, "password": password}, + ) + ticket = payload.get("ticket") + csrf_token = payload.get("CSRFPreventionToken") + if not isinstance(ticket, str) or not ticket: + raise ProxmoxUnexpectedResponseError("Login response did not include a ticket.") + 
self._ticket = ticket + self._csrf_token = csrf_token if isinstance(csrf_token, str) else None + self._client.cookies.set("PVEAuthCookie", ticket) + return AuthenticatedSession( + username=full_username, + ticket=ticket, + csrf_token=self._csrf_token, + ) + + def load_realms(self) -> list[Realm]: + payload = self._request_json("GET", "/access/domains") + realms: list[Realm] = [] + if not isinstance(payload, list): + raise ProxmoxUnexpectedResponseError("Realms payload was not a list.") + for item in payload: + if not isinstance(item, dict): + continue + realm = item.get("realm") + title = item.get("comment") or item.get("commentary") or realm + if isinstance(realm, str) and isinstance(title, str): + realms.append( + Realm( + name=realm, + title=title, + default=bool(item.get("default")), + ) + ) + return realms + + def load_nodes(self) -> list[Node]: + payload = self._request_json("GET", "/nodes", requires_auth=True) + if not isinstance(payload, list): + raise ProxmoxUnexpectedResponseError("Nodes payload was not a list.") + return [ + Node(name=item["node"], status=item.get("status")) + for item in payload + if isinstance(item, dict) and isinstance(item.get("node"), str) + ] + + def load_next_vmid(self) -> int: + payload = self._request_json("GET", "/cluster/nextid", requires_auth=True) + if isinstance(payload, int): + return payload + if isinstance(payload, str) and payload.isdigit(): + return int(payload) + raise ProxmoxUnexpectedResponseError("Next VM ID payload was not an integer.") + + def load_pools(self) -> list[Pool]: + payload = self._request_json("GET", "/pools", requires_auth=True) + if not isinstance(payload, list): + raise ProxmoxUnexpectedResponseError("Pools payload was not a list.") + return [ + Pool(poolid=item["poolid"], comment=item.get("comment")) + for item in payload + if isinstance(item, dict) and isinstance(item.get("poolid"), str) + ] + + def load_existing_tags(self) -> list[str]: + payload = self._request_json( + "GET", + 
"/cluster/resources", + params={"type": "vm"}, + requires_auth=True, + ) + if not isinstance(payload, list): + raise ProxmoxUnexpectedResponseError("Cluster resource payload was not a list.") + tags: set[str] = set() + for item in payload: + if not isinstance(item, dict): + continue + raw_tags = item.get("tags") + if not isinstance(raw_tags, str): + continue + for tag in raw_tags.replace(",", ";").split(";"): + normalized = tag.strip() + if normalized: + tags.add(normalized) + return sorted(tags) + + def load_bridges(self, node: str) -> list[Bridge]: + payload = self._request_json("GET", f"/nodes/{node}/network", requires_auth=True) + if not isinstance(payload, list): + raise ProxmoxUnexpectedResponseError("Network payload was not a list.") + bridges = [ + Bridge(iface=item["iface"], active=bool(item.get("active", True))) + for item in payload + if isinstance(item, dict) + and item.get("type") == "bridge" + and isinstance(item.get("iface"), str) + ] + return sorted(bridges, key=lambda bridge: bridge.iface) + + def load_storages(self, node: str) -> list[Storage]: + payload = self._request_json("GET", f"/nodes/{node}/storage", requires_auth=True) + if not isinstance(payload, list): + raise ProxmoxUnexpectedResponseError("Storage payload was not a list.") + storages: list[Storage] = [] + for item in payload: + if not isinstance(item, dict): + continue + storage = item.get("storage") + content = tuple( + part.strip() + for part in str(item.get("content", "")).split(",") + if part and isinstance(part, str) + ) + if isinstance(storage, str): + storages.append(Storage(storage=storage, node=node, content=content)) + return storages + + def load_isos(self, node: str, storage: str) -> list[IsoImage]: + payload = self._request_json( + "GET", + f"/nodes/{node}/storage/{storage}/content", + params={"content": "iso"}, + requires_auth=True, + ) + if not isinstance(payload, list): + raise ProxmoxUnexpectedResponseError("ISO payload was not a list.") + return [ + 
IsoImage(volid=item["volid"], storage=storage, node=node) + for item in payload + if isinstance(item, dict) and isinstance(item.get("volid"), str) + ] + + def create_vm(self, config: VmConfig, start_after_create: bool = False) -> VmCreationResult: + payload = build_create_payload(config, self._settings) + node = config.general.node + vmid = config.general.vmid + name = payload["name"] + + upid = self._request_json( + "POST", + f"/nodes/{node}/qemu", + data={key: str(value) for key, value in payload.items()}, + requires_auth=True, + ) + if isinstance(upid, str) and upid.startswith("UPID:"): + self._wait_for_task(node, upid) + + try: + serial_result = self._request_json( + "PUT", + f"/nodes/{node}/qemu/{vmid}/config", + data={"serial0": "socket"}, + requires_auth=True, + ) + if isinstance(serial_result, str) and serial_result.startswith("UPID:"): + self._wait_for_task(node, serial_result) + except ProxmoxError as exc: + raise ProxmoxPostCreateError( + node, + vmid, + "serial-console", + f"VM was created but serial console configuration failed: {exc}", + ) from exc + + if config.general.ha_enabled: + try: + ha_result = self._request_json( + "POST", + "/cluster/ha/resources", + data={ + "sid": f"vm:{vmid}", + "state": "started" if start_after_create else "stopped", + }, + requires_auth=True, + ) + if isinstance(ha_result, str) and ha_result.startswith("UPID:"): + self._wait_for_task(node, ha_result) + except ProxmoxError as exc: + raise ProxmoxPostCreateError( + node, + vmid, + "high-availability", + f"VM was created but HA configuration failed: {exc}", + ) from exc + elif start_after_create: + try: + start_result = self._request_json( + "POST", + f"/nodes/{node}/qemu/{vmid}/status/start", + requires_auth=True, + ) + if isinstance(start_result, str) and start_result.startswith("UPID:"): + self._wait_for_task(node, start_result) + except ProxmoxError as exc: + raise ProxmoxPostCreateError( + node, + vmid, + "start", + f"VM was created but automatic start failed: {exc}", 
+ ) from exc + + return VmCreationResult( + node=node, + vmid=vmid, + name=str(name), + serial_console_configured=True, + ha_configured=config.general.ha_enabled, + ) + + def _wait_for_task(self, node: str, upid: str) -> None: + deadline = time.time() + self._settings.request_timeout_seconds + while time.time() < deadline: + payload = self._request_json( + "GET", + f"/nodes/{node}/tasks/{upid}/status", + requires_auth=True, + ) + if not isinstance(payload, dict): + raise ProxmoxUnexpectedResponseError("Task status payload was not an object.") + if payload.get("status") == "stopped": + if payload.get("exitstatus") != "OK": + raise ProxmoxApiError(f"Task {upid} failed with {payload.get('exitstatus')}.") + return + time.sleep(0.5) + raise ProxmoxTransportError(f"Timed out while waiting for task {upid}.") + + def _request_json( + self, + method: str, + path: str, + *, + requires_auth: bool = False, + **kwargs: object, + ) -> object: + headers: dict[str, str] = {} + if requires_auth: + if not self._ticket: + raise ProxmoxAuthError("Not authenticated with Proxmox.") + if method.upper() not in {"GET", "HEAD", "OPTIONS"} and self._csrf_token: + headers["CSRFPreventionToken"] = self._csrf_token + + try: + response = self._client.request(method, path, headers=headers, **kwargs) + response.raise_for_status() + except httpx.ConnectError as exc: + raise ProxmoxConnectError("Unable to connect to the Proxmox API.") from exc + except httpx.TransportError as exc: + message = str(exc) + if _looks_like_tls_error(message): + raise ProxmoxTlsError("TLS handshake or verification failed.") from exc + raise ProxmoxTransportError("Transport error while calling the Proxmox API.") from exc + except httpx.HTTPStatusError as exc: + status_code = exc.response.status_code + if status_code in {401, 403}: + raise ProxmoxAuthError("Authentication was rejected by Proxmox.") from exc + raise ProxmoxApiError( + f"Proxmox API returned HTTP {status_code}.", + status_code=status_code, + ) from exc + + 
try: + payload = response.json() + except ValueError as exc: + raise ProxmoxUnexpectedResponseError("Expected a JSON response from Proxmox.") from exc + + if not isinstance(payload, dict) or "data" not in payload: + raise ProxmoxUnexpectedResponseError("Expected a top-level data field in the response.") + return payload["data"] + + +class LiveProxmoxService(ProxmoxService): + mode = "live" + + def __init__(self, client: ProxmoxApiClient) -> None: + self._client = client + + def check_connectivity(self) -> str: + return self._client.probe_transport() + + def check_api_base(self) -> str: + return self._client.check_api_base() + + def load_realms(self) -> list[Realm]: + return self._client.load_realms() + + def login(self, username: str, password: str, realm: str) -> AuthenticatedSession: + return self._client.login(username, password, realm) + + def load_nodes(self) -> list[Node]: + return self._client.load_nodes() + + def load_next_vmid(self) -> int: + return self._client.load_next_vmid() + + def load_pools(self) -> list[Pool]: + return self._client.load_pools() + + def load_existing_tags(self) -> list[str]: + return self._client.load_existing_tags() + + def load_bridges(self, node: str) -> list[Bridge]: + return self._client.load_bridges(node) + + def load_storages(self, node: str) -> list[Storage]: + return self._client.load_storages(node) + + def load_isos(self, node: str, storage: str) -> list[IsoImage]: + return self._client.load_isos(node, storage) + + def create_vm(self, config: VmConfig, start_after_create: bool = False) -> VmCreationResult: + return self._client.create_vm(config, start_after_create=start_after_create) + + def close(self) -> None: + self._client.close() diff --git a/src/pve_vm_setup/settings.py b/src/pve_vm_setup/settings.py new file mode 100644 index 0000000..d3171e0 --- /dev/null +++ b/src/pve_vm_setup/settings.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +import os +from collections.abc import Mapping +from dataclasses 
import dataclass +from pathlib import Path +from urllib.parse import urlparse + +from dotenv import dotenv_values + +from .errors import SettingsError + + +def _parse_bool(value: str | None, *, default: bool) -> bool: + if value is None or value == "": + return default + normalized = value.strip().lower() + if normalized in {"1", "true", "yes", "on"}: + return True + if normalized in {"0", "false", "no", "off"}: + return False + raise SettingsError(f"Invalid boolean value: {value!r}") + + +def _parse_int(value: str | None, *, default: int) -> int: + if value is None or value == "": + return default + try: + return int(value) + except ValueError as exc: + raise SettingsError(f"Invalid integer value: {value!r}") from exc + + +@dataclass(frozen=True) +class LiveSafetyPolicy: + prevent_create: bool + enable_test_mode: bool + test_node: str | None + test_pool: str | None + test_tag: str + test_vm_name_prefix: str + keep_failed_vm: bool + + def validate(self) -> None: + if self.enable_test_mode: + if not self.test_node: + raise SettingsError( + "PROXMOX_TEST_NODE is required when PROXMOX_ENABLE_TEST_MODE=true." + ) + if not self.test_pool: + raise SettingsError( + "PROXMOX_TEST_POOL is required when PROXMOX_ENABLE_TEST_MODE=true." + ) + if not self.test_tag: + raise SettingsError( + "PROXMOX_TEST_TAG is required when PROXMOX_ENABLE_TEST_MODE=true." + ) + if not self.test_vm_name_prefix: + raise SettingsError( + "PROXMOX_TEST_VM_NAME_PREFIX is required when " + "PROXMOX_ENABLE_TEST_MODE=true." 
+ ) + + @property + def allow_create(self) -> bool: + return not self.prevent_create + + def effective_vm_name(self, requested_name: str) -> str: + if not self.enable_test_mode: + return requested_name + if requested_name.startswith(self.test_vm_name_prefix): + return requested_name + return f"{self.test_vm_name_prefix}{requested_name}" + + +@dataclass(frozen=True) +class AppSettings: + proxmox_url: str | None + proxmox_api_base: str + proxmox_user: str | None + proxmox_password: str | None + proxmox_realm: str | None + proxmox_verify_tls: bool + request_timeout_seconds: int + safety_policy: LiveSafetyPolicy + + @classmethod + def from_env( + cls, + env: Mapping[str, str] | None = None, + *, + load_dotenv_file: bool = True, + dotenv_path: str | Path = ".env", + ) -> AppSettings: + raw: dict[str, str] = {} + if load_dotenv_file: + raw.update( + { + key: value + for key, value in dotenv_values(dotenv_path).items() + if value is not None + } + ) + raw.update(os.environ if env is None else env) + + api_base = raw.get("PROXMOX_API_BASE", "/api2/json").strip() or "/api2/json" + if not api_base.startswith("/"): + api_base = f"/{api_base}" + + safety_policy = LiveSafetyPolicy( + prevent_create=_parse_bool(raw.get("PROXMOX_PREVENT_CREATE"), default=False), + enable_test_mode=_parse_bool(raw.get("PROXMOX_ENABLE_TEST_MODE"), default=False), + test_node=raw.get("PROXMOX_TEST_NODE") or None, + test_pool=raw.get("PROXMOX_TEST_POOL") or None, + test_tag=raw.get("PROXMOX_TEST_TAG", "codex-e2e").strip() or "codex-e2e", + test_vm_name_prefix=raw.get("PROXMOX_TEST_VM_NAME_PREFIX", "codex-e2e-").strip() + or "codex-e2e-", + keep_failed_vm=_parse_bool(raw.get("PROXMOX_KEEP_FAILED_VM"), default=True), + ) + safety_policy.validate() + + proxmox_url = (raw.get("PROXMOX_URL") or "").strip() or None + if proxmox_url is not None: + proxmox_url = proxmox_url.rstrip("/") + + return cls( + proxmox_url=proxmox_url, + proxmox_api_base=api_base, + proxmox_user=(raw.get("PROXMOX_USER") or 
"").strip() or None, + proxmox_password=raw.get("PROXMOX_PASSWORD") or None, + proxmox_realm=(raw.get("PROXMOX_REALM") or "").strip() or None, + proxmox_verify_tls=_parse_bool(raw.get("PROXMOX_VERIFY_TLS"), default=False), + request_timeout_seconds=_parse_int( + raw.get("PROXMOX_REQUEST_TIMEOUT_SECONDS"), default=15 + ), + safety_policy=safety_policy, + ) + + @property + def is_live_configured(self) -> bool: + return bool(self.proxmox_url and self.proxmox_user and self.proxmox_password) + + @property + def effective_username(self) -> str | None: + if not self.proxmox_user or not self.proxmox_realm: + return None + if "@" in self.proxmox_user: + return self.proxmox_user + return f"{self.proxmox_user}@{self.proxmox_realm}" + + @property + def sanitized_host(self) -> str: + if not self.proxmox_url: + return "not-configured" + parsed = urlparse(self.proxmox_url) + host = parsed.hostname or parsed.netloc or self.proxmox_url + if parsed.port: + return f"{host}:{parsed.port}" + return host + + @property + def api_url(self) -> str: + if not self.proxmox_url: + raise SettingsError("PROXMOX_URL is required for live Proxmox access.") + return f"{self.proxmox_url}{self.proxmox_api_base}" + + def validate_live_requirements(self) -> None: + missing: list[str] = [] + if not self.proxmox_url: + missing.append("PROXMOX_URL") + if not self.proxmox_user: + missing.append("PROXMOX_USER") + if not self.proxmox_password: + missing.append("PROXMOX_PASSWORD") + if not self.proxmox_realm: + missing.append("PROXMOX_REALM") + if missing: + joined = ", ".join(missing) + raise SettingsError(f"Missing live Proxmox configuration: {joined}.") diff --git a/src/pve_vm_setup/terminal_compat.py b/src/pve_vm_setup/terminal_compat.py new file mode 100644 index 0000000..8baa432 --- /dev/null +++ b/src/pve_vm_setup/terminal_compat.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +import asyncio +import os +import signal +import sys +import termios +import tty +from threading import Thread + 
+from textual import events +from textual.driver import Driver +from textual.geometry import Size +from textual.messages import TerminalSupportInBandWindowResize + + +def build_driver_class() -> type[Driver] | None: + """Return an opt-in compatibility driver for problematic terminals. + + Textual's stock driver is the default because it is the best-tested path. + The compatibility driver remains available behind an env flag for targeted + debugging only. + """ + + if os.getenv("YOUR_APP_ENABLE_COMPAT_DRIVER", "").lower() not in {"1", "true", "yes"}: + return None + + if sys.platform.startswith("win"): + return None + + from textual.drivers._writer_thread import WriterThread + from textual.drivers.linux_driver import LinuxDriver + + class CompatLinuxDriver(LinuxDriver): + """Terminal driver with advanced terminal features disabled. + + This avoids terminal-specific issues around Kitty keyboard mode, + mouse tracking, sync mode probing, and bracketed paste. + """ + + def start_application_mode(self) -> None: + def _stop_again(*_) -> None: + os.kill(os.getpid(), signal.SIGSTOP) + + if os.isatty(self.fileno): + signal.signal(signal.SIGTTOU, _stop_again) + signal.signal(signal.SIGTTIN, _stop_again) + try: + termios.tcsetattr( + self.fileno, termios.TCSANOW, termios.tcgetattr(self.fileno) + ) + except termios.error: + return + finally: + signal.signal(signal.SIGTTOU, signal.SIG_DFL) + signal.signal(signal.SIGTTIN, signal.SIG_DFL) + + loop = asyncio.get_running_loop() + + def send_size_event() -> None: + width, height = self._get_terminal_size() + textual_size = Size(width, height) + event = events.Resize(textual_size, textual_size) + asyncio.run_coroutine_threadsafe(self._app._post_message(event), loop=loop) + + self._writer_thread = WriterThread(self._file) + self._writer_thread.start() + + def on_terminal_resize(signum, stack) -> None: + if not self._in_band_window_resize: + send_size_event() + + signal.signal(signal.SIGWINCH, on_terminal_resize) + + 
self.write("\x1b[?1049h") + + try: + self.attrs_before = termios.tcgetattr(self.fileno) + except termios.error: + self.attrs_before = None + + try: + newattr = termios.tcgetattr(self.fileno) + except termios.error: + pass + else: + newattr[tty.LFLAG] = self._patch_lflag(newattr[tty.LFLAG]) + newattr[tty.IFLAG] = self._patch_iflag(newattr[tty.IFLAG]) + newattr[tty.CC][termios.VMIN] = 1 + try: + termios.tcsetattr(self.fileno, termios.TCSANOW, newattr) + except termios.error: + pass + + self.write("\x1b[?25l") + self.flush() + + self._key_thread = Thread(target=self._run_input_thread) + send_size_event() + self._key_thread.start() + self._disable_line_wrap() + + if self._must_signal_resume: + self._must_signal_resume = False + asyncio.run_coroutine_threadsafe( + self._app._post_message(self.SignalResume()), + loop=loop, + ) + + def stop_application_mode(self) -> None: + self._enable_line_wrap() + self.disable_input() + + if self.attrs_before is not None: + try: + termios.tcsetattr(self.fileno, termios.TCSANOW, self.attrs_before) + except termios.error: + pass + + self.write("\x1b[?1049l") + self.write("\x1b[?25h") + self.flush() + + def _request_terminal_sync_mode_support(self) -> None: + return + + def _disable_in_band_window_resize(self) -> None: + self._in_band_window_resize = False + + async def _on_terminal_supports_in_band_window_resize( + self, message: TerminalSupportInBandWindowResize + ) -> None: + self._in_band_window_resize = False + + return CompatLinuxDriver + + +def apply_runtime_compatibility() -> None: + os.environ.setdefault("TEXTUAL_ALLOW_SIGNALS", "1") + signal.signal(signal.SIGINT, signal.default_int_handler) diff --git a/src/pve_vm_setup/widgets/__init__.py b/src/pve_vm_setup/widgets/__init__.py new file mode 100644 index 0000000..05649f9 --- /dev/null +++ b/src/pve_vm_setup/widgets/__init__.py @@ -0,0 +1 @@ +"""Reusable Textual widgets.""" diff --git a/tests/__pycache__/conftest.cpython-313-pytest-8.4.2.pyc 
b/tests/__pycache__/conftest.cpython-313-pytest-8.4.2.pyc new file mode 100644 index 0000000..f39eccb Binary files /dev/null and b/tests/__pycache__/conftest.cpython-313-pytest-8.4.2.pyc differ diff --git a/tests/__pycache__/test_app.cpython-313-pytest-8.4.2.pyc b/tests/__pycache__/test_app.cpython-313-pytest-8.4.2.pyc new file mode 100644 index 0000000..6380cfd Binary files /dev/null and b/tests/__pycache__/test_app.cpython-313-pytest-8.4.2.pyc differ diff --git a/tests/__pycache__/test_doctor.cpython-313-pytest-8.4.2.pyc b/tests/__pycache__/test_doctor.cpython-313-pytest-8.4.2.pyc new file mode 100644 index 0000000..ae034ec Binary files /dev/null and b/tests/__pycache__/test_doctor.cpython-313-pytest-8.4.2.pyc differ diff --git a/tests/__pycache__/test_domain.cpython-313-pytest-8.4.2.pyc b/tests/__pycache__/test_domain.cpython-313-pytest-8.4.2.pyc new file mode 100644 index 0000000..99b4bfe Binary files /dev/null and b/tests/__pycache__/test_domain.cpython-313-pytest-8.4.2.pyc differ diff --git a/tests/__pycache__/test_factory.cpython-313-pytest-8.4.2.pyc b/tests/__pycache__/test_factory.cpython-313-pytest-8.4.2.pyc new file mode 100644 index 0000000..1fb5b0c Binary files /dev/null and b/tests/__pycache__/test_factory.cpython-313-pytest-8.4.2.pyc differ diff --git a/tests/__pycache__/test_proxmox_client.cpython-313-pytest-8.4.2.pyc b/tests/__pycache__/test_proxmox_client.cpython-313-pytest-8.4.2.pyc new file mode 100644 index 0000000..faa11ca Binary files /dev/null and b/tests/__pycache__/test_proxmox_client.cpython-313-pytest-8.4.2.pyc differ diff --git a/tests/__pycache__/test_settings.cpython-313-pytest-8.4.2.pyc b/tests/__pycache__/test_settings.cpython-313-pytest-8.4.2.pyc new file mode 100644 index 0000000..7c12438 Binary files /dev/null and b/tests/__pycache__/test_settings.cpython-313-pytest-8.4.2.pyc differ diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..c370d73 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,16 @@ 
+from __future__ import annotations + +import pytest + + +def pytest_collection_modifyitems(config: pytest.Config, items: list[pytest.Item]) -> None: + markexpr = (config.option.markexpr or "").strip() + if markexpr: + return + + skip_live = pytest.mark.skip( + reason="Live tests run only via `pytest -m live` or `pytest -m live_create`." + ) + for item in items: + if "live" in item.keywords or "live_create" in item.keywords: + item.add_marker(skip_live) diff --git a/tests/integration/__pycache__/test_live_proxmox.cpython-313-pytest-8.4.2.pyc b/tests/integration/__pycache__/test_live_proxmox.cpython-313-pytest-8.4.2.pyc new file mode 100644 index 0000000..74f68ad Binary files /dev/null and b/tests/integration/__pycache__/test_live_proxmox.cpython-313-pytest-8.4.2.pyc differ diff --git a/tests/integration/test_live_proxmox.py b/tests/integration/test_live_proxmox.py new file mode 100644 index 0000000..eff48b5 --- /dev/null +++ b/tests/integration/test_live_proxmox.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +import pytest + +from pve_vm_setup.services.factory import ProxmoxServiceFactory +from pve_vm_setup.settings import AppSettings + + +def _load_live_settings_or_skip() -> AppSettings: + settings = AppSettings.from_env() + try: + settings.validate_live_requirements() + except Exception as exc: # pragma: no cover - only hit outside configured environments + pytest.skip(f"Live environment is not configured: {exc}") + return settings + + +@pytest.mark.live +def test_live_read_only_reference_loading() -> None: + settings = _load_live_settings_or_skip() + service = ProxmoxServiceFactory.create(settings) + + assert service.mode == "live" + assert service.check_connectivity() + assert service.check_api_base() + + realms = service.load_realms() + assert realms + + service.login( + settings.proxmox_user or "", + settings.proxmox_password or "", + settings.proxmox_realm or "", + ) + nodes = service.load_nodes() + assert nodes + + pools = service.load_pools() + 
assert isinstance(pools, list) + + tags = service.load_existing_tags() + assert isinstance(tags, list) + + probe_node = settings.safety_policy.test_node or nodes[0].name + storages = service.load_storages(probe_node) + assert isinstance(storages, list) + iso_storages = [storage for storage in storages if "iso" in storage.content] + if iso_storages: + isos = service.load_isos(probe_node, iso_storages[0].storage) + assert isinstance(isos, list) + + +@pytest.mark.live_create +def test_live_create_path_requires_explicit_opt_in() -> None: + settings = _load_live_settings_or_skip() + if not settings.safety_policy.allow_create: + pytest.skip("Set PROXMOX_PREVENT_CREATE=false to enable live create tests.") + if settings.safety_policy.enable_test_mode: + assert settings.safety_policy.test_node diff --git a/tests/test_app.py b/tests/test_app.py new file mode 100644 index 0000000..dd7411b --- /dev/null +++ b/tests/test_app.py @@ -0,0 +1,590 @@ +from __future__ import annotations + +import asyncio +import time +from collections import Counter + +import pytest +from textual.app import App, ComposeResult +from textual.containers import ScrollableContainer +from textual.widgets import Button, Checkbox, Input, Select, Static + +from pve_vm_setup.app import PveVmSetupApp +from pve_vm_setup.models.workflow import WorkflowState +from pve_vm_setup.screens.login import LoginView +from pve_vm_setup.screens.wizard import NO_DISK_SELECTED, AutoStartConfirmModal, WizardView +from pve_vm_setup.services.fake import FakeProxmoxService +from pve_vm_setup.settings import AppSettings + + +class LoginHarnessApp(App[None]): + def compose(self) -> ComposeResult: + yield LoginView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + FakeProxmoxService(), + ) + + +async def wait_for_wizard_ready( + pilot, + app: App[None], + *, + attempts: int = 12, + delay: float = 0.1, +) -> None: + for _ in range(attempts): + await pilot.pause(delay) + if ( + app.query_one("#general-vmid", 
Input).value == "123" + and app.query_one("#general-node", Select).value == "fake-node-01" + and app.query_one("#os-storage", Select).value == "cephfs" + ): + return + raise AssertionError("Timed out waiting for wizard reference data to load.") + + +@pytest.mark.asyncio +async def test_login_view_authenticates_with_pilot() -> None: + app = LoginHarnessApp() + + async with app.run_test() as pilot: + await pilot.pause() + assert str(app.query_one("#title", Static).renderable) == "Proxmox Login" + assert app.focused is app.query_one("#username", Input) + + app.query_one("#username", Input).value = "junior" + app.query_one("#password", Input).value = "secret" + app.query_one("#connect", Button).press() + await pilot.pause() + + assert "Authenticated as junior@pam." == str(app.query_one("#status", Static).renderable) + + +@pytest.mark.asyncio +async def test_main_app_mounts_wizard_only_after_login() -> None: + service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + await pilot.pause() + assert app.query(LoginView) + assert not app.query(WizardView) + + login = app.query_one(LoginView) + login.post_message(LoginView.Authenticated("junior@pam", "pam")) + await pilot.pause() + await pilot.pause() + await wait_for_wizard_ready(pilot, app) + + assert not app.query(LoginView) + assert app.query(WizardView) + assert app.focused is app.query_one("#general-name", Input) + + +@pytest.mark.asyncio +async def test_wizard_activation_focuses_first_editable_field() -> None: + service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + await 
wait_for_wizard_ready(pilot, app) + + assert app.focused is app.query_one("#general-name", Input) + + +@pytest.mark.asyncio +async def test_wizard_initial_activation_does_not_duplicate_live_reference_loads() -> None: + class CountingService(FakeProxmoxService): + def __init__(self) -> None: + super().__init__() + self.calls: list[str] = [] + + def load_nodes(self): + self.calls.append("load_nodes") + return super().load_nodes() + + def load_pools(self): + self.calls.append("load_pools") + return super().load_pools() + + def load_existing_tags(self): + self.calls.append("load_existing_tags") + return super().load_existing_tags() + + def load_next_vmid(self): + self.calls.append("load_next_vmid") + return super().load_next_vmid() + + def load_storages(self, node: str): + self.calls.append(f"load_storages:{node}") + return super().load_storages(node) + + def load_bridges(self, node: str): + self.calls.append(f"load_bridges:{node}") + return super().load_bridges(node) + + def load_isos(self, node: str, storage: str): + self.calls.append(f"load_isos:{node}:{storage}") + return super().load_isos(node, storage) + + service = CountingService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + for _ in range(6): + await pilot.pause() + + assert Counter(service.calls) == Counter( + [ + "load_nodes", + "load_pools", + "load_existing_tags", + "load_next_vmid", + "load_storages:fake-node-01", + "load_bridges:fake-node-01", + "load_isos:fake-node-01:cephfs", + ] + ) + + +@pytest.mark.asyncio +async def test_wizard_initial_activation_loads_reference_data_concurrently() -> None: + class SlowService(FakeProxmoxService): + delay = 0.15 + + def load_nodes(self): + 
time.sleep(self.delay) + return super().load_nodes() + + def load_pools(self): + time.sleep(self.delay) + return super().load_pools() + + def load_existing_tags(self): + time.sleep(self.delay) + return super().load_existing_tags() + + def load_next_vmid(self): + time.sleep(self.delay) + return super().load_next_vmid() + + def load_storages(self, node: str): + time.sleep(self.delay) + return super().load_storages(node) + + def load_bridges(self, node: str): + time.sleep(self.delay) + return super().load_bridges(node) + + def load_isos(self, node: str, storage: str): + time.sleep(self.delay) + return super().load_isos(node, storage) + + service = SlowService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + + started_at = time.perf_counter() + wizard.activate() + + await wait_for_wizard_ready(pilot, app) + + elapsed = time.perf_counter() - started_at + + assert elapsed < 1.0 + + +@pytest.mark.asyncio +async def test_wizard_uses_scrollable_sections_with_border_titles() -> None: + service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + await wait_for_wizard_ready(pilot, app) + + general_section = app.query_one("#general-section", ScrollableContainer) + os_section = app.query_one("#os-section", ScrollableContainer) + + assert str(general_section.border_title).strip() == "General" + assert str(os_section.border_title).strip() == "Operating System" + + 
+@pytest.mark.asyncio +async def test_wizard_hides_os_fields_based_on_media_choice() -> None: + service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + await wait_for_wizard_ready(pilot, app) + + assert app.query_one("#os-storage", Select).display is True + assert app.query_one("#os-iso", Select).display is True + assert app.query_one("#os-physical-drive", Input).display is False + + app.query_one("#os-media-choice", Select).value = "physical" + await pilot.pause() + + assert app.query_one("#os-storage", Select).display is False + assert app.query_one("#os-iso", Select).display is False + assert app.query_one("#os-physical-drive", Input).display is True + + app.query_one("#os-media-choice", Select).value = "none" + await pilot.pause() + + assert app.query_one("#os-storage", Select).display is False + assert app.query_one("#os-iso", Select).display is False + assert app.query_one("#os-physical-drive", Input).display is False + + +@pytest.mark.asyncio +async def test_wizard_hides_dependent_system_memory_and_network_fields() -> None: + service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + await wait_for_wizard_ready(pilot, app) + + assert app.query_one("#system-efi-storage", Select).display is True + assert app.query_one("#system-pre-enroll", Checkbox).display is True + + app.query_one("#system-add-efi", Checkbox).value 
= False + await pilot.pause() + + assert app.query_one("#system-efi-storage", Select).display is False + assert app.query_one("#system-pre-enroll", Checkbox).display is False + + app.query_one("#system-tpm", Checkbox).value = True + await pilot.pause() + + assert app.query_one("#system-efi-storage", Select).display is True + assert app.query_one("#system-pre-enroll", Checkbox).display is False + + assert app.query_one("#memory-min-size", Input).display is True + assert app.query_one("#memory-ksm", Checkbox).display is True + + app.query_one("#memory-ballooning", Checkbox).value = False + await pilot.pause() + + assert app.query_one("#memory-min-size", Input).display is False + assert app.query_one("#memory-ksm", Checkbox).display is False + + assert app.query_one("#network-bridge", Select).display is True + assert app.query_one("#network-rate", Input).display is True + + app.query_one("#network-none", Checkbox).value = True + await pilot.pause() + + assert app.query_one("#network-bridge", Select).display is False + assert app.query_one("#network-rate", Input).display is False + + +@pytest.mark.asyncio +async def test_wizard_tag_rows_keep_input_and_button_visible() -> None: + service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + await wait_for_wizard_ready(pilot, app) + + assert app.query_one("#general-tag-input", Input).display is True + assert app.query_one("#general-tag-add", Button).display is True + assert app.query_one("#general-tag-existing", Select).display is True + assert app.query_one("#general-tag-use", Button).display is True + + +@pytest.mark.asyncio +async def test_wizard_add_tag_button_updates_current_tags() -> None: + 
service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + await wait_for_wizard_ready(pilot, app) + + app.query_one("#general-tag-input", Input).value = "alpha" + app.query_one("#general-tag-add", Button).press() + await pilot.pause() + + assert wizard._workflow.config.general.tags == ["alpha"] + current_tags = app.query_one("#general-tag-current", Select) + assert current_tags.display is True + + +@pytest.mark.asyncio +async def test_wizard_hiding_select_collapses_open_overlay() -> None: + service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + await wait_for_wizard_ready(pilot, app) + + storage = app.query_one("#os-storage", Select) + storage.expanded = True + await pilot.pause() + assert storage.expanded is True + + app.query_one("#os-media-choice", Select).value = "physical" + await pilot.pause() + + assert storage.display is False + assert storage.expanded is False + + +@pytest.mark.asyncio +async def test_disk_toolbar_buttons_render_left_of_disk_selector() -> None: + service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await 
app.query_one("#app-body").mount(wizard) + wizard.activate() + await wait_for_wizard_ready(pilot, app) + + wizard._workflow.current_step_index = 3 + wizard._show_step() + await pilot.pause() + + add_button = app.query_one("#disks-add", Button) + remove_button = app.query_one("#disks-remove", Button) + selector = app.query_one("#disks-select", Select) + + assert add_button.region.x < remove_button.region.x + assert selector.region.x == add_button.region.x + assert selector.region.width > remove_button.region.width * 3 + + +@pytest.mark.asyncio +async def test_disk_selector_switches_between_configured_disks_without_blank_option() -> None: + service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + await wait_for_wizard_ready(pilot, app) + + wizard._workflow.current_step_index = 3 + wizard._show_step() + await pilot.pause() + + app.query_one("#disks-add", Button).press() + await pilot.pause() + + selector = app.query_one("#disks-select", Select) + option_values = [value for _, value in selector._options] + + assert NO_DISK_SELECTED not in option_values + assert selector.disabled is False + assert selector.value == "1" + + await asyncio.wait_for(pilot.click("#disks-select"), timeout=2) + await pilot.pause() + assert selector.expanded is True + + await asyncio.wait_for(pilot.press("up"), timeout=2) + await pilot.pause() + await asyncio.wait_for(pilot.press("enter"), timeout=2) + await pilot.pause() + + assert selector.expanded is False + assert selector.value == "0" + assert wizard._selected_disk_index == 0 + assert app.focused is selector + + +@pytest.mark.asyncio +async def test_confirm_step_replaces_create_with_exit_after_success() -> None: + 
service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + await wait_for_wizard_ready(pilot, app) + + wizard._workflow.current_step_index = 7 + wizard._workflow.submission.phase = "success" + wizard._workflow.submission.message = "VM 123 created." + wizard._show_step() + await pilot.pause() + + create_button = app.query_one("#wizard-create", Button) + assert str(create_button.label) == "Exit" + assert app.focused is create_button + + exited: list[bool] = [] + app.exit = lambda *args, **kwargs: exited.append(True) # type: ignore[method-assign] + create_button.press() + await pilot.pause() + + assert exited == [True] + + +@pytest.mark.asyncio +async def test_confirm_step_asks_whether_to_start_vm_before_submitting() -> None: + service = FakeProxmoxService() + app = PveVmSetupApp( + AppSettings.from_env({}, load_dotenv_file=False), + service=service, + ) + + async with app.run_test() as pilot: + app.query_one(LoginView).remove() + wizard = WizardView( + AppSettings.from_env({}, load_dotenv_file=False), + WorkflowState(), + service, + ) + await app.query_one("#app-body").mount(wizard) + wizard.activate() + await wait_for_wizard_ready(pilot, app) + + app.query_one("#general-name", Input).value = "demo" + wizard._workflow.current_step_index = 7 + wizard._show_step() + await pilot.pause() + + app.query_one("#wizard-create", Button).press() + await pilot.pause() + + assert isinstance(app.screen_stack[-1], AutoStartConfirmModal) + assert service.created_vms == [] + + app.query_one("#auto-start-no", Button).press() + + for _ in range(20): + await pilot.pause(0.05) + if service.created_vms: + break + + assert len(service.created_vms) == 1 + assert 
service.start_after_create_requests == [False] diff --git a/tests/test_doctor.py b/tests/test_doctor.py new file mode 100644 index 0000000..3faf840 --- /dev/null +++ b/tests/test_doctor.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +from io import StringIO + +from pve_vm_setup.doctor import run_live_doctor +from pve_vm_setup.services.base import AuthenticatedSession, Node, Pool, Realm +from pve_vm_setup.settings import AppSettings + + +class StubDoctorService: + mode = "live" + + def check_connectivity(self) -> str: + return "HTTP 200" + + def check_api_base(self) -> str: + return "8.2" + + def load_realms(self) -> list[Realm]: + return [Realm(name="pam", title="Linux PAM standard authentication", default=True)] + + def login(self, username: str, password: str, realm: str) -> AuthenticatedSession: + return AuthenticatedSession(username=f"{username}@{realm}", ticket="ticket") + + def load_nodes(self) -> list[Node]: + return [Node(name="pve-test-01")] + + def load_pools(self) -> list[Pool]: + return [Pool(poolid="sandbox")] + + def load_existing_tags(self) -> list[str]: + return [] + + def load_storages(self, node: str): + raise AssertionError("not used in doctor") + + def load_isos(self, node: str, storage: str): + raise AssertionError("not used in doctor") + + +class StubFactory: + @staticmethod + def create(settings: AppSettings) -> StubDoctorService: + return StubDoctorService() + + +def test_doctor_succeeds_and_keeps_secrets_out_of_output() -> None: + settings = AppSettings.from_env( + { + "PROXMOX_URL": "https://proxmox.example.invalid:8006", + "PROXMOX_USER": "root", + "PROXMOX_PASSWORD": "super-secret", + "PROXMOX_REALM": "pam", + }, + load_dotenv_file=False, + ) + stream = StringIO() + + exit_code = run_live_doctor(settings, stream=stream, service_factory=StubFactory) + + output = stream.getvalue() + assert exit_code == 0 + assert "Doctor finished successfully." 
in output + assert "super-secret" not in output + assert "root@pam" in output + assert "host: proxmox.example.invalid:8006" in output + + +def test_doctor_validates_create_scope_when_enabled() -> None: + settings = AppSettings.from_env( + { + "PROXMOX_URL": "https://proxmox.example.invalid:8006", + "PROXMOX_USER": "root", + "PROXMOX_PASSWORD": "super-secret", + "PROXMOX_REALM": "pam", + "PROXMOX_PREVENT_CREATE": "false", + "PROXMOX_ENABLE_TEST_MODE": "true", + "PROXMOX_TEST_NODE": "pve-test-01", + "PROXMOX_TEST_POOL": "sandbox", + }, + load_dotenv_file=False, + ) + stream = StringIO() + + exit_code = run_live_doctor(settings, stream=stream, service_factory=StubFactory) + + output = stream.getvalue() + assert exit_code == 0 + assert "prevent_create: False" in output + assert "enable_test_mode: True" in output + assert "node=pve-test-01" in output + assert "pool=sandbox" in output + assert "tag=codex-e2e" in output + assert "name_prefix=codex-e2e-" in output diff --git a/tests/test_domain.py b/tests/test_domain.py new file mode 100644 index 0000000..71ebb31 --- /dev/null +++ b/tests/test_domain.py @@ -0,0 +1,107 @@ +from pve_vm_setup.domain import build_create_payload, select_latest_nixos_iso, validate_all_steps +from pve_vm_setup.models.workflow import VmConfig +from pve_vm_setup.settings import AppSettings + + +def test_select_latest_nixos_iso_prefers_latest_year_month() -> None: + choice = select_latest_nixos_iso( + [ + "cephfs:iso/nixos-minimal-24.11.1234abcd-x86_64-linux.iso", + "cephfs:iso/nixos-minimal-25.05.ffffeeee-x86_64-linux.iso", + "cephfs:iso/debian-12.iso", + ] + ) + + assert choice == "cephfs:iso/nixos-minimal-25.05.ffffeeee-x86_64-linux.iso" + + +def test_build_create_payload_applies_safety_name_tag_and_key_settings() -> None: + settings = AppSettings.from_env( + { + "PROXMOX_PREVENT_CREATE": "false", + "PROXMOX_ENABLE_TEST_MODE": "true", + "PROXMOX_TEST_NODE": "fake-node-01", + "PROXMOX_TEST_POOL": "lab", + }, + load_dotenv_file=False, + ) + config 
= VmConfig() + config.general.node = "fake-node-01" + config.general.vmid = 123 + config.general.name = "demo" + config.general.tags = ["linux"] + config.os.storage = "cephfs" + config.os.iso = "cephfs:iso/nixos-minimal-25.05.ffffeeee-x86_64-linux.iso" + + payload = build_create_payload(config, settings) + + assert payload["name"] == "codex-e2e-demo" + assert payload["tags"] == "codex-e2e;linux" + assert payload["bios"] == "ovmf" + assert payload["scsihw"] == "virtio-scsi-single" + assert payload["allow-ksm"] == 1 + assert payload["net0"] == "model=virtio,bridge=vmbr9,firewall=1,link_down=0" + assert payload["scsi0"] == ( + "ceph-pool:32,format=raw,cache=none,discard=ignore," + "iothread=1,ssd=1,backup=1,replicate=1,aio=io_uring" + ) + + +def test_validate_all_steps_requires_live_create_opt_in() -> None: + settings = AppSettings.from_env( + { + "PROXMOX_PREVENT_CREATE": "true", + }, + load_dotenv_file=False, + ) + config = VmConfig() + config.general.node = "fake-node-01" + config.general.vmid = 123 + config.general.name = "demo" + config.os.storage = "cephfs" + config.os.iso = "cephfs:iso/nixos-minimal-25.05.ffffeeee-x86_64-linux.iso" + + errors = validate_all_steps(config, settings, references=type("Refs", (), {})()) + + assert "Set PROXMOX_PREVENT_CREATE=false to enable VM creation." 
in errors + + +def test_build_create_payload_leaves_name_and_tags_untouched_outside_test_mode() -> None: + settings = AppSettings.from_env( + { + "PROXMOX_PREVENT_CREATE": "false", + }, + load_dotenv_file=False, + ) + config = VmConfig() + config.general.node = "fake-node-01" + config.general.vmid = 123 + config.general.name = "demo" + config.general.tags = ["linux"] + config.os.storage = "cephfs" + config.os.iso = "cephfs:iso/nixos-minimal-25.05.ffffeeee-x86_64-linux.iso" + + payload = build_create_payload(config, settings) + + assert payload["name"] == "demo" + assert payload["tags"] == "linux" + + +def test_build_create_payload_can_disable_allow_ksm() -> None: + settings = AppSettings.from_env( + { + "PROXMOX_PREVENT_CREATE": "false", + }, + load_dotenv_file=False, + ) + config = VmConfig() + config.general.node = "fake-node-01" + config.general.vmid = 123 + config.general.name = "demo" + config.os.storage = "cephfs" + config.os.iso = "cephfs:iso/nixos-minimal-25.05.ffffeeee-x86_64-linux.iso" + config.memory.allow_ksm = False + + payload = build_create_payload(config, settings) + + assert payload["allow-ksm"] == 0 diff --git a/tests/test_factory.py b/tests/test_factory.py new file mode 100644 index 0000000..e5fb38d --- /dev/null +++ b/tests/test_factory.py @@ -0,0 +1,30 @@ +from pve_vm_setup.services.factory import ProxmoxServiceFactory +from pve_vm_setup.services.fake import FakeProxmoxService +from pve_vm_setup.services.proxmox import LiveProxmoxService +from pve_vm_setup.settings import AppSettings + + +def test_factory_returns_fake_service_when_live_env_is_missing() -> None: + settings = AppSettings.from_env({}, load_dotenv_file=False) + + service = ProxmoxServiceFactory.create(settings) + + assert isinstance(service, FakeProxmoxService) + + +def test_factory_returns_live_service_when_live_env_is_present() -> None: + settings = AppSettings.from_env( + { + "PROXMOX_URL": "https://proxmox.example.invalid:8006", + "PROXMOX_USER": "root", + "PROXMOX_PASSWORD": 
"secret", + "PROXMOX_REALM": "pam", + }, + load_dotenv_file=False, + ) + + service = ProxmoxServiceFactory.create(settings) + try: + assert isinstance(service, LiveProxmoxService) + finally: + service.close() diff --git a/tests/test_proxmox_client.py b/tests/test_proxmox_client.py new file mode 100644 index 0000000..3965f85 --- /dev/null +++ b/tests/test_proxmox_client.py @@ -0,0 +1,193 @@ +from __future__ import annotations + +from urllib.parse import parse_qs + +import httpx +import pytest + +from pve_vm_setup.errors import ProxmoxConnectError +from pve_vm_setup.models.workflow import VmConfig +from pve_vm_setup.services.proxmox import ProxmoxApiClient +from pve_vm_setup.settings import AppSettings + + +def build_settings() -> AppSettings: + return AppSettings.from_env( + { + "PROXMOX_URL": "https://proxmox.example.invalid:8006", + "PROXMOX_USER": "root", + "PROXMOX_PASSWORD": "secret", + "PROXMOX_REALM": "pam", + }, + load_dotenv_file=False, + ) + + +def test_client_uses_api_base_when_loading_realms() -> None: + recorded_urls: list[str] = [] + + def handler(request: httpx.Request) -> httpx.Response: + recorded_urls.append(str(request.url)) + return httpx.Response(200, json={"data": [{"realm": "pam", "comment": "Linux PAM"}]}) + + client = ProxmoxApiClient(build_settings(), transport=httpx.MockTransport(handler)) + try: + realms = client.load_realms() + finally: + client.close() + + assert realms[0].name == "pam" + assert recorded_urls == ["https://proxmox.example.invalid:8006/api2/json/access/domains"] + + +def test_client_maps_connect_errors() -> None: + def handler(request: httpx.Request) -> httpx.Response: + raise httpx.ConnectError("boom", request=request) + + client = ProxmoxApiClient(build_settings(), transport=httpx.MockTransport(handler)) + try: + with pytest.raises(ProxmoxConnectError): + client.load_realms() + finally: + client.close() + + +def test_client_attaches_serial_device_without_switching_display_to_serial() -> None: + requests: list[tuple[str, 
str, bytes]] = [] + + def handler(request: httpx.Request) -> httpx.Response: + requests.append((request.method, request.url.path, request.content)) + path = request.url.path + if path.endswith("/nodes/fake-node-01/qemu") and request.method == "POST": + return httpx.Response(200, json={"data": "UPID:create"}) + if path.endswith("/nodes/fake-node-01/tasks/UPID:create/status"): + return httpx.Response(200, json={"data": {"status": "stopped", "exitstatus": "OK"}}) + if path.endswith("/nodes/fake-node-01/qemu/123/config") and request.method == "PUT": + return httpx.Response(200, json={"data": "UPID:serial"}) + if path.endswith("/nodes/fake-node-01/tasks/UPID:serial/status"): + return httpx.Response(200, json={"data": {"status": "stopped", "exitstatus": "OK"}}) + raise AssertionError(f"Unexpected request: {request.method} {request.url}") + + client = ProxmoxApiClient(build_settings(), transport=httpx.MockTransport(handler)) + client._ticket = "ticket" + client._csrf_token = "csrf" + client._client.cookies.set("PVEAuthCookie", "ticket") + + config = VmConfig() + config.general.node = "fake-node-01" + config.general.vmid = 123 + config.general.name = "demo" + config.general.ha_enabled = False + config.os.storage = "cephfs" + config.os.iso = "cephfs:iso/nixos.iso" + + try: + client.create_vm(config) + finally: + client.close() + + serial_request = next( + content + for method, path, content in requests + if method == "PUT" and path.endswith("/nodes/fake-node-01/qemu/123/config") + ) + payload = parse_qs(serial_request.decode()) + + assert payload["serial0"] == ["socket"] + assert "vga" not in payload + + +def test_client_starts_vm_after_create_when_requested() -> None: + requests: list[tuple[str, str, bytes]] = [] + + def handler(request: httpx.Request) -> httpx.Response: + requests.append((request.method, request.url.path, request.content)) + path = request.url.path + if path.endswith("/nodes/fake-node-01/qemu") and request.method == "POST": + return httpx.Response(200, 
json={"data": "UPID:create"}) + if path.endswith("/nodes/fake-node-01/tasks/UPID:create/status"): + return httpx.Response(200, json={"data": {"status": "stopped", "exitstatus": "OK"}}) + if path.endswith("/nodes/fake-node-01/qemu/123/config") and request.method == "PUT": + return httpx.Response(200, json={"data": "UPID:serial"}) + if path.endswith("/nodes/fake-node-01/tasks/UPID:serial/status"): + return httpx.Response(200, json={"data": {"status": "stopped", "exitstatus": "OK"}}) + if path.endswith("/nodes/fake-node-01/qemu/123/status/start") and request.method == "POST": + return httpx.Response(200, json={"data": "UPID:start"}) + if path.endswith("/nodes/fake-node-01/tasks/UPID:start/status"): + return httpx.Response(200, json={"data": {"status": "stopped", "exitstatus": "OK"}}) + raise AssertionError(f"Unexpected request: {request.method} {request.url}") + + client = ProxmoxApiClient(build_settings(), transport=httpx.MockTransport(handler)) + client._ticket = "ticket" + client._csrf_token = "csrf" + client._client.cookies.set("PVEAuthCookie", "ticket") + + config = VmConfig() + config.general.node = "fake-node-01" + config.general.vmid = 123 + config.general.name = "demo" + config.general.ha_enabled = False + config.os.storage = "cephfs" + config.os.iso = "cephfs:iso/nixos.iso" + + try: + client.create_vm(config, start_after_create=True) + finally: + client.close() + + assert any( + method == "POST" and path.endswith("/nodes/fake-node-01/qemu/123/status/start") + for method, path, _ in requests + ) + + +def test_client_registers_ha_without_start_when_auto_start_disabled() -> None: + requests: list[tuple[str, str, bytes]] = [] + + def handler(request: httpx.Request) -> httpx.Response: + requests.append((request.method, request.url.path, request.content)) + path = request.url.path + if path.endswith("/nodes/fake-node-01/qemu") and request.method == "POST": + return httpx.Response(200, json={"data": "UPID:create"}) + if 
path.endswith("/nodes/fake-node-01/tasks/UPID:create/status"): + return httpx.Response(200, json={"data": {"status": "stopped", "exitstatus": "OK"}}) + if path.endswith("/nodes/fake-node-01/qemu/123/config") and request.method == "PUT": + return httpx.Response(200, json={"data": "UPID:serial"}) + if path.endswith("/nodes/fake-node-01/tasks/UPID:serial/status"): + return httpx.Response(200, json={"data": {"status": "stopped", "exitstatus": "OK"}}) + if path.endswith("/cluster/ha/resources") and request.method == "POST": + return httpx.Response(200, json={"data": "UPID:ha"}) + if path.endswith("/nodes/fake-node-01/tasks/UPID:ha/status"): + return httpx.Response(200, json={"data": {"status": "stopped", "exitstatus": "OK"}}) + raise AssertionError(f"Unexpected request: {request.method} {request.url}") + + client = ProxmoxApiClient(build_settings(), transport=httpx.MockTransport(handler)) + client._ticket = "ticket" + client._csrf_token = "csrf" + client._client.cookies.set("PVEAuthCookie", "ticket") + + config = VmConfig() + config.general.node = "fake-node-01" + config.general.vmid = 123 + config.general.name = "demo" + config.general.ha_enabled = True + config.os.storage = "cephfs" + config.os.iso = "cephfs:iso/nixos.iso" + + try: + client.create_vm(config, start_after_create=False) + finally: + client.close() + + ha_request = next( + content + for method, path, content in requests + if method == "POST" and path.endswith("/cluster/ha/resources") + ) + payload = parse_qs(ha_request.decode()) + + assert payload["state"] == ["stopped"] + assert not any( + method == "POST" and path.endswith("/nodes/fake-node-01/qemu/123/status/start") + for method, path, _ in requests + ) diff --git a/tests/test_settings.py b/tests/test_settings.py new file mode 100644 index 0000000..98c5af3 --- /dev/null +++ b/tests/test_settings.py @@ -0,0 +1,56 @@ +import pytest + +from pve_vm_setup.errors import SettingsError +from pve_vm_setup.settings import AppSettings + + +def 
test_settings_load_defaults_and_normalize_api_base() -> None: + settings = AppSettings.from_env( + { + "PROXMOX_URL": "https://proxmox.example.invalid:8006/", + "PROXMOX_USER": "root", + "PROXMOX_PASSWORD": "secret", + "PROXMOX_REALM": "pam", + "PROXMOX_API_BASE": "api2/json", + }, + load_dotenv_file=False, + ) + + assert settings.proxmox_url == "https://proxmox.example.invalid:8006" + assert settings.proxmox_api_base == "/api2/json" + assert settings.proxmox_verify_tls is False + assert settings.request_timeout_seconds == 15 + assert settings.effective_username == "root@pam" + assert settings.safety_policy.prevent_create is False + assert settings.safety_policy.enable_test_mode is False + assert settings.safety_policy.test_tag == "codex-e2e" + assert settings.safety_policy.test_vm_name_prefix == "codex-e2e-" + + +def test_settings_reject_test_mode_without_required_scope() -> None: + with pytest.raises(SettingsError): + AppSettings.from_env( + { + "PROXMOX_ENABLE_TEST_MODE": "true", + }, + load_dotenv_file=False, + ) + + +def test_settings_allow_create_without_test_scope_when_test_mode_disabled() -> None: + settings = AppSettings.from_env( + { + "PROXMOX_PREVENT_CREATE": "false", + }, + load_dotenv_file=False, + ) + + assert settings.safety_policy.allow_create is True + assert settings.safety_policy.enable_test_mode is False + + +def test_settings_allow_create_by_default_when_prevent_flag_is_unset() -> None: + settings = AppSettings.from_env({}, load_dotenv_file=False) + + assert settings.safety_policy.prevent_create is False + assert settings.safety_policy.allow_create is True diff --git a/uv.lock b/uv.lock new file mode 100644 index 0000000..c940681 --- /dev/null +++ b/uv.lock @@ -0,0 +1,317 @@ +version = 1 +revision = 3 +requires-python = ">=3.11" + +[[package]] +name = "anyio" +version = "4.12.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] 
+sdist = { url = "https://files.pythonhosted.org/packages/96/f0/5eb65b2bb0d09ac6776f2eb54adee6abe8228ea05b20a5ad0e4945de8aac/anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703", size = 228685, upload-time = "2026-01-06T11:45:21.246Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" }, +] + +[[package]] +name = "certifi" +version = "2026.2.25" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/2d/7bf41579a8986e348fa033a31cdd0e4121114f6bce2457e8876010b092dd/certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7", size = 155029, upload-time = "2026-02-25T02:54:17.342Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa", size = 153684, upload-time = "2026-02-25T02:54:15.766Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" 
}, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "linkify-it-py" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "uc-micro-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2e/c9/06ea13676ef354f0af6169587ae292d3e2406e212876a413bf9eece4eb23/linkify_it_py-2.1.0.tar.gz", hash = 
"sha256:43360231720999c10e9328dc3691160e27a718e280673d444c38d7d3aaa3b98b", size = 29158, upload-time = "2026-03-01T07:48:47.683Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/de/88b3be5c31b22333b3ca2f6ff1de4e863d8fe45aaea7485f591970ec1d3e/linkify_it_py-2.1.0-py3-none-any.whl", hash = "sha256:0d252c1594ecba2ecedc444053db5d3a9b7ec1b0dd929c8f1d74dce89f86c05e", size = 19878, upload-time = "2026-03-01T07:48:46.098Z" }, +] + +[[package]] +name = "markdown-it-py" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, +] + +[package.optional-dependencies] +linkify = [ + { name = "linkify-it-py" }, +] +plugins = [ + { name = "mdit-py-plugins" }, +] + +[[package]] +name = "mdit-py-plugins" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = "2025-08-11T07:25:49.083Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = 
"sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + +[[package]] +name = "packaging" +version = "26.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.9.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/19/56/8d4c30c8a1d07013911a8fdbd8f89440ef9f08d07a1b50ab8ca8be5a20f9/platformdirs-4.9.4.tar.gz", hash = "sha256:1ec356301b7dc906d83f371c8f487070e99d3ccf9e501686456394622a01a934", size = 28737, upload-time = "2026-03-05T18:34:13.271Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/63/d7/97f7e3a6abb67d8080dd406fd4df842c2be0efaf712d1c899c32a075027c/platformdirs-4.9.4-py3-none-any.whl", hash = "sha256:68a9a4619a666ea6439f2ff250c12a853cd1cbd5158d258bd824a7df6be2f868", size = 21216, upload-time = "2026-03-05T18:34:12.172Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pve-vm-setup" +version = "0.1.0" +source = { editable = "." 
} +dependencies = [ + { name = "httpx" }, + { name = "python-dotenv" }, + { name = "textual" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "httpx", specifier = ">=0.27,<0.29" }, + { name = "python-dotenv", specifier = ">=1.0,<2.0" }, + { name = "textual", specifier = ">=0.63,<0.90" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pytest", specifier = ">=8.3,<9.0" }, + { name = "pytest-asyncio", specifier = ">=0.24,<1.0" }, + { name = "ruff", specifier = ">=0.9,<1.0" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.26.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8e/c4/453c52c659521066969523e87d85d54139bbd17b78f09532fb8eb8cdb58e/pytest_asyncio-0.26.0.tar.gz", hash = "sha256:c4df2a697648241ff39e7f0e4a73050b03f123f760673956cf0d72a4990e312f", size = 54156, upload-time = "2025-03-25T06:22:28.883Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/7f/338843f449ace853647ace35870874f69a764d251872ed1b4de9f234822c/pytest_asyncio-0.26.0-py3-none-any.whl", hash = "sha256:7b51ed894f4fbea1340262bdae5135797ebbe21d8638978e35d31c6d19f72fb0", size = 19694, upload-time = "2025-03-25T06:22:27.807Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/82/ed/0301aeeac3e5353ef3d94b6ec08bbcabd04a72018415dcb29e588514bba8/python_dotenv-1.2.2.tar.gz", hash = "sha256:2c371a91fbd7ba082c2c1dc1f8bf89ca22564a087c2c287cd9b662adde799cf3", size = 50135, upload-time = "2026-03-01T16:00:26.196Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0b/d7/1959b9648791274998a9c3526f6d0ec8fd2233e4d4acce81bbae76b44b2a/python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a", size = 22101, upload-time = "2026-03-01T16:00:25.09Z" }, +] + +[[package]] +name = "rich" +version = "14.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/b3/c6/f3b320c27991c46f43ee9d856302c70dc2d0fb2dba4842ff739d5f46b393/rich-14.3.3.tar.gz", hash = "sha256:b8daa0b9e4eef54dd8cf7c86c03713f53241884e814f4e2f5fb342fe520f639b", size = 230582, upload-time = "2026-02-19T17:23:12.474Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/25/b208c5683343959b670dc001595f2f3737e051da617f66c31f7c4fa93abc/rich-14.3.3-py3-none-any.whl", hash = "sha256:793431c1f8619afa7d3b52b2cdec859562b950ea0d4b6b505397612db8d5362d", size = 310458, upload-time = "2026-02-19T17:23:13.732Z" }, +] + +[[package]] +name = "ruff" +version = "0.15.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/77/9b/840e0039e65fcf12758adf684d2289024d6140cde9268cc59887dc55189c/ruff-0.15.5.tar.gz", hash = "sha256:7c3601d3b6d76dce18c5c824fc8d06f4eef33d6df0c21ec7799510cde0f159a2", size = 4574214, upload-time = "2026-03-05T20:06:34.946Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/20/5369c3ce21588c708bcbe517a8fbe1a8dfdb5dfd5137e14790b1da71612c/ruff-0.15.5-py3-none-linux_armv6l.whl", hash = "sha256:4ae44c42281f42e3b06b988e442d344a5b9b72450ff3c892e30d11b29a96a57c", size = 10478185, upload-time = "2026-03-05T20:06:29.093Z" }, + { url = "https://files.pythonhosted.org/packages/44/ed/e81dd668547da281e5dce710cf0bc60193f8d3d43833e8241d006720e42b/ruff-0.15.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6edd3792d408ebcf61adabc01822da687579a1a023f297618ac27a5b51ef0080", size = 10859201, upload-time = "2026-03-05T20:06:32.632Z" }, + { url = "https://files.pythonhosted.org/packages/c4/8f/533075f00aaf19b07c5cd6aa6e5d89424b06b3b3f4583bfa9c640a079059/ruff-0.15.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:89f463f7c8205a9f8dea9d658d59eff49db05f88f89cc3047fb1a02d9f344010", size = 10184752, upload-time = "2026-03-05T20:06:40.312Z" }, + { url = 
"https://files.pythonhosted.org/packages/66/0e/ba49e2c3fa0395b3152bad634c7432f7edfc509c133b8f4529053ff024fb/ruff-0.15.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba786a8295c6574c1116704cf0b9e6563de3432ac888d8f83685654fe528fd65", size = 10534857, upload-time = "2026-03-05T20:06:19.581Z" }, + { url = "https://files.pythonhosted.org/packages/59/71/39234440f27a226475a0659561adb0d784b4d247dfe7f43ffc12dd02e288/ruff-0.15.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fd4b801e57955fe9f02b31d20375ab3a5c4415f2e5105b79fb94cf2642c91440", size = 10309120, upload-time = "2026-03-05T20:06:00.435Z" }, + { url = "https://files.pythonhosted.org/packages/f5/87/4140aa86a93df032156982b726f4952aaec4a883bb98cb6ef73c347da253/ruff-0.15.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391f7c73388f3d8c11b794dbbc2959a5b5afe66642c142a6effa90b45f6f5204", size = 11047428, upload-time = "2026-03-05T20:05:51.867Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f7/4953e7e3287676f78fbe85e3a0ca414c5ca81237b7575bdadc00229ac240/ruff-0.15.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dc18f30302e379fe1e998548b0f5e9f4dff907f52f73ad6da419ea9c19d66c8", size = 11914251, upload-time = "2026-03-05T20:06:22.887Z" }, + { url = "https://files.pythonhosted.org/packages/77/46/0f7c865c10cf896ccf5a939c3e84e1cfaeed608ff5249584799a74d33835/ruff-0.15.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1cc6e7f90087e2d27f98dc34ed1b3ab7c8f0d273cc5431415454e22c0bd2a681", size = 11333801, upload-time = "2026-03-05T20:05:57.168Z" }, + { url = "https://files.pythonhosted.org/packages/d3/01/a10fe54b653061585e655f5286c2662ebddb68831ed3eaebfb0eb08c0a16/ruff-0.15.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1cb7169f53c1ddb06e71a9aebd7e98fc0fea936b39afb36d8e86d36ecc2636a", size = 11206821, upload-time = "2026-03-05T20:06:03.441Z" }, + { url = 
"https://files.pythonhosted.org/packages/7a/0d/2132ceaf20c5e8699aa83da2706ecb5c5dcdf78b453f77edca7fb70f8a93/ruff-0.15.5-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9b037924500a31ee17389b5c8c4d88874cc6ea8e42f12e9c61a3d754ff72f1ca", size = 11133326, upload-time = "2026-03-05T20:06:25.655Z" }, + { url = "https://files.pythonhosted.org/packages/72/cb/2e5259a7eb2a0f87c08c0fe5bf5825a1e4b90883a52685524596bfc93072/ruff-0.15.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:65bb414e5b4eadd95a8c1e4804f6772bbe8995889f203a01f77ddf2d790929dd", size = 10510820, upload-time = "2026-03-05T20:06:37.79Z" }, + { url = "https://files.pythonhosted.org/packages/ff/20/b67ce78f9e6c59ffbdb5b4503d0090e749b5f2d31b599b554698a80d861c/ruff-0.15.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d20aa469ae3b57033519c559e9bc9cd9e782842e39be05b50e852c7c981fa01d", size = 10302395, upload-time = "2026-03-05T20:05:54.504Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e5/719f1acccd31b720d477751558ed74e9c88134adcc377e5e886af89d3072/ruff-0.15.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:15388dd28c9161cdb8eda68993533acc870aa4e646a0a277aa166de9ad5a8752", size = 10754069, upload-time = "2026-03-05T20:06:06.422Z" }, + { url = "https://files.pythonhosted.org/packages/c3/9c/d1db14469e32d98f3ca27079dbd30b7b44dbb5317d06ab36718dee3baf03/ruff-0.15.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b30da330cbd03bed0c21420b6b953158f60c74c54c5f4c1dabbdf3a57bf355d2", size = 11304315, upload-time = "2026-03-05T20:06:10.867Z" }, + { url = "https://files.pythonhosted.org/packages/28/3a/950367aee7c69027f4f422059227b290ed780366b6aecee5de5039d50fa8/ruff-0.15.5-py3-none-win32.whl", hash = "sha256:732e5ee1f98ba5b3679029989a06ca39a950cced52143a0ea82a2102cb592b74", size = 10551676, upload-time = "2026-03-05T20:06:13.705Z" }, + { url = "https://files.pythonhosted.org/packages/b8/00/bf077a505b4e649bdd3c47ff8ec967735ce2544c8e4a43aba42ee9bf935d/ruff-0.15.5-py3-none-win_amd64.whl", hash = 
"sha256:821d41c5fa9e19117616c35eaa3f4b75046ec76c65e7ae20a333e9a8696bc7fe", size = 11678972, upload-time = "2026-03-05T20:06:45.379Z" }, + { url = "https://files.pythonhosted.org/packages/fe/4e/cd76eca6db6115604b7626668e891c9dd03330384082e33662fb0f113614/ruff-0.15.5-py3-none-win_arm64.whl", hash = "sha256:b498d1c60d2fe5c10c45ec3f698901065772730b411f164ae270bb6bfcc4740b", size = 10965572, upload-time = "2026-03-05T20:06:16.984Z" }, +] + +[[package]] +name = "textual" +version = "0.89.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py", extra = ["linkify", "plugins"] }, + { name = "platformdirs" }, + { name = "rich" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/cb/b3ff0e45d812997a527cb581a4cd602f0b28793450aa26201969fd6ce42c/textual-0.89.1.tar.gz", hash = "sha256:66befe80e2bca5a8c876cd8ceeaf01752267b6b1dc1d0f73071f1f1e15d90cc8", size = 1517074, upload-time = "2024-12-05T15:17:12.903Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/02/650adf160774a43c206011d23283d568d2dbcd43cf7b40dff0a880885b47/textual-0.89.1-py3-none-any.whl", hash = "sha256:0a5d214df6e951b4a2c421e13d0b608482882471c1e34ea74a3631adede8054f", size = 656019, upload-time = "2024-12-05T15:17:10.37Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, 
upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "uc-micro-py" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/78/67/9a363818028526e2d4579334460df777115bdec1bb77c08f9db88f6389f2/uc_micro_py-2.0.0.tar.gz", hash = "sha256:c53691e495c8db60e16ffc4861a35469b0ba0821fe409a8a7a0a71864d33a811", size = 6611, upload-time = "2026-03-01T06:31:27.526Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/73/d21edf5b204d1467e06500080a50f79d49ef2b997c79123a536d4a17d97c/uc_micro_py-2.0.0-py3-none-any.whl", hash = "sha256:3603a3859af53e5a39bc7677713c78ea6589ff188d70f4fee165db88e22b242c", size = 6383, upload-time = "2026-03-01T06:31:26.257Z" }, +]