diff --git a/flake.lock b/flake.lock index 68e69f0..275a1c5 100644 --- a/flake.lock +++ b/flake.lock @@ -468,6 +468,22 @@ "type": "github" } }, + "nvim": { + "flake": false, + "locked": { + "lastModified": 1771940146, + "narHash": "sha256-z52fdPOWd3hUKKd9IojZ6rFYAgQrdrw/lNKFVdO2jf8=", + "ref": "refs/heads/main", + "rev": "058c9c730d8d60f482d97b61702561e1ea978847", + "revCount": 91, + "type": "git", + "url": "https://git.janezic.dev/janezicmatej/nvim.git" + }, + "original": { + "type": "git", + "url": "https://git.janezic.dev/janezicmatej/nvim.git" + } + }, "pre-commit": { "inputs": { "flake-compat": "flake-compat", @@ -503,6 +519,7 @@ "nixpkgs": "nixpkgs_3", "nixpkgs-master": "nixpkgs-master", "nixpkgs-unstable": "nixpkgs-unstable", + "nvim": "nvim", "stylix": "stylix" } }, diff --git a/flake.nix b/flake.nix index b3358a2..8f8b152 100644 --- a/flake.nix +++ b/flake.nix @@ -10,10 +10,10 @@ # url = "git+https://git.janezic.dev/janezicmatej/.dotfiles.git"; # flake = false; # }; - # nvim = { - # url = "git+https://git.janezic.dev/janezicmatej/nvim.git?ref=rewrite"; - # flake = false; - # }; + nvim = { + url = "git+https://git.janezic.dev/janezicmatej/nvim.git"; + flake = false; + }; nixos-hardware.url = "github:NixOS/nixos-hardware/master"; @@ -82,6 +82,11 @@ iso = mkHost "iso" { system = "x86_64-linux"; }; + + ephvm = mkHost "ephvm" { + system = "x86_64-linux"; + user = "matej"; + }; }; nixosModules = import ./modules/nixos { @@ -123,6 +128,7 @@ packages = [ pkgs.pre-commit pkgs.statix + pkgs.qemu ]; }; } diff --git a/hosts/ephvm/configuration.nix b/hosts/ephvm/configuration.nix new file mode 100644 index 0000000..9b1d450 --- /dev/null +++ b/hosts/ephvm/configuration.nix @@ -0,0 +1,69 @@ +{ + pkgs, + lib, + inputs, + config, + ... 
+}: +{ + networking.hostName = "ephvm"; + + profiles.base.enable = true; + + vm-guest = { + enable = true; + headless = true; + }; + + vm-9p-automount = { + enable = true; + user = "matej"; + }; + + localisation = { + timeZone = "UTC"; + defaultLocale = "en_US.UTF-8"; + }; + + virtualisation.docker = { + enable = true; + logDriver = "json-file"; + }; + + # TODO:(@janezicmatej) move neovim dotfiles wiring to a cleaner place + home-manager.users.matej = { + neovim.dotfiles = inputs.nvim; + }; + + # writable claude config via 9p + fileSystems."/home/matej/.claude" = { + device = "claude"; + fsType = "9p"; + options = [ + "trans=virtio" + "version=9p2000.L" + "nofail" + "x-systemd.automount" + ]; + }; + + # .claude.json passed via qemu fw_cfg + boot.kernelModules = [ "qemu_fw_cfg" ]; + systemd.services.claude-json = { + after = [ "systemd-modules-load.service" ]; + wants = [ "systemd-modules-load.service" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + Type = "oneshot"; + RemainAfterExit = true; + ExecStart = pkgs.writeShellScript "claude-json" '' + src="/sys/firmware/qemu_fw_cfg/by_name/opt/claude.json/raw" + [ -f "$src" ] || exit 0 + cp "$src" /home/matej/.claude.json + chown matej:users /home/matej/.claude.json + ''; + }; + }; + + system.stateVersion = "25.11"; +} diff --git a/hosts/ephvm/hardware-configuration.nix b/hosts/ephvm/hardware-configuration.nix new file mode 100644 index 0000000..2050d4f --- /dev/null +++ b/hosts/ephvm/hardware-configuration.nix @@ -0,0 +1,20 @@ +{ + lib, + pkgs, + modulesPath, + ... 
+}: +{ + imports = [ + (modulesPath + "/profiles/qemu-guest.nix") + ]; + + fileSystems."/" = { + device = "/dev/disk/by-label/nixos"; + autoResize = true; + fsType = "ext4"; + }; + + # image.modules (disk-image.nix) overrides boot loader per variant + boot.loader.grub.device = lib.mkDefault "/dev/vda"; +} diff --git a/justfile b/justfile index cd45777..5069614 100644 --- a/justfile +++ b/justfile @@ -33,6 +33,18 @@ build: iso: nixos-rebuild build-image --image-variant iso-installer --flake .#iso +# build ephemeral VM image +ephvm-build: + nixos-rebuild build-image --image-variant qemu --flake .#ephvm + +# run ephemeral VM +ephvm-run *ARGS: + bash scripts/ephvm-run.sh $(find -L result -name '*.qcow2' | head -1) {{ARGS}} + +# ssh into running ephemeral VM +ephvm-ssh port="2222": + ssh -p {{port}} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null matej@localhost + # garbage collect old generations clean: sudo nix-collect-garbage $(nix eval --raw -f ./nix.nix nix.gc.options) diff --git a/modules/nixos/vm-9p-automount.nix b/modules/nixos/vm-9p-automount.nix new file mode 100644 index 0000000..5e53364 --- /dev/null +++ b/modules/nixos/vm-9p-automount.nix @@ -0,0 +1,74 @@ +{ + pkgs, + lib, + config, + ... 
+}: +let + inherit (config.vm-9p-automount) user; + inherit (config.users.users.${user}) home group; +in +{ + options = { + vm-9p-automount = { + enable = lib.mkEnableOption "auto-discover and mount 9p shares"; + + user = lib.mkOption { + type = lib.types.str; + description = "user to own the mount points"; + }; + + prefix = lib.mkOption { + type = lib.types.str; + default = "mount_"; + description = "9p mount tag prefix to match"; + }; + + basePath = lib.mkOption { + type = lib.types.str; + default = "${home}/mnt"; + description = "directory to mount shares under"; + }; + }; + }; + + config = lib.mkIf config.vm-9p-automount.enable { + systemd.services.vm-9p-automount = { + description = "Auto-discover and mount 9p shares"; + after = [ + "local-fs.target" + "nss-user-lookup.target" + "systemd-modules-load.service" + ]; + wants = [ "systemd-modules-load.service" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + Type = "oneshot"; + RemainAfterExit = true; + ExecStart = pkgs.writeShellScript "vm-9p-automount" '' + BASE="${config.vm-9p-automount.basePath}" + PREFIX="${config.vm-9p-automount.prefix}" + mkdir -p "$BASE" + chown ${user}:${group} "$BASE" + + for tagfile in $(find /sys/devices -name mount_tag 2>/dev/null); do + [ -f "$tagfile" ] || continue + tag=$(tr -d '\0' < "$tagfile") + + case "$tag" in + "$PREFIX"*) ;; + *) continue ;; + esac + + name="''${tag#"$PREFIX"}" + target="$BASE/$name" + + mkdir -p "$target" + ${pkgs.util-linux}/bin/mount -t 9p "$tag" "$target" \ + -o trans=virtio,version=9p2000.L || continue + done + ''; + }; + }; + }; +} diff --git a/modules/nixos/vm-guest.nix b/modules/nixos/vm-guest.nix new file mode 100644 index 0000000..d259a03 --- /dev/null +++ b/modules/nixos/vm-guest.nix @@ -0,0 +1,49 @@ +{ + pkgs, + lib, + config, + ... 
+}: +{ + options = { + vm-guest = { + enable = lib.mkEnableOption "VM guest configuration"; + headless = lib.mkOption { + type = lib.types.bool; + default = false; + description = "run without display, serial console only"; + }; + }; + }; + + config = lib.mkIf config.vm-guest.enable { + services.qemuGuest.enable = true; + services.spice-vdagentd.enable = lib.mkIf (!config.vm-guest.headless) true; + + boot.kernelParams = lib.mkIf config.vm-guest.headless [ "console=ttyS0,115200" ]; + + # 9p for host file mounting + boot.initrd.availableKernelModules = [ + "9p" + "9pnet_virtio" + ]; + boot.kernelModules = [ + "9p" + "9pnet_virtio" + ]; + + networking = { + useDHCP = true; + firewall.allowedTCPPorts = [ 22 ]; + }; + + security.sudo.wheelNeedsPassword = false; + + environment.systemPackages = with pkgs; [ + curl + wget + htop + sshfs + ]; + }; +} diff --git a/scripts/ephvm-run.sh b/scripts/ephvm-run.sh new file mode 100755 index 0000000..5c254ce --- /dev/null +++ b/scripts/ephvm-run.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +set -euo pipefail + +SSH_PORT=2222 +MEMORY=8G +CPUS=4 +MOUNTS=() +CLAUDE_DIR="" +CLAUDE_JSON="" +IMAGE="" + +usage() { + cat <<EOF +Usage: $0 <image> [options] + +Options: + --mount <dir> Mount host directory into VM (repeatable) + --claude <dir> Mount claude config dir writable into VM + --claude-json <file> Copy claude.json into mounted claude dir + --memory <size> VM memory (default: 8G) + --cpus <n> VM CPUs (default: 4) + --ssh-port <port> SSH port forward (default: 2222) +EOF + exit 1 +} + +[ "${1:-}" ] || usage + +IMAGE="$1" +shift + +while [ $# -gt 0 ]; do + case "$1" in + --mount) MOUNTS+=("$2"); shift 2 ;; + --claude) CLAUDE_DIR="$2"; shift 2 ;; + --claude-json) CLAUDE_JSON="$2"; shift 2 ;; + --memory) MEMORY="$2"; shift 2 ;; + --cpus) CPUS="$2"; shift 2 ;; + --ssh-port) SSH_PORT="$2"; shift 2 ;; + *) echo "unknown option: $1"; usage ;; + esac +done + +if [ ! 
-f "$IMAGE" ]; then + echo "error: image not found: $IMAGE" + exit 1 +fi + +ACCEL="tcg" +[ -r /dev/kvm ] && ACCEL="kvm" + +QEMU_ARGS=( + qemu-system-x86_64 + -accel "$ACCEL" + -m "$MEMORY" + -smp "$CPUS" + -drive "file=$IMAGE,format=qcow2,snapshot=on" + -nic "user,hostfwd=tcp::${SSH_PORT}-:22" + -nographic +) + +if [ "$ACCEL" != "tcg" ]; then + QEMU_ARGS+=(-cpu host) +fi + +FS_ID=0 +for mount_path in "${MOUNTS[@]}"; do + mount_path=$(realpath "$mount_path") + name=$(basename "$mount_path") + QEMU_ARGS+=( + -virtfs "local,path=$mount_path,mount_tag=mount_$name,security_model=none,id=fs${FS_ID}" + ) + FS_ID=$((FS_ID + 1)) +done + +if [ -n "$CLAUDE_DIR" ]; then + CLAUDE_DIR=$(realpath "$CLAUDE_DIR") + QEMU_ARGS+=( + -virtfs "local,path=$CLAUDE_DIR,mount_tag=claude,security_model=none,id=fs${FS_ID}" + ) +fi + +if [ -n "$CLAUDE_JSON" ]; then + QEMU_ARGS+=(-fw_cfg "name=opt/claude.json,file=$CLAUDE_JSON") +fi + +exec "${QEMU_ARGS[@]}"