Initial commit — OpenClaw VM infrastructure

- ansible/: VM provisioning playbooks and roles
  - provision-vm.yml: create KVM VM from Ubuntu cloud image
  - install.yml: install OpenClaw on guest (upstream)
  - customize.yml: swappiness, virtiofs fstab, linger
  - roles/vm/: libvirt domain XML, cloud-init templates
  - inventory.yml + host_vars/zap.yml: zap instance config
- backup-openclaw-vm.sh: daily rsync + MinIO upload
- restore-openclaw-vm.sh: full redeploy from scratch
- README.md: full operational documentation

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
William Valentin
2026-03-12 12:18:31 -07:00
commit aceeb7b542
71 changed files with 7840 additions and 0 deletions

View File

@@ -0,0 +1,34 @@
---
# VM provisioning defaults — override in host_vars/<instance>.yml

# Libvirt connection URI (system daemon, not the per-user session)
vm_libvirt_uri: qemu:///system

# Cloud image: release codename plus the download URL / local cache path
# derived from it; the cache avoids re-downloading on every provision run
vm_ubuntu_release: noble
vm_cloud_image_url: "https://cloud-images.ubuntu.com/{{ vm_ubuntu_release }}/current/{{ vm_ubuntu_release }}-server-cloudimg-amd64.img"
vm_cloud_image_cache: "/var/lib/libvirt/images/{{ vm_ubuntu_release }}-cloudimg-amd64.img"

# VM identity — intentionally empty; provision-vm.yml asserts these are set
# in host_vars before doing anything
vm_domain: "" # full libvirt domain name, e.g. "zap [claw]"
vm_hostname: "" # guest hostname
vm_disk_path: "" # path to qcow2 disk image

# Resources
vm_memory_mib: 3072 # guest RAM in MiB (domain XML converts to KiB)
vm_vcpus: 2
vm_disk_size: "60G" # qcow2 virtual size passed to qemu-img create

# Network
vm_mac: "" # MAC address, e.g. "52:54:00:01:00:71"
vm_ip: "" # static IP for DHCP reservation
vm_network: default # libvirt network that receives the DHCP reservation

# virtiofs share (host → guest) — leave both empty to omit the share
vm_virtiofs_source: "" # host path
vm_virtiofs_tag: "" # mount tag used inside guest

# OVMF firmware (Arch/CachyOS paths); per-domain NVRAM files are created
# under vm_ovmf_vars_dir from the vars template
vm_ovmf_code: /usr/share/edk2/x64/OVMF_CODE.secboot.4m.fd
vm_ovmf_vars_template: /usr/share/edk2/x64/OVMF_VARS.4m.fd
vm_ovmf_vars_dir: /var/lib/libvirt/qemu/nvram

View File

@@ -0,0 +1,149 @@
---
# Provision a KVM/libvirt VM from an Ubuntu cloud image.
# Runs on the hypervisor host (localhost).

- name: Validate required variables
  ansible.builtin.assert:
    that:
      - vm_domain | length > 0
      - vm_hostname | length > 0
      - vm_disk_path | length > 0
      - vm_mac | length > 0
      - vm_ip | length > 0
    fail_msg: "vm_domain, vm_hostname, vm_disk_path, vm_mac, and vm_ip must all be set in host_vars"

- name: Install host dependencies
  ansible.builtin.package:
    name:
      - qemu-img
      - genisoimage
      - libvirt-utils
    state: present

# --- Cloud image: download once, then reuse the cached copy ----------------
- name: Check if cloud image cache exists
  ansible.builtin.stat:
    path: "{{ vm_cloud_image_cache }}"
  register: cached_image

- name: Download Ubuntu cloud image
  ansible.builtin.get_url:
    url: "{{ vm_cloud_image_url }}"
    dest: "{{ vm_cloud_image_cache }}"
    mode: "0644"
    timeout: 300
  when: not cached_image.stat.exists

# --- Disk image: qcow2 overlay backed by the cached cloud image ------------
- name: Check if VM disk already exists
  ansible.builtin.stat:
    path: "{{ vm_disk_path }}"
  register: disk_stat

- name: Create VM disk from cloud image
  ansible.builtin.command:
    cmd: >
      qemu-img create -f qcow2 -F qcow2
      -b {{ vm_cloud_image_cache }}
      {{ vm_disk_path }} {{ vm_disk_size }}
    creates: "{{ vm_disk_path }}"
  when: not disk_stat.stat.exists

# --- Cloud-init seed ISO: rendered templates packed into a cidata volume ---
- name: Create cloud-init temp directory
  ansible.builtin.tempfile:
    state: directory
    suffix: cloud-init
  register: seed_tmpdir

- name: Write cloud-init user-data
  ansible.builtin.template:
    src: cloud-init-user-data.j2
    dest: "{{ seed_tmpdir.path }}/user-data"
    mode: "0644"
  vars:
    vm_ssh_keys: "{{ openclaw_ssh_keys | default([]) }}"

- name: Write cloud-init meta-data
  ansible.builtin.template:
    src: cloud-init-meta-data.j2
    dest: "{{ seed_tmpdir.path }}/meta-data"
    mode: "0644"

# vm_seed_iso is also consumed by domain.xml.j2 when the domain is defined
- name: Set seed ISO path fact
  ansible.builtin.set_fact:
    vm_seed_iso: "/var/lib/libvirt/images/{{ vm_hostname }}-seed.iso"

# Regenerated on every run (changed_when: true) so edits to the templates
# always reach the ISO
- name: Create cloud-init seed ISO
  ansible.builtin.command:
    cmd: >
      genisoimage -output {{ vm_seed_iso }}
      -volid cidata -joliet -rock
      {{ seed_tmpdir.path }}/user-data
      {{ seed_tmpdir.path }}/meta-data
  changed_when: true

- name: Clean up cloud-init temp directory
  ansible.builtin.file:
    path: "{{ seed_tmpdir.path }}"
    state: absent

# --- VM definition: define only when the domain is not already known -------
- name: Check if VM domain already exists
  community.libvirt.virt:
    command: list_vms
    uri: "{{ vm_libvirt_uri }}"
  register: defined_domains

- name: Define VM from XML template
  community.libvirt.virt:
    command: define
    xml: "{{ lookup('template', 'domain.xml.j2') }}"
    uri: "{{ vm_libvirt_uri }}"
  when: vm_domain not in defined_domains.list_vms

# --- Network: static DHCP lease; tolerate a pre-existing reservation -------
- name: Add static DHCP reservation
  ansible.builtin.command:
    cmd: >
      virsh -c {{ vm_libvirt_uri }} net-update {{ vm_network }}
      add ip-dhcp-host
      '<host mac="{{ vm_mac }}" name="{{ vm_hostname }}" ip="{{ vm_ip }}"/>'
      --live --config
  register: net_update
  failed_when:
    - net_update.rc != 0
    - "'already exists' not in net_update.stderr"
  changed_when: net_update.rc == 0

# --- Autostart & boot ------------------------------------------------------
- name: Enable autostart
  community.libvirt.virt:
    name: "{{ vm_domain }}"
    autostart: true
    uri: "{{ vm_libvirt_uri }}"

- name: Start VM
  community.libvirt.virt:
    name: "{{ vm_domain }}"
    state: running
    uri: "{{ vm_libvirt_uri }}"

- name: Wait for SSH to become available
  ansible.builtin.wait_for:
    host: "{{ vm_ip }}"
    port: 22
    delay: 10
    timeout: 180
    state: started
  delegate_to: localhost

- name: VM is ready
  ansible.builtin.debug:
    msg: "VM '{{ vm_domain }}' is up at {{ vm_ip }}. Run install.yml + customize.yml to provision the guest."

View File

@@ -0,0 +1,2 @@
# cloud-init NoCloud meta-data, rendered per-VM by provision-vm.yml.
# Templated scalars are quoted so that number/boolean-looking hostnames
# survive cloud-init's YAML parse as strings.
instance-id: "{{ vm_hostname }}-{{ vm_mac | replace(':', '') }}"
local-hostname: "{{ vm_hostname }}"

View File

@@ -0,0 +1,28 @@
#cloud-config
# Rendered per-VM by provision-vm.yml; vm_ssh_keys is passed as a task var.
# Templated scalars are quoted so number/boolean-looking values survive
# cloud-init's YAML parse as strings.
hostname: "{{ vm_hostname }}"
manage_etc_hosts: true

# Enable root SSH with key(s) from the host; password auth stays disabled
disable_root: false
ssh_pwauth: false
users:
  - name: root
    ssh_authorized_keys:
{% for key in vm_ssh_keys | default([]) %}
      - "{{ key }}"
{% endfor %}

# Grow root partition (and filesystem) to fill the disk on first boot
growpart:
  mode: auto
  devices: ["/"]
resize_rootfs: true

# qemu-guest-agent — the domain XML wires up its virtio channel
packages:
  - qemu-guest-agent
runcmd:
  - systemctl enable --now qemu-guest-agent

View File

@@ -0,0 +1,123 @@
<domain type='kvm'>
  <name>{{ vm_domain }}</name>
  <metadata>
    <libosinfo:libosinfo xmlns:libosinfo="http://libosinfo.org/xmlns/libvirt/domain/1.0">
      <libosinfo:os id="http://ubuntu.com/ubuntu/24.04"/>
    </libosinfo:libosinfo>
  </metadata>

  <!-- vm_memory_mib is declared in MiB; libvirt expects KiB here -->
  <memory unit='KiB'>{{ vm_memory_mib * 1024 }}</memory>
  <currentMemory unit='KiB'>{{ vm_memory_mib * 1024 }}</currentMemory>

  <!-- Shared memfd backing is required by the virtiofs share below -->
  <memoryBacking>
    <source type='memfd'/>
    <access mode='shared'/>
  </memoryBacking>

  <vcpu placement='static'>{{ vm_vcpus }}</vcpu>

  <!-- Secure-boot EFI; per-domain NVRAM is created from the vars template -->
  <os firmware='efi'>
    <type arch='x86_64' machine='pc-q35-10.2'>hvm</type>
    <firmware>
      <feature enabled='no' name='enrolled-keys'/>
      <feature enabled='yes' name='secure-boot'/>
    </firmware>
    <loader readonly='yes' secure='yes' type='pflash' format='raw'>{{ vm_ovmf_code }}</loader>
    <nvram template='{{ vm_ovmf_vars_template }}' templateFormat='raw' format='raw'>{{ vm_ovmf_vars_dir }}/{{ vm_domain }}_VARS.fd</nvram>
    <boot dev='hd'/>
  </os>

  <features>
    <acpi/>
    <apic/>
    <vmport state='off'/>
    <smm state='on'/>
  </features>

  <cpu mode='host-passthrough' check='none' migratable='on'/>

  <clock offset='utc'>
    <timer name='rtc' tickpolicy='catchup'/>
    <timer name='pit' tickpolicy='delay'/>
    <timer name='hpet' present='no'/>
  </clock>

  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>

  <pm>
    <suspend-to-mem enabled='no'/>
    <suspend-to-disk enabled='no'/>
  </pm>

  <devices>
    <emulator>/usr/bin/qemu-system-x86_64</emulator>

    <!-- Primary disk -->
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' discard='unmap'/>
      <source file='{{ vm_disk_path }}'/>
      <target dev='vda' bus='virtio'/>
    </disk>

    <!-- Cloud-init seed ISO (removed after first boot) -->
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='{{ vm_seed_iso }}'/>
      <target dev='sda' bus='sata'/>
      <readonly/>
    </disk>

    <!-- virtio-serial bus backing the guest-agent channel -->
    <controller type='virtio-serial' index='0'/>

    <!-- Network -->
    <interface type='network'>
      <mac address='{{ vm_mac }}'/>
      <source network='{{ vm_network }}'/>
      <model type='virtio'/>
    </interface>

    <!-- Serial console -->
    <serial type='pty'>
      <target type='isa-serial' port='0'>
        <model name='isa-serial'/>
      </target>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>

    <!-- qemu-guest-agent channel -->
    <channel type='unix'>
      <target type='virtio' name='org.qemu.guest_agent.0'/>
    </channel>

{% if vm_virtiofs_source and vm_virtiofs_tag %}
    <!-- virtiofs host share (only when both source and tag are set) -->
    <filesystem type='mount' accessmode='passthrough'>
      <driver type='virtiofs'/>
      <source dir='{{ vm_virtiofs_source }}'/>
      <target dir='{{ vm_virtiofs_tag }}'/>
    </filesystem>
{% endif %}

    <!-- TPM 2.0 -->
    <tpm model='tpm-crb'>
      <backend type='emulator' version='2.0'/>
    </tpm>

    <!-- Watchdog -->
    <watchdog model='itco' action='reset'/>

    <!-- Memory balloon -->
    <memballoon model='virtio'>
      <stats period='5'/>
    </memballoon>

    <!-- RNG -->
    <rng model='virtio'>
      <backend model='random'>/dev/urandom</backend>
    </rng>

    <!-- SPICE (for virt-manager) -->
    <graphics type='spice' autoport='yes' listen='127.0.0.1'>
      <listen type='address' address='127.0.0.1'/>
      <image compression='off'/>
    </graphics>
    <video>
      <model type='virtio' heads='1' primary='yes'/>
    </video>
  </devices>
</domain>