Add orb and sun VMs with virtiofs swarm-common share

- Add orb (192.168.122.183) and sun (192.168.122.184) to inventory
- Create host_vars for orb and sun (fresh install, brew_packages: [])
- Add brew_packages to zap host_vars (gogcli, himalaya, kubernetes-cli, opencode)
- customize.yml: parameterize brew_packages via host_vars, add /mnt/swarm-common
  virtiofs+bindfs mount for all VMs, install bindfs, fix Homebrew install
- provision-vm.yml: remove become requirement; use virsh vol commands for all
  disk/image operations (no sudo needed)
- roles/vm/tasks/main.yml: rewrite disk provisioning to use virsh vol-create-as
  and vol-upload; fix vol name quoting for names with spaces; use qcow2 backing
- domain.xml.j2: always include swarm-common virtiofs share; make main share
  conditional on vm_virtiofs_source/tag

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
William Valentin
2026-03-13 11:06:08 -07:00
parent c8aaa40cd8
commit ea5e2c2ef3
8 changed files with 193 additions and 48 deletions

21
ansible/host_vars/orb.yml Normal file
View File

@@ -0,0 +1,21 @@
---
# Host-specific vars for orb [claw]
# ── VM provisioning ────────────────────────────────────────────────────────
vm_domain: "orb [claw]"
vm_hostname: orb
vm_memory_mib: 2048
vm_vcpus: 2
vm_disk_path: "/var/lib/libvirt/images/orb [claw].qcow2"
vm_disk_size: "60G"
vm_mac: "52:54:00:e2:bb:4f"
vm_ip: "192.168.122.183"
vm_network: default
# ── OpenClaw guest provisioning ────────────────────────────────────────────
openclaw_install_mode: release
brew_packages: []
openclaw_ssh_keys:
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC6l6Z3CBr0gU6tVMddCW1vjYk5CK8TExp/AViiUEJGADci/Dk26XnfmG0XjexIjD7L4a/V5hIh+0HEIwM146vcfRnB1lXty5BV6Rhum7J3qp7xXBPghqCC9tujc5KiMQZyCsLICFyhHOdqRoquUqbFeYL7cT+Vk+J+HSGXmXZvJGGSpW7b94wkGADkSTEn2u8FRpynU3vZ6KIIiBG+oreWl7LcBhlztZELlwiRx66HgW8t/DhJlL6mhfKJ6C0Sg7s98SwvsT+jJxsaip69SlXvAJhrun2oDvS+X+a/2u9LD6w8GazmkX6m626SqGEGdw21l+oJQf+2LphQ3h8gIScNg5LmhaxXFqo718nmKEi9aE1MNGU4HWsNLJGxXvPTZqTreyS81yKMiqSZKZ2WzwaCQO2VeRmHyuDgrlGUGcU9DFi9pEkkjiChp1PE7XNbIwTurUCC19WUHcijY1K/ZH9Ku8GXgWf0109QZpJKc/04dRlYNBgUBL7dCTxbC/UjIdDMmgdRmPZ4oDUqUyBMsIEu8Wsx2snaUh4E2i5m0Vrd4Yy0+Eiu5YZBZt2IsljFE+c0KGSZMOyoCJksmqlTfvC0Ejt/bVsNhbZDgVB2K3sxRYa9Sa6I9nlCm7bSZC94vILVKkDsivmi+sj9dTV8mlJhA/yaGsBOokbjYYAa2cgQyw== will@squareffect.com"

21
ansible/host_vars/sun.yml Normal file
View File

@@ -0,0 +1,21 @@
---
# Host-specific vars for sun [claw]
# ── VM provisioning ────────────────────────────────────────────────────────
vm_domain: "sun [claw]"
vm_hostname: sun
vm_memory_mib: 2048
vm_vcpus: 2
vm_disk_path: "/var/lib/libvirt/images/sun [claw].qcow2"
vm_disk_size: "60G"
vm_mac: "52:54:00:6b:8b:f5"
vm_ip: "192.168.122.184"
vm_network: default
# ── OpenClaw guest provisioning ────────────────────────────────────────────
openclaw_install_mode: release
brew_packages: []
openclaw_ssh_keys:
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC6l6Z3CBr0gU6tVMddCW1vjYk5CK8TExp/AViiUEJGADci/Dk26XnfmG0XjexIjD7L4a/V5hIh+0HEIwM146vcfRnB1lXty5BV6Rhum7J3qp7xXBPghqCC9tujc5KiMQZyCsLICFyhHOdqRoquUqbFeYL7cT+Vk+J+HSGXmXZvJGGSpW7b94wkGADkSTEn2u8FRpynU3vZ6KIIiBG+oreWl7LcBhlztZELlwiRx66HgW8t/DhJlL6mhfKJ6C0Sg7s98SwvsT+jJxsaip69SlXvAJhrun2oDvS+X+a/2u9LD6w8GazmkX6m626SqGEGdw21l+oJQf+2LphQ3h8gIScNg5LmhaxXFqo718nmKEi9aE1MNGU4HWsNLJGxXvPTZqTreyS81yKMiqSZKZ2WzwaCQO2VeRmHyuDgrlGUGcU9DFi9pEkkjiChp1PE7XNbIwTurUCC19WUHcijY1K/ZH9Ku8GXgWf0109QZpJKc/04dRlYNBgUBL7dCTxbC/UjIdDMmgdRmPZ4oDUqUyBMsIEu8Wsx2snaUh4E2i5m0Vrd4Yy0+Eiu5YZBZt2IsljFE+c0KGSZMOyoCJksmqlTfvC0Ejt/bVsNhbZDgVB2K3sxRYa9Sa6I9nlCm7bSZC94vILVKkDsivmi+sj9dTV8mlJhA/yaGsBOokbjYYAa2cgQyw== will@squareffect.com"

View File

@@ -17,5 +17,11 @@ vm_virtiofs_tag: swarm
# ── OpenClaw guest provisioning ────────────────────────────────────────────
openclaw_install_mode: release
brew_packages:
- gogcli
- himalaya
- kubernetes-cli
- opencode
openclaw_ssh_keys:
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC6l6Z3CBr0gU6tVMddCW1vjYk5CK8TExp/AViiUEJGADci/Dk26XnfmG0XjexIjD7L4a/V5hIh+0HEIwM146vcfRnB1lXty5BV6Rhum7J3qp7xXBPghqCC9tujc5KiMQZyCsLICFyhHOdqRoquUqbFeYL7cT+Vk+J+HSGXmXZvJGGSpW7b94wkGADkSTEn2u8FRpynU3vZ6KIIiBG+oreWl7LcBhlztZELlwiRx66HgW8t/DhJlL6mhfKJ6C0Sg7s98SwvsT+jJxsaip69SlXvAJhrun2oDvS+X+a/2u9LD6w8GazmkX6m626SqGEGdw21l+oJQf+2LphQ3h8gIScNg5LmhaxXFqo718nmKEi9aE1MNGU4HWsNLJGxXvPTZqTreyS81yKMiqSZKZ2WzwaCQO2VeRmHyuDgrlGUGcU9DFi9pEkkjiChp1PE7XNbIwTurUCC19WUHcijY1K/ZH9Ku8GXgWf0109QZpJKc/04dRlYNBgUBL7dCTxbC/UjIdDMmgdRmPZ4oDUqUyBMsIEu8Wsx2snaUh4E2i5m0Vrd4Yy0+Eiu5YZBZt2IsljFE+c0KGSZMOyoCJksmqlTfvC0Ejt/bVsNhbZDgVB2K3sxRYa9Sa6I9nlCm7bSZC94vILVKkDsivmi+sj9dTV8mlJhA/yaGsBOokbjYYAa2cgQyw== will@squareffect.com"

View File

@@ -6,3 +6,11 @@ all:
ansible_host: 192.168.122.182
ansible_user: root
ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
orb:
ansible_host: 192.168.122.183
ansible_user: root
ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
sun:
ansible_host: 192.168.122.184
ansible_user: root
ansible_ssh_common_args: "-o StrictHostKeyChecking=no"

View File

@@ -5,6 +5,9 @@
# Usage:
# ansible-playbook -i inventory.yml playbooks/customize.yml
# ansible-playbook -i inventory.yml playbooks/customize.yml --limit zap
#
# Per-host variables (set in host_vars/<name>.yml):
# brew_packages: [] # list of Homebrew packages to install
- name: OpenClaw VM customizations
hosts: openclaw_servers
@@ -26,27 +29,50 @@
line: 'vm.swappiness=10'
state: present
- name: Create virtiofs mount point # ── swarm-common virtiofs share ────────────────────────────────────────
# Host: ~/lab/swarm/swarm-common → Guest: /mnt/swarm-common
# Virtiofs is mounted raw to /mnt/swarm-common-raw, then bindfs remaps
# ownership to openclaw before presenting at /mnt/swarm-common.
- name: Create swarm-common raw virtiofs mount point
ansible.builtin.file: ansible.builtin.file:
path: /mnt/swarm path: /mnt/swarm-common-raw
state: directory state: directory
mode: "0755" mode: "0755"
- name: Mount virtiofs swarm share via fstab - name: Create swarm-common bindfs mount point
ansible.builtin.file:
path: /mnt/swarm-common
state: directory
mode: "0755"
- name: Install bindfs (for virtiofs ownership remapping)
ansible.builtin.apt:
name: bindfs
state: present
- name: Add swarm-common virtiofs entry to fstab
ansible.posix.mount: ansible.posix.mount:
path: /mnt/swarm path: /mnt/swarm-common-raw
src: swarm src: swarm-common
fstype: virtiofs
opts: defaults
state: present
# Note: actual mount requires reboot after VM config update
- name: Add swarm-common bindfs entry to fstab
ansible.posix.mount:
path: /mnt/swarm-common
src: "bindfs#/mnt/swarm-common-raw"
fstype: fuse
opts: "force-user=openclaw,force-group=openclaw,perms=a+rX,create-for-user=openclaw,create-for-group=openclaw"
state: present
- name: Ensure openclaw user lingering is enabled (for user systemd services)
ansible.builtin.command:
cmd: loginctl enable-linger openclaw
changed_when: false
# ── Homebrew ─────────────────────────────────────────────────────────────── # ── Homebrew ───────────────────────────────────────────────────────────
- name: Install Homebrew dependencies
ansible.builtin.apt:
@@ -58,6 +84,14 @@
- git
state: present
- name: Pre-create /home/linuxbrew owned by openclaw
ansible.builtin.file:
path: /home/linuxbrew
state: directory
owner: openclaw
group: openclaw
mode: "0755"
- name: Install Homebrew (as openclaw user)
ansible.builtin.shell: |
NONINTERACTIVE=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
@@ -82,11 +116,7 @@
environment:
HOME: /home/openclaw
PATH: /home/linuxbrew/.linuxbrew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
loop: loop: "{{ brew_packages | default([]) }}"
- gogcli
- himalaya
- kubernetes-cli
- opencode
register: brew_install
changed_when: "'Installing' in brew_install.stdout"

View File

@@ -21,7 +21,7 @@
- name: Provision OpenClaw VM
hosts: openclaw_servers
connection: local
become: true become: false
vars:
ansible_python_interpreter: /usr/bin/python3

View File

@@ -1,6 +1,7 @@
---
# Provision a KVM/libvirt VM from an Ubuntu cloud image.
# Runs on the hypervisor host (localhost). # Runs on the hypervisor host (localhost) without requiring sudo.
# All writes to /var/lib/libvirt/images/ go through libvirtd (virsh vol-upload).
- name: Validate required variables
ansible.builtin.assert:
@@ -12,44 +13,71 @@
- vm_ip | length > 0
fail_msg: "vm_domain, vm_hostname, vm_disk_path, vm_mac, and vm_ip must all be set in host_vars"
- name: Install host dependencies
ansible.builtin.package:
name:
- qemu-img
- genisoimage
- libvirt-utils
state: present
# ── Cloud image ──────────────────────────────────────────────────────────── # ── Cloud image ────────────────────────────────────────────────────────────
- name: Check if cloud image cache exists - name: Check if cloud image volume exists in pool
ansible.builtin.stat: ansible.builtin.command:
path: "{{ vm_cloud_image_cache }}" cmd: virsh -c {{ vm_libvirt_uri }} vol-info --pool default {{ vm_cloud_image_cache | basename }}
register: cloud_image_stat register: cloud_image_vol_stat
failed_when: false
changed_when: false
- name: Download Ubuntu cloud image - name: Download Ubuntu cloud image to temp path
ansible.builtin.get_url: ansible.builtin.get_url:
url: "{{ vm_cloud_image_url }}" url: "{{ vm_cloud_image_url }}"
dest: "{{ vm_cloud_image_cache }}" dest: "/tmp/{{ vm_cloud_image_cache | basename }}"
mode: "0644" mode: "0644"
timeout: 300 timeout: 600
when: not cloud_image_stat.stat.exists when: cloud_image_vol_stat.rc != 0
- name: Create cloud image volume in pool
ansible.builtin.shell:
cmd: >
virsh -c {{ vm_libvirt_uri }} vol-create-as default
'{{ vm_cloud_image_cache | basename }}' 4G --format raw
register: cloud_vol_create
failed_when:
- cloud_vol_create.rc != 0
- "'exists already' not in cloud_vol_create.stderr"
changed_when: cloud_vol_create.rc == 0
when: cloud_image_vol_stat.rc != 0
- name: Upload cloud image to pool
ansible.builtin.shell:
cmd: >
virsh -c {{ vm_libvirt_uri }} vol-upload
--pool default '{{ vm_cloud_image_cache | basename }}'
'/tmp/{{ vm_cloud_image_cache | basename }}'
when: cloud_image_vol_stat.rc != 0 and (cloud_vol_create.rc | default(1)) == 0
- name: Remove temp cloud image download
ansible.builtin.file:
path: "/tmp/{{ vm_cloud_image_cache | basename }}"
state: absent
when: cloud_image_vol_stat.rc != 0
# ── Disk image ───────────────────────────────────────────────────────────── # ── Disk image ─────────────────────────────────────────────────────────────
- name: Check if VM disk already exists - name: Check if VM disk volume exists in pool
ansible.builtin.stat: ansible.builtin.shell:
path: "{{ vm_disk_path }}" cmd: virsh -c {{ vm_libvirt_uri }} vol-info --pool default '{{ vm_disk_path | basename }}'
register: vm_disk_stat register: vm_disk_vol_stat
failed_when: false
changed_when: false
- name: Create VM disk from cloud image - name: Create VM disk volume backed by cloud image
ansible.builtin.command: ansible.builtin.shell:
cmd: > cmd: >
qemu-img create -f qcow2 -F qcow2 virsh -c {{ vm_libvirt_uri }} vol-create-as default
-b {{ vm_cloud_image_cache }} '{{ vm_disk_path | basename }}' {{ vm_disk_size }} --format qcow2
{{ vm_disk_path }} {{ vm_disk_size }} --backing-vol '{{ vm_cloud_image_cache | basename }}'
creates: "{{ vm_disk_path }}" --backing-vol-format qcow2
when: not vm_disk_stat.stat.exists register: disk_vol_create
failed_when:
- disk_vol_create.rc != 0
- "'exists already' not in disk_vol_create.stderr"
changed_when: disk_vol_create.rc == 0
when: vm_disk_vol_stat.rc != 0
# ── Cloud-init seed ISO ──────────────────────────────────────────────────── # ── Cloud-init seed ISO ────────────────────────────────────────────────────
@@ -73,23 +101,46 @@
dest: "{{ cloud_init_dir.path }}/meta-data"
mode: "0644"
- name: Set seed ISO path fact - name: Set seed ISO facts
ansible.builtin.set_fact: ansible.builtin.set_fact:
vm_seed_iso_name: "{{ vm_hostname }}-seed.iso"
vm_seed_iso_tmp: "/tmp/{{ vm_hostname }}-seed.iso"
vm_seed_iso: "/var/lib/libvirt/images/{{ vm_hostname }}-seed.iso" vm_seed_iso: "/var/lib/libvirt/images/{{ vm_hostname }}-seed.iso"
- name: Create cloud-init seed ISO - name: Create cloud-init seed ISO in temp path
ansible.builtin.command: ansible.builtin.command:
cmd: > cmd: >
genisoimage -output {{ vm_seed_iso }} genisoimage -output {{ vm_seed_iso_tmp }}
-volid cidata -joliet -rock
{{ cloud_init_dir.path }}/user-data
{{ cloud_init_dir.path }}/meta-data
changed_when: true
- name: Clean up cloud-init temp directory - name: Create seed ISO volume in pool
ansible.builtin.shell:
cmd: >
virsh -c {{ vm_libvirt_uri }} vol-create-as default
'{{ vm_seed_iso_name }}' 4M --format raw
register: seed_vol_create
failed_when:
- seed_vol_create.rc != 0
- "'exists already' not in seed_vol_create.stderr"
changed_when: seed_vol_create.rc == 0
- name: Upload seed ISO to pool
ansible.builtin.shell:
cmd: >
virsh -c {{ vm_libvirt_uri }} vol-upload
--pool default '{{ vm_seed_iso_name }}' '{{ vm_seed_iso_tmp }}'
changed_when: true
- name: Clean up cloud-init temp files
ansible.builtin.file: ansible.builtin.file:
path: "{{ cloud_init_dir.path }}" path: "{{ item }}"
state: absent state: absent
loop:
- "{{ cloud_init_dir.path }}"
- "{{ vm_seed_iso_tmp }}"
# ── VM definition ────────────────────────────────────────────────────────── # ── VM definition ──────────────────────────────────────────────────────────
@@ -118,6 +169,7 @@
register: dhcp_result
failed_when:
- dhcp_result.rc != 0
- "'existing dhcp host entry' not in dhcp_result.stderr"
- "'already exists' not in dhcp_result.stderr"
changed_when: dhcp_result.rc == 0
@@ -140,7 +192,7 @@
host: "{{ vm_ip }}"
port: 22
delay: 10
timeout: 180 timeout: 300
state: started
delegate_to: localhost

View File

@@ -84,7 +84,7 @@
<target type='virtio' name='org.qemu.guest_agent.0'/>
</channel>
{% if vm_virtiofs_source and vm_virtiofs_tag %} {% if vm_virtiofs_source | default('') and vm_virtiofs_tag | default('') %}
<!-- virtiofs host share -->
<filesystem type='mount' accessmode='passthrough'>
<driver type='virtiofs'/>
@@ -93,6 +93,13 @@
</filesystem> </filesystem>
{% endif %} {% endif %}
<!-- virtiofs swarm-common share -->
<filesystem type='mount' accessmode='passthrough'>
<driver type='virtiofs'/>
<source dir='/home/will/lab/swarm/swarm-common'/>
<target dir='swarm-common'/>
</filesystem>
<!-- TPM 2.0 -->
<tpm model='tpm-crb'>
<backend type='emulator' version='2.0'/>