@mlbiam
Created January 20, 2025 15:06
Deploy Single VM Talos Cluster with libvirt

This script deploys a single-VM Talos cluster with Ingress NGINX and the local path provisioner. It was tested on Ubuntu 20.04, 22.04, and 24.04. It pre-configures the VM's networking with a static IP, DNS servers, and a time server. Before running it, you must have the following installed:

  • Docker
  • kubectl
  • kustomize
  • helm

Make sure all of these commands are available when run via sudo.
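To verify the tools are visible to root and to see how the script is called, here is a quick check plus an example run (the script file name talos-single-vm.sh, the VM name talos-cp-1, and the address 192.168.2.50 are placeholders; the two positional arguments are the VM name and an unused static IP on the 192.168.2.0/24 network the script assumes):

sudo sh -c 'for cmd in docker kubectl kustomize helm; do command -v "$cmd" || echo "missing: $cmd"; done'
sudo ./talos-single-vm.sh talos-cp-1 192.168.2.50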

#!/bin/bash
# Variables
VM_NAME="$1"
RAM_MB=4096 # 4GB RAM
VCPUS=2
DISK_PATH="/var/lib/libvirt/images/${VM_NAME}.qcow2"
DISK_SIZE="50G" # 50GB disk
TALOS_ISO="/var/lib/libvirt/images/metal-amd64.iso"     # unused: the script builds its own ISO with the imager below
METADATA_ISO="/var/lib/libvirt/images/talos-config.iso" # unused
NETWORK_INTERFACE="ens3"
STATIC_IP="$2"
GATEWAY="192.168.2.1"
NAMESERVERS="192.168.2.76,192.168.2.25"
# Create disk image
echo "Creating disk image at $DISK_PATH with size $DISK_SIZE..."
qemu-img create -f qcow2 "$DISK_PATH" "$DISK_SIZE"
# Create the metadata file for Talos
echo "Creating metadata configuration for Talos..."
CONFIG_DIR=$(mktemp -d)
cat > "${CONFIG_DIR}/0xa" <<EOF
addresses:
- address: $STATIC_IP/24
linkName: $NETWORK_INTERFACE
family: inet4
scope: global
flags: permanent
layer: platform
links:
- name: $NETWORK_INTERFACE
logical: false
up: true
mtu: 0
kind: ""
type: ether
layer: platform
routes:
- family: inet4
dst: ""
src: ""
gateway: 192.168.2.1
outLinkName: $NETWORK_INTERFACE
table: main
scope: global
type: unicast
flags: ""
protocol: static
layer: platform
hostnames:
- hostname: $VM_NAME
domainname: ""
layer: platform
resolvers:
- dnsServers:
- 192.168.2.76
- 192.168.2.25
layer: platform
timeServers:
- timeServers:
- time-a-g.nist.gov
layer: platform
operators: []
eternalIPs: []
EOF
cat "${CONFIG_DIR}/0xa"
mkdir -p "$VM_NAME"
# Build a custom Talos ISO with the network configuration embedded in the 0xa META key
docker run --rm -t -v "$PWD/$VM_NAME/_out:/out" ghcr.io/siderolabs/imager:v1.9.2 iso --meta "0xa=$(cat "${CONFIG_DIR}/0xa")"
mv "$VM_NAME/_out/metal-amd64.iso" "/var/lib/libvirt/images/metal-amd64-$VM_NAME.iso"
# Run QEMU with the ISO and metadata
virt-install \
  --name "$VM_NAME" \
  --memory "$RAM_MB" \
  --vcpus "$VCPUS" \
  --disk "size=${DISK_SIZE%G},path=$DISK_PATH,format=qcow2" \
  --cdrom "/var/lib/libvirt/images/metal-amd64-$VM_NAME.iso" \
  --network network=default,model=virtio \
  --os-type linux \
  --os-variant generic \
  --graphics vnc \
  --noautoconsole
# Clean up temporary files
echo "Cleaning up temporary files..."
rm -rf "$CONFIG_DIR"
echo "VM creation complete. Use the QEMU console to complete the Talos installation."
cd $VM_NAME
talosctl gen config $VM_NAME https://$STATIC_IP:6443 --install-disk=/dev/sda --config-patch-control-plane='{"cluster":{"allowSchedulingOnControlPlanes":true}}'
while ! curl -k -s -o /dev/null https://$STATIC_IP:50000; do
  echo "Waiting for HTTPS on port 50000..."
  sleep 5
done
echo "Port 50000 is now available!"
talosctl apply-config --insecure -n $STATIC_IP --file controlplane.yaml
sleep 30
PORT=50000 # The port to check
CHECK_INTERVAL=5 # Time to wait between checks (in seconds)
# Function to check if the port is open
check_port() {
  nc -z -w3 "$STATIC_IP" "$PORT" 2>/dev/null
  return $?
}
# Wait for the port to open
echo "Waiting for port $PORT on $STATIC_IP to be open..."
while ! check_port; do
  echo "Port $PORT is not open yet. Retrying in $CHECK_INTERVAL seconds..."
  sleep $CHECK_INTERVAL
done
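# The Talos API is reachable again; bootstrap etcd on the single control-plane node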
talosctl bootstrap --nodes $STATIC_IP --endpoints $STATIC_IP --talosconfig=./talosconfig
while ! curl -k -s -o /dev/null https://$STATIC_IP:6443; do
  echo "Waiting for HTTPS on port 6443..."
  sleep 5
done
echo "Port 6443 is now available!"
talosctl kubeconfig --nodes $STATIC_IP --endpoints $STATIC_IP --talosconfig=./talosconfig ./kubeconfig
export KUBECONFIG=./kubeconfig
talosctl health --talosconfig=talosconfig --nodes "$STATIC_IP" --endpoints "$STATIC_IP"
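# Install Ingress NGINX; label its namespace privileged so the hostPort patch below is allowed
# by Pod Security admission, then expose the controller on node ports 80 and 443 via hostPort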
helm upgrade --install ingress-nginx ingress-nginx \
  --repo https://kubernetes.github.io/ingress-nginx \
  --namespace ingress-nginx --create-namespace
kubectl patch ns ingress-nginx -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
kubectl patch deployments ingress-nginx-controller -n ingress-nginx -p '{"spec":{"template":{"spec":{"containers":[{"name":"controller","ports":[{"containerPort":80,"hostPort":80,"protocol":"TCP"},{"containerPort":443,"hostPort":443,"protocol":"TCP"}]}]}}}}'
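# Deploy local-path-provisioner with kustomize: use /var/local-path-provisioner for volumes,
# make its StorageClass the default, and allow privileged pods in its namespace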
KUSTOMIZE_DIR=$(mktemp -d)
cat > "${KUSTOMIZE_DIR}/kustomization.yaml" <<EOF
# kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- github.com/rancher/local-path-provisioner/deploy?ref=v0.0.26
patches:
- patch: |-
kind: ConfigMap
apiVersion: v1
metadata:
name: local-path-config
namespace: local-path-storage
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/var/local-path-provisioner"]
}
]
}
- patch: |-
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-path
annotations:
storageclass.kubernetes.io/is-default-class: "true"
- patch: |-
apiVersion: v1
kind: Namespace
metadata:
name: local-path-storage
labels:
pod-security.kubernetes.io/enforce: privileged
EOF
kustomize build "$KUSTOMIZE_DIR" | kubectl apply -f -
cd ../
chown -R virt "$VM_NAME"
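Once the script finishes, the cluster can be checked with the generated kubeconfig, for example (using the VM name from the example above):

kubectl --kubeconfig ./talos-cp-1/kubeconfig get nodes -o wide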