| 1 | #!/usr/bin/env bash |
| 2 | # SPDX-License-Identifier: AGPL-3.0-or-later |
| 3 | # |
| 4 | # Provision a small DigitalOcean-backed shithub Actions runner pool. |
| 5 | # |
| 6 | # This intentionally creates cattle runner hosts only. It does not register |
| 7 | # runner tokens and does not write any shithub production secrets. Registration |
| 8 | # tokens are generated with shithubd and distributed by Ansible after droplets |
| 9 | # exist. |
| 10 | |
set -euo pipefail

# Directory containing this script; used to locate the default cloud-init file.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"

# Configuration knobs.  Each honors a same-named environment variable and may
# also be overridden by a command-line flag parsed below.  ':=' assigns the
# default only when the variable is unset or empty.
: "${POOL_NAME:=shared-linux}"
: "${PROJECT_NAME:=shithub-prod}"
: "${REGION:=sfo3}"
: "${SIZE:=s-2vcpu-4gb}"
: "${IMAGE:=ubuntu-24-04-x64}"
: "${COUNT:=1}"
: "${SSH_KEY_NAME:=}"
: "${SSH_ALLOWED_CIDRS:=}"
: "${VPC_UUID:=}"
: "${RESOURCE_TAG:=shithub-actions-runner}"
: "${POOL_TAG:=}"
: "${FIREWALL_NAME:=}"
: "${USER_DATA_FILE:=$SCRIPT_DIR/actions-runner-cloud-init.yaml}"
DRY_RUN=0
| 29 | |
# Print CLI help to stdout.
usage() {
  cat <<'HELP'
Usage:
  deploy/doctl/provision-actions-runner-pool.sh [flags]

Flags:
  --pool-name NAME          Pool slug used in droplet names (default: shared-linux)
  --project-name NAME       DigitalOcean project name (default: shithub-prod)
  --region SLUG             Droplet region (default: sfo3)
  --size SLUG               Droplet size (default: s-2vcpu-4gb)
  --image SLUG              Droplet image (default: ubuntu-24-04-x64)
  --count N                 Desired droplet count for this pool (default: 1)
  --ssh-key-name NAME       DigitalOcean SSH key name to install for root
  --ssh-allowed-cidrs LIST  Comma-separated CIDRs allowed to SSH to runners
  --vpc-uuid UUID           Optional VPC UUID for the droplets
  --resource-tag TAG        Shared tag for all runner droplets (default: shithub-actions-runner)
  --pool-tag TAG            Extra pool tag (default: shithub-actions-<pool-name>)
  --firewall-name NAME      Cloud firewall name (default: shithub-actions-runners-<pool-name>)
  --user-data-file PATH     Cloud-init file with no secrets
  --dry-run                 Validate inputs and print the plan without creating resources
  -h, --help                Show this help

Environment variables with the same uppercase names are also honored.

Example:
  SSH_KEY_NAME=macbook-pro \
  SSH_ALLOWED_CIDRS=203.0.113.4/32 \
  ./deploy/doctl/provision-actions-runner-pool.sh --dry-run
HELP
}
| 60 | |
# Print an error to stderr and abort the whole script with status 2.
fatal() {
  printf 'fatal: %s\n' "$*" >&2
  exit 2
}
| 65 | |
# Informational logging to stderr (stdout is reserved for the final JSON
# summary).  Uses printf instead of echo: bash's echo would misinterpret a
# message that happens to be exactly "-n" or "-e" as an option.
log() {
  printf '%s\n' "$*" >&2
}
| 69 | |
# Strip leading and trailing whitespace from $1 and print the result
# (no trailing newline), using pure parameter expansion.
trim() {
  local value="$1"
  local no_lead="${value#"${value%%[![:space:]]*}"}"
  printf '%s' "${no_lead%"${no_lead##*[![:space:]]}"}"
}
| 76 | |
# Abort unless the named executable is reachable on PATH.
require_tool() {
  if ! command -v "$1" >/dev/null 2>&1; then
    fatal "$1 not on PATH"
  fi
}
| 80 | |
# Parse command-line flags.  Flags override the environment-variable defaults
# set above; every value-taking flag aborts via ${2:?...} when its argument
# is missing.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --pool-name) POOL_NAME="${2:?missing value for --pool-name}"; shift 2 ;;
    --project-name) PROJECT_NAME="${2:?missing value for --project-name}"; shift 2 ;;
    --region) REGION="${2:?missing value for --region}"; shift 2 ;;
    --size) SIZE="${2:?missing value for --size}"; shift 2 ;;
    --image) IMAGE="${2:?missing value for --image}"; shift 2 ;;
    --count) COUNT="${2:?missing value for --count}"; shift 2 ;;
    --ssh-key-name) SSH_KEY_NAME="${2:?missing value for --ssh-key-name}"; shift 2 ;;
    --ssh-allowed-cidrs) SSH_ALLOWED_CIDRS="${2:?missing value for --ssh-allowed-cidrs}"; shift 2 ;;
    --vpc-uuid) VPC_UUID="${2:?missing value for --vpc-uuid}"; shift 2 ;;
    --resource-tag) RESOURCE_TAG="${2:?missing value for --resource-tag}"; shift 2 ;;
    --pool-tag) POOL_TAG="${2:?missing value for --pool-tag}"; shift 2 ;;
    --firewall-name) FIREWALL_NAME="${2:?missing value for --firewall-name}"; shift 2 ;;
    --user-data-file) USER_DATA_FILE="${2:?missing value for --user-data-file}"; shift 2 ;;
    --dry-run) DRY_RUN=1; shift ;;
    -h | --help) usage; exit 0 ;;
    *) fatal "unknown flag: $1" ;;
  esac
done
| 148 | |
# Validate the effective configuration before any DigitalOcean API calls.
if ! [[ "$POOL_NAME" =~ ^[a-z0-9][a-z0-9-]*$ ]]; then
  fatal "pool name must be a lowercase slug"
fi
if ! [[ "$RESOURCE_TAG" =~ ^[A-Za-z0-9:_.-]+$ ]]; then
  fatal "resource tag contains unsupported characters"
fi
if ! [[ "$COUNT" =~ ^[0-9]+$ ]]; then
  fatal "count must be a positive integer"
fi
if ! (( COUNT > 0 )); then
  fatal "count must be greater than zero"
fi
if [[ -z "$REGION" ]]; then
  fatal "region is required"
fi
if [[ -z "$SIZE" ]]; then
  fatal "size is required"
fi
if [[ -z "$IMAGE" ]]; then
  fatal "image is required"
fi
if [[ -z "$SSH_KEY_NAME" ]]; then
  fatal "set --ssh-key-name or SSH_KEY_NAME"
fi
if [[ -z "$SSH_ALLOWED_CIDRS" ]]; then
  fatal "set --ssh-allowed-cidrs or SSH_ALLOWED_CIDRS"
fi
if [[ ! -r "$USER_DATA_FILE" ]]; then
  fatal "user-data file not readable: $USER_DATA_FILE"
fi
| 159 | |
# Derive per-pool tag and firewall names when not provided explicitly.
POOL_TAG="${POOL_TAG:-shithub-actions-$POOL_NAME}"
FIREWALL_NAME="${FIREWALL_NAME:-shithub-actions-runners-$POOL_NAME}"
| 166 | |
# Build the SSH inbound rule list from the operator-supplied CIDR allowlist.
# Wide-open CIDRs are rejected outright: runner hosts must only accept SSH
# from operator/VPN ranges.
SSH_RULES=()
IFS=',' read -r -a CIDR_PARTS <<<"$SSH_ALLOWED_CIDRS"
for part in "${CIDR_PARTS[@]}"; do
  candidate="$(trim "$part")"
  if [[ -z "$candidate" ]]; then
    continue
  fi
  if [[ "$candidate" == "0.0.0.0/0" || "$candidate" == "::/0" || "$candidate" == "0/0" ]]; then
    fatal "refusing public SSH CIDR $candidate; use your operator/VPN IP range"
  fi
  if [[ "$candidate" != */* ]]; then
    fatal "SSH CIDR must include a prefix length: $candidate"
  fi
  SSH_RULES+=("protocol:tcp,ports:22,address:$candidate")
done
if (( ${#SSH_RULES[@]} == 0 )); then
  fatal "at least one non-public SSH CIDR is required"
fi
SSH_INBOUND_RULES="${SSH_RULES[*]}"
# DigitalOcean firewall rules accept explicit TCP/UDP port ranges here, not
# the human shorthand "all". Keep this broad at the cloud firewall layer; the
# runner host's ipset firewall enforces the DNS allowlist for job containers.
OUTBOUND_RULES="protocol:tcp,ports:1-65535,address:0.0.0.0/0 protocol:udp,ports:1-65535,address:0.0.0.0/0 protocol:icmp,address:0.0.0.0/0"
| 186 | |
# Preflight: required tools, an authenticated doctl session, and the named
# SSH key must all exist before any resources are created.
for tool in doctl jq; do
  require_tool "$tool"
done

if ! { doctl auth list >/dev/null 2>&1 && doctl account get >/dev/null 2>&1; }; then
  fatal "doctl is not authenticated; run 'doctl auth init'"
fi

# Resolve the SSH key name to its numeric DigitalOcean ID.
SSH_KEY_ID="$(doctl compute ssh-key list --output json | jq -r --arg name "$SSH_KEY_NAME" 'first(.[] | select(.name == $name) | .id) // ""')"
if [[ -z "$SSH_KEY_ID" ]]; then
  fatal "no DigitalOcean SSH key named $SSH_KEY_NAME"
fi
| 196 | |
# Look up the target DigitalOcean project, creating it when absent
# (or only announcing the plan under --dry-run).
PROJECT_ID="$(doctl projects list --output json | jq -r --arg name "$PROJECT_NAME" 'first(.[] | select(.name == $name) | .id) // ""')"
if [[ -n "$PROJECT_ID" ]]; then
  log "project $PROJECT_NAME exists ($PROJECT_ID)"
elif (( DRY_RUN )); then
  PROJECT_ID="dry-run-project-id"
  log "would create project $PROJECT_NAME"
else
  log "creating project $PROJECT_NAME"
  PROJECT_ID="$(doctl projects create \
    --name "$PROJECT_NAME" \
    --purpose "Service or API" \
    --environment Production \
    --description "shithub Actions runner pool" \
    --no-header --format ID)"
fi
| 214 | |
# Ensure a DigitalOcean tag exists.  Idempotent; honors --dry-run.
ensure_tag() {
  local tag="$1"
  if doctl compute tag list --output json | jq -e --arg name "$tag" 'any(.[]; .name == $name)' >/dev/null; then
    log "tag $tag exists"
  elif (( DRY_RUN )); then
    log "would create tag $tag"
  else
    log "creating tag $tag"
    doctl compute tag create "$tag" >/dev/null
  fi
}

ensure_tag "$RESOURCE_TAG"
ensure_tag "$POOL_TAG"
| 231 | |
# Look up (or create) the cloud firewall bound to the shared resource tag.
# Match by name via JSON + jq — consistent with the project/ssh-key lookups
# above — instead of awk over tabular output, whose whitespace-delimited
# column match silently fails when the firewall name contains spaces.
FIREWALL_ID="$(doctl compute firewall list --output json | jq -r --arg name "$FIREWALL_NAME" 'first(.[] | select(.name == $name) | .id) // ""')"
if [[ -z "$FIREWALL_ID" ]]; then
  if (( DRY_RUN )); then
    FIREWALL_ID="dry-run-firewall-id"
    log "would create firewall $FIREWALL_NAME for tag $RESOURCE_TAG"
  else
    log "creating firewall $FIREWALL_NAME for tag $RESOURCE_TAG"
    FIREWALL_ID="$(doctl compute firewall create \
      --name "$FIREWALL_NAME" \
      --tag-names "$RESOURCE_TAG" \
      --inbound-rules "$SSH_INBOUND_RULES" \
      --outbound-rules "$OUTBOUND_RULES" \
      --no-header --format ID)"
  fi
else
  log "firewall $FIREWALL_NAME exists ($FIREWALL_ID); leaving rules unchanged"
fi
| 249 | |
NAME_PREFIX="shithub-runner-$POOL_NAME-"

# Resolve a droplet name to its ID; prints nothing when no droplet matches.
# Uses JSON + jq for an exact-name match, consistent with the other lookups
# in this script (awk's whitespace-split column match is fragile).
droplet_id_by_name() {
  local name="$1"
  doctl compute droplet list --output json |
    jq -r --arg name "$name" 'first(.[] | select(.name == $name) | (.id | tostring)) // ""'
}
| 256 | |
# Create (or reuse) the desired number of pool droplets.  Names are
# deterministic ("<prefix><index>") so re-running the script is idempotent.
# Each entry records "id:name:status" for the summary below.
created_or_reused=()
# C-style loop instead of seq: no subprocess, and 10# forces decimal so a
# zero-padded COUNT is not read as octal (matching seq's decimal behavior).
for (( i = 1; i <= 10#$COUNT; i++ )); do
  name="$NAME_PREFIX$i"
  existing="$(droplet_id_by_name "$name")"
  if [[ -n "$existing" ]]; then
    log "droplet $name exists ($existing); skipping"
    created_or_reused+=("$existing:$name:existing")
    continue
  fi

  if (( DRY_RUN )); then
    log "would create droplet $name ($REGION, $SIZE, $IMAGE)"
    created_or_reused+=("dry-run-$i:$name:planned")
    continue
  fi

  # Build the create command as an array so the optional VPC flag appends
  # cleanly without quoting hazards.
  cmd=(doctl compute droplet create "$name"
    --image "$IMAGE"
    --region "$REGION"
    --size "$SIZE"
    --ssh-keys "$SSH_KEY_ID"
    --enable-monitoring
    --tag-names "$RESOURCE_TAG,$POOL_TAG"
    --user-data-file "$USER_DATA_FILE"
    --project-id "$PROJECT_ID"
    --wait
    --no-header
    --format ID)
  if [[ -n "$VPC_UUID" ]]; then
    cmd+=(--vpc-uuid "$VPC_UUID")
  fi

  log "creating droplet $name ($REGION, $SIZE, $IMAGE)"
  id="$("${cmd[@]}")"
  created_or_reused+=("$id:$name:created")
done
| 293 | |
# Outside of dry-run, attach every pool droplet (new or reused) to the
# target project in one batch call.
if (( DRY_RUN == 0 )); then
  resource_args=()
  for entry in "${created_or_reused[@]}"; do
    droplet_id="${entry%%:*}"
    resource_args+=(--resource "do:droplet:$droplet_id")
  done
  if (( ${#resource_args[@]} > 0 )); then
    log "assigning runner droplets to project $PROJECT_NAME"
    doctl projects resources assign "$PROJECT_ID" "${resource_args[@]}" >/dev/null
  fi
fi
| 305 | |
# Assemble the droplet summary as JSON.  In dry-run mode, synthesize records
# from the planned "id:name:status" entries; otherwise query the live
# droplets carrying the shared resource tag and scoped to this pool's prefix.
if (( DRY_RUN )); then
  droplets_json="$(
    printf '%s\n' "${created_or_reused[@]}" |
      jq -Rn '[inputs | split(":") | {id: .[0], name: .[1], status: .[2], public_ipv4: null, private_ipv4: null}]'
  )"
else
  droplets_json="$(
    doctl compute droplet list --tag-name "$RESOURCE_TAG" --output json |
      jq --arg prefix "$NAME_PREFIX" \
        '[.[]
          | select(.name | startswith($prefix))
          | {
              id: (.id | tostring),
              name: .name,
              status: .status,
              public_ipv4: ((.networks.v4 // []) | map(select(.type == "public")) | first | .ip_address // null),
              private_ipv4: ((.networks.v4 // []) | map(select(.type == "private")) | first | .ip_address // null)
            }]
        | sort_by(.name)'
  )"
fi
| 327 | |
# Emit the machine-readable summary on stdout; all progress logging above
# went to stderr, so stdout is pure JSON for downstream tooling (Ansible).
jq -n \
  --arg pool_name "$POOL_NAME" \
  --arg region "$REGION" \
  --arg size "$SIZE" \
  --arg image "$IMAGE" \
  --arg project_name "$PROJECT_NAME" \
  --arg project_id "$PROJECT_ID" \
  --arg resource_tag "$RESOURCE_TAG" \
  --arg pool_tag "$POOL_TAG" \
  --arg firewall_name "$FIREWALL_NAME" \
  --arg firewall_id "$FIREWALL_ID" \
  --argjson droplets "$droplets_json" \
  '{
    pool_name: $pool_name,
    project: {name: $project_name, id: $project_id},
    region: $region,
    size: $size,
    image: $image,
    tags: [$resource_tag, $pool_tag],
    firewall: {name: $firewall_name, id: $firewall_id},
    droplets: $droplets
  }'