@@ -0,0 +1,346 @@ |
| | 1 | +#!/usr/bin/env bash |
| | 2 | +# SPDX-License-Identifier: AGPL-3.0-or-later |
| | 3 | +# |
| | 4 | +# Provision a small DigitalOcean-backed shithub Actions runner pool. |
| | 5 | +# |
| | 6 | +# This intentionally creates cattle runner hosts only. It does not register |
| | 7 | +# runner tokens and does not write any shithub production secrets. Registration |
| | 8 | +# tokens are generated with shithubd and distributed by Ansible after droplets |
| | 9 | +# exist. |
| | 10 | + |
set -euo pipefail

# Absolute directory of this script; used to find the default cloud-init
# file that ships alongside it.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"

# Tunables. Every variable honors an environment override of the same
# uppercase name; the CLI flags parsed below take precedence over both.
POOL_NAME="${POOL_NAME:-shared-linux}"
PROJECT_NAME="${PROJECT_NAME:-shithub-prod}"
REGION="${REGION:-sfo3}"
SIZE="${SIZE:-s-2vcpu-4gb}"
IMAGE="${IMAGE:-ubuntu-24-04-x64}"
COUNT="${COUNT:-1}"
SSH_KEY_NAME="${SSH_KEY_NAME:-}"
SSH_ALLOWED_CIDRS="${SSH_ALLOWED_CIDRS:-}"
VPC_UUID="${VPC_UUID:-}"
RESOURCE_TAG="${RESOURCE_TAG:-shithub-actions-runner}"
# POOL_TAG and FIREWALL_NAME default to pool-derived names later, once
# POOL_NAME has been validated.
POOL_TAG="${POOL_TAG:-}"
FIREWALL_NAME="${FIREWALL_NAME:-}"
USER_DATA_FILE="${USER_DATA_FILE:-$SCRIPT_DIR/actions-runner-cloud-init.yaml}"
DRY_RUN=0
| | 29 | + |
# Print CLI usage to stdout. The quoted heredoc delimiter ('USAGE')
# suppresses all parameter expansion inside the help text.
usage() {
  cat <<'USAGE'
Usage:
  deploy/doctl/provision-actions-runner-pool.sh [flags]

Flags:
  --pool-name NAME          Pool slug used in droplet names (default: shared-linux)
  --project-name NAME       DigitalOcean project name (default: shithub-prod)
  --region SLUG             Droplet region (default: sfo3)
  --size SLUG               Droplet size (default: s-2vcpu-4gb)
  --image SLUG              Droplet image (default: ubuntu-24-04-x64)
  --count N                 Desired droplet count for this pool (default: 1)
  --ssh-key-name NAME       DigitalOcean SSH key name to install for root
  --ssh-allowed-cidrs LIST  Comma-separated CIDRs allowed to SSH to runners
  --vpc-uuid UUID           Optional VPC UUID for the droplets
  --resource-tag TAG        Shared tag for all runner droplets (default: shithub-actions-runner)
  --pool-tag TAG            Extra pool tag (default: shithub-actions-<pool-name>)
  --firewall-name NAME      Cloud firewall name (default: shithub-actions-runners-<pool-name>)
  --user-data-file PATH     Cloud-init file with no secrets
  --dry-run                 Validate inputs and print the plan without creating resources
  -h, --help                Show this help

Environment variables with the same uppercase names are also honored.

Example:
  SSH_KEY_NAME=macbook-pro \
  SSH_ALLOWED_CIDRS=203.0.113.4/32 \
  ./deploy/doctl/provision-actions-runner-pool.sh --dry-run
USAGE
}
| | 60 | + |
# Report an unrecoverable error on stderr and abort with status 2.
fatal() {
  printf 'fatal: %s\n' "$*" >&2
  exit 2
}
| | 65 | + |
# Emit a progress message on stderr, keeping stdout free for the final
# JSON summary this script prints.
log() {
  printf '%s\n' "$*" >&2
}
| | 69 | + |
# Print $1 with all leading and trailing whitespace removed; interior
# whitespace is preserved.
trim() {
  local value="$1"
  while [[ "$value" == [[:space:]]* ]]; do
    value="${value#?}"
  done
  while [[ "$value" == *[[:space:]] ]]; do
    value="${value%?}"
  done
  printf '%s' "$value"
}
| | 76 | + |
# Abort via fatal unless the named command is resolvable on PATH.
require_tool() {
  if ! command -v "$1" >/dev/null 2>&1; then
    fatal "$1 not on PATH"
  fi
}
| | 80 | + |
# Parse CLI flags. Each value-taking flag uses ${2:?...} so a missing
# argument aborts immediately with a descriptive shell error before the
# corresponding `shift 2` runs.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --pool-name)
      POOL_NAME="${2:?missing value for --pool-name}"
      shift 2
      ;;
    --project-name)
      PROJECT_NAME="${2:?missing value for --project-name}"
      shift 2
      ;;
    --region)
      REGION="${2:?missing value for --region}"
      shift 2
      ;;
    --size)
      SIZE="${2:?missing value for --size}"
      shift 2
      ;;
    --image)
      IMAGE="${2:?missing value for --image}"
      shift 2
      ;;
    --count)
      COUNT="${2:?missing value for --count}"
      shift 2
      ;;
    --ssh-key-name)
      SSH_KEY_NAME="${2:?missing value for --ssh-key-name}"
      shift 2
      ;;
    --ssh-allowed-cidrs)
      SSH_ALLOWED_CIDRS="${2:?missing value for --ssh-allowed-cidrs}"
      shift 2
      ;;
    --vpc-uuid)
      VPC_UUID="${2:?missing value for --vpc-uuid}"
      shift 2
      ;;
    --resource-tag)
      RESOURCE_TAG="${2:?missing value for --resource-tag}"
      shift 2
      ;;
    --pool-tag)
      POOL_TAG="${2:?missing value for --pool-tag}"
      shift 2
      ;;
    --firewall-name)
      FIREWALL_NAME="${2:?missing value for --firewall-name}"
      shift 2
      ;;
    --user-data-file)
      USER_DATA_FILE="${2:?missing value for --user-data-file}"
      shift 2
      ;;
    --dry-run)
      DRY_RUN=1
      shift
      ;;
    -h | --help)
      usage
      exit 0
      ;;
    *)
      # Unknown flags are fatal rather than silently ignored.
      fatal "unknown flag: $1"
      ;;
  esac
done
| | 148 | + |
# Validate all effective inputs up front, before any API calls.
# POOL_NAME feeds droplet/tag/firewall names, so it must be a DNS-safe slug.
[[ "$POOL_NAME" =~ ^[a-z0-9][a-z0-9-]*$ ]] || fatal "pool name must be a lowercase slug"
[[ "$RESOURCE_TAG" =~ ^[A-Za-z0-9:_.-]+$ ]] || fatal "resource tag contains unsupported characters"
[[ "$COUNT" =~ ^[0-9]+$ ]] || fatal "count must be a positive integer"
(( COUNT > 0 )) || fatal "count must be greater than zero"
[[ -n "$REGION" ]] || fatal "region is required"
[[ -n "$SIZE" ]] || fatal "size is required"
[[ -n "$IMAGE" ]] || fatal "image is required"
[[ -n "$SSH_KEY_NAME" ]] || fatal "set --ssh-key-name or SSH_KEY_NAME"
[[ -n "$SSH_ALLOWED_CIDRS" ]] || fatal "set --ssh-allowed-cidrs or SSH_ALLOWED_CIDRS"
[[ -r "$USER_DATA_FILE" ]] || fatal "user-data file not readable: $USER_DATA_FILE"

# Derive pool-scoped defaults for anything the caller did not override.
if [[ -z "$POOL_TAG" ]]; then
  POOL_TAG="shithub-actions-$POOL_NAME"
fi
if [[ -z "$FIREWALL_NAME" ]]; then
  FIREWALL_NAME="shithub-actions-runners-$POOL_NAME"
fi
| | 166 | + |
# Build the firewall's inbound SSH rule list from the comma-separated
# CIDR allowlist. Public catch-all ranges are rejected outright so a
# runner host is never reachable over SSH from the whole internet.
SSH_RULES=()
# IFS is scoped to this read only; it does not leak into later expansions.
IFS=',' read -r -a CIDR_PARTS <<<"$SSH_ALLOWED_CIDRS"
for raw in "${CIDR_PARTS[@]}"; do
  cidr="$(trim "$raw")"
  [[ -n "$cidr" ]] || continue
  case "$cidr" in
    0.0.0.0/0 | ::/0 | 0/0)
      fatal "refusing public SSH CIDR $cidr; use your operator/VPN IP range"
      ;;
  esac
  [[ "$cidr" == */* ]] || fatal "SSH CIDR must include a prefix length: $cidr"
  SSH_RULES+=("protocol:tcp,ports:22,address:$cidr")
done
(( ${#SSH_RULES[@]} > 0 )) || fatal "at least one non-public SSH CIDR is required"
# doctl accepts multiple rules as one space-separated argument; the [*]
# expansion joins with a space (default first IFS character).
SSH_INBOUND_RULES="${SSH_RULES[*]}"
# Outbound is wide open: runners must reach shithub, package mirrors, etc.
OUTBOUND_RULES="protocol:tcp,ports:all,address:0.0.0.0/0 protocol:udp,ports:all,address:0.0.0.0/0 protocol:icmp,ports:all,address:0.0.0.0/0"
| | 183 | + |
require_tool doctl
require_tool jq

# `doctl account get` performs a real API call, so this catches both a
# missing auth context and an expired/revoked token.
if ! doctl auth list >/dev/null 2>&1 || ! doctl account get >/dev/null 2>&1; then
  fatal "doctl is not authenticated; run 'doctl auth init'"
fi
| | 190 | + |
# Resolve the named SSH key to its numeric ID; droplet creation needs the ID.
SSH_KEY_ID="$(doctl compute ssh-key list --output json | jq -r --arg name "$SSH_KEY_NAME" 'first(.[] | select(.name == $name) | .id) // ""')"
[[ -n "$SSH_KEY_ID" ]] || fatal "no DigitalOcean SSH key named $SSH_KEY_NAME"

# Find or create the target project. In dry-run mode a placeholder ID is
# used so the rest of the plan can still be printed.
PROJECT_ID="$(doctl projects list --output json | jq -r --arg name "$PROJECT_NAME" 'first(.[] | select(.name == $name) | .id) // ""')"
if [[ -z "$PROJECT_ID" ]]; then
  if (( DRY_RUN )); then
    PROJECT_ID="dry-run-project-id"
    log "would create project $PROJECT_NAME"
  else
    log "creating project $PROJECT_NAME"
    PROJECT_ID="$(doctl projects create \
      --name "$PROJECT_NAME" \
      --purpose "Service or API" \
      --environment Production \
      --description "shithub Actions runner pool" \
      --no-header --format ID)"
  fi
else
  log "project $PROJECT_NAME exists ($PROJECT_ID)"
fi
| | 211 | + |
# Idempotently create a DigitalOcean tag. Existing tags are left alone,
# and in dry-run mode the action is only logged, never performed.
ensure_tag() {
  local wanted="$1"
  if doctl compute tag list --output json | jq -e --arg name "$wanted" 'any(.[]; .name == $name)' >/dev/null; then
    log "tag $wanted exists"
  elif (( DRY_RUN )); then
    log "would create tag $wanted"
  else
    log "creating tag $wanted"
    doctl compute tag create "$wanted" >/dev/null
  fi
}
| | 225 | + |
# Both tags must exist before the firewall and droplets reference them.
ensure_tag "$RESOURCE_TAG"
ensure_tag "$POOL_TAG"
| | 228 | + |
# Find or create the cloud firewall. It targets the shared RESOURCE_TAG,
# so every droplet carrying that tag — current and future — is covered.
FIREWALL_ID="$(doctl compute firewall list --no-header --format ID,Name | awk -v n="$FIREWALL_NAME" '$2==n {print $1; exit}')"
if [[ -z "$FIREWALL_ID" ]]; then
  if (( DRY_RUN )); then
    FIREWALL_ID="dry-run-firewall-id"
    log "would create firewall $FIREWALL_NAME for tag $RESOURCE_TAG"
  else
    log "creating firewall $FIREWALL_NAME for tag $RESOURCE_TAG"
    FIREWALL_ID="$(doctl compute firewall create \
      --name "$FIREWALL_NAME" \
      --tag-names "$RESOURCE_TAG" \
      --inbound-rules "$SSH_INBOUND_RULES" \
      --outbound-rules "$OUTBOUND_RULES" \
      --no-header --format ID)"
  fi
else
  # Deliberately do not reconcile rules on an existing firewall; manual
  # edits made by operators are preserved.
  log "firewall $FIREWALL_NAME exists ($FIREWALL_ID); leaving rules unchanged"
fi
| | 246 | + |
| | 247 | +NAME_PREFIX="shithub-runner-$POOL_NAME-" |
| | 248 | + |
# Print the droplet ID for an exact droplet name, or nothing if absent.
droplet_id_by_name() {
  local wanted="$1"
  doctl compute droplet list --no-header --format ID,Name |
    awk -v n="$wanted" '{ if ($2 == n) { print $1; exit } }'
}
| | 253 | + |
# Reconcile the pool to COUNT droplets named "$NAME_PREFIX<i>".
# Existing droplets are reused, never recreated. Each tracked entry is
# "id:name:state" where state is one of existing|planned|created (pool
# slugs and droplet names never contain ':').
created_or_reused=()
# Arithmetic for-loop instead of `seq`: no external process, and COUNT
# was already validated as a positive integer above.
for (( i = 1; i <= COUNT; i++ )); do
  name="$NAME_PREFIX$i"
  existing="$(droplet_id_by_name "$name")"
  if [[ -n "$existing" ]]; then
    log "droplet $name exists ($existing); skipping"
    created_or_reused+=("$existing:$name:existing")
    continue
  fi

  if (( DRY_RUN )); then
    log "would create droplet $name ($REGION, $SIZE, $IMAGE)"
    created_or_reused+=("dry-run-$i:$name:planned")
    continue
  fi

  # Build the create invocation as an array so every argument stays
  # safely quoted; the optional VPC flag is appended conditionally.
  cmd=(doctl compute droplet create "$name"
    --image "$IMAGE"
    --region "$REGION"
    --size "$SIZE"
    --ssh-keys "$SSH_KEY_ID"
    --enable-monitoring
    --tag-names "$RESOURCE_TAG,$POOL_TAG"
    --user-data-file "$USER_DATA_FILE"
    --project-id "$PROJECT_ID"
    --wait
    --no-header
    --format ID)
  if [[ -n "$VPC_UUID" ]]; then
    cmd+=(--vpc-uuid "$VPC_UUID")
  fi

  log "creating droplet $name ($REGION, $SIZE, $IMAGE)"
  # --wait blocks until the droplet is active, so the ID is final here.
  id="$("${cmd[@]}")"
  created_or_reused+=("$id:$name:created")
done
| | 290 | + |
# Outside dry-run, attach every droplet (reused or newly created) to the
# target project. Assignment is idempotent on DigitalOcean's side.
if (( ! DRY_RUN )); then
  resource_args=()
  for entry in "${created_or_reused[@]}"; do
    # Entry format is "id:name:state"; take everything before the first ':'.
    id="${entry%%:*}"
    resource_args+=(--resource "do:droplet:$id")
  done
  if (( ${#resource_args[@]} > 0 )); then
    log "assigning runner droplets to project $PROJECT_NAME"
    doctl projects resources assign "$PROJECT_ID" "${resource_args[@]}" >/dev/null
  fi
fi
| | 302 | + |
# Build the droplet list for the final JSON summary. In dry-run mode the
# plan entries are converted locally (no IPs known yet); otherwise the
# live droplet list is queried and filtered to this pool's name prefix.
if (( DRY_RUN )); then
  droplets_json="$(
    printf '%s\n' "${created_or_reused[@]}" |
      jq -Rn '[inputs | split(":") | {
        id: .[0],
        name: .[1],
        status: .[2],
        public_ipv4: null,
        private_ipv4: null
      }]'
  )"
else
  droplets_json="$(doctl compute droplet list --tag-name "$RESOURCE_TAG" --output json |
    jq --arg prefix "$NAME_PREFIX" '[.[] | select(.name | startswith($prefix)) | {
      id: (.id | tostring),
      name: .name,
      status: .status,
      public_ipv4: ((.networks.v4 // []) | map(select(.type == "public")) | first | .ip_address // null),
      private_ipv4: ((.networks.v4 // []) | map(select(.type == "private")) | first | .ip_address // null)
    }] | sort_by(.name)')"
fi
| | 324 | + |
# Emit the machine-readable summary on stdout (all logging went to
# stderr), ready for Ansible or other downstream automation to consume.
jq -n \
  --arg pool_name "$POOL_NAME" \
  --arg project_name "$PROJECT_NAME" \
  --arg project_id "$PROJECT_ID" \
  --arg region "$REGION" \
  --arg size "$SIZE" \
  --arg image "$IMAGE" \
  --arg resource_tag "$RESOURCE_TAG" \
  --arg pool_tag "$POOL_TAG" \
  --arg firewall_name "$FIREWALL_NAME" \
  --arg firewall_id "$FIREWALL_ID" \
  --argjson droplets "$droplets_json" \
  '{
    pool_name: $pool_name,
    project: {name: $project_name, id: $project_id},
    region: $region,
    size: $size,
    image: $image,
    tags: [$resource_tag, $pool_tag],
    firewall: {name: $firewall_name, id: $firewall_id},
    droplets: $droplets
  }'