Create pipeline to push an image to AWS upon release (#3131)

* Create pipeline to push to aws on releases

+ a test pipeline for the PR

Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>

* Remove unnecessary cleanup of worker space

Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>

* Remove testing pipeline

Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>

* Copy image to all regions and make public in parallel

Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>

---------

Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
This commit is contained in:
Dimitris Karakasilis 2025-01-24 13:46:08 +02:00 committed by GitHub
parent e777999082
commit 8830ba2764
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 426 additions and 121 deletions

46
.github/release-space-from-ubuntu-runners.sh vendored Executable file
View File

@ -0,0 +1,46 @@
#!/bin/bash
# Free disk space on a GitHub-hosted Ubuntu runner by removing large
# pre-installed packages and SDK trees that our builds do not need.
# Prints the largest installed packages and `df -h` before and after so the
# reclaimed space shows up in the CI logs.
echo "Listing top largest packages"
# dpkg-query emits "<size>\t<package>\t<status...>"; keep only installed
# packages and sort by size, largest first.
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
df -h
echo
# Each removal is best-effort: `|| true` keeps the script going when a
# package is not present on this runner image. The quoted arguments are
# POSIX regexes as interpreted by apt, not shell globs.
sudo apt-get remove -y '^llvm-.*|^libllvm.*' || true
sudo apt-get remove --auto-remove android-sdk-platform-tools || true
sudo apt-get purge --auto-remove android-sdk-platform-tools || true
sudo rm -rf /usr/local/lib/android
sudo apt-get remove -y '^dotnet-.*|^aspnetcore-.*' || true
sudo rm -rf /usr/share/dotnet
sudo apt-get remove -y '^mono-.*' || true
sudo apt-get remove -y '^ghc-.*' || true
sudo apt-get remove -y '.*jdk.*|.*jre.*' || true
sudo apt-get remove -y 'php.*' || true
sudo apt-get remove -y hhvm || true
sudo apt-get remove -y powershell || true
sudo apt-get remove -y firefox || true
sudo apt-get remove -y monodoc-manual || true
sudo apt-get remove -y msbuild || true
sudo apt-get remove -y microsoft-edge-stable || true
sudo apt-get remove -y '^google-.*' || true
sudo apt-get remove -y azure-cli || true
sudo apt-get remove -y '^mongo.*-.*|^postgresql-.*|^mysql-.*|^mssql-.*' || true
# NOTE(review): '^gcc-*' is a regex ("gcc" then zero or more '-'), unlike the
# '^gcc-.*' style used above; it still matches gcc-* packages but is broader
# than intended — confirm before normalizing.
sudo apt-get remove -y '^gfortran-.*' || true
sudo apt-get remove -y '^gcc-*' || true
sudo apt-get remove -y '^g++-*' || true
sudo apt-get remove -y '^cpp-*' || true
sudo apt-get autoremove -y
sudo apt-get clean
echo
echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
sudo rm -rfv build || true
sudo rm -rf /usr/local/lib/android # will release about 10 GB if you don't need Android
sudo rm -rf /usr/share/dotnet # will release about 20GB if you don't need .NET
df -h

300
.github/upload-image-to-aws.sh vendored Executable file
View File

@ -0,0 +1,300 @@
#!/bin/bash
# Given a raw image created with Auroraboot, this script will upload it to the specified AWS account as a public AMI.
# Required env vars: AWS_REGION, AWS_S3_BUCKET. Optional: AWS_PROFILE.
# Docs:
# https://docs.aws.amazon.com/vm-import/latest/userguide/required-permissions.html
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html#creating-launching-ami-from-snapshot
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-ami-boot-mode.html
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/launch-instance-boot-mode.html
# Abort on any command failure, including failures inside pipelines.
set -e
set -o pipefail
# Validate the command-line arguments.
# Arguments: $1 - path to the cloud image to upload.
# Exits with 1 (after printing an error) when the argument is missing, the
# file does not exist, or `file` does not identify it as a raw disk image.
checkArguments() {
  if (( $# < 1 )); then
    echo "Error: You need to specify the cloud image to upload."
    echo "Usage: $0 <cloud-image>"
    exit 1
  fi

  local imagePath="$1"

  if [[ ! -f "$imagePath" ]]; then
    echo "Error: File '$imagePath' does not exist."
    exit 1
  fi

  # Raw disk images are reported by `file` as DOS/MBR boot sectors.
  if ! file "$imagePath" | grep -q 'DOS/MBR boot sector'; then
    echo "Error: File '$imagePath' is not a raw image."
    exit 1
  fi
}
# Ensure the AWS configuration this script depends on is present.
# Exits with 1 when AWS_REGION or AWS_S3_BUCKET is unset or empty.
checkEnvVars() {
  if [[ -z "${AWS_REGION}" || -z "${AWS_S3_BUCKET}" ]]; then
    echo "Error: AWS_REGION and AWS_S3_BUCKET environment variables must be set."
    exit 1
  fi
}
# Thin wrapper around the aws CLI: always targets $AWS_REGION and appends
# --profile only when AWS_PROFILE is set. All arguments are forwarded as-is.
AWS() {
  local -a cliArgs=(--region "$AWS_REGION")
  if [[ -n "${AWS_PROFILE:-}" ]]; then
    cliArgs+=(--profile "$AWS_PROFILE")
  fi
  aws "${cliArgs[@]}" "$@"
}
# https://docs.aws.amazon.com/vm-import/latest/userguide/required-permissions.html#vmimport-role
# Create the "vmimport" IAM service role required by EC2 VM Import/Export,
# unless a role containing that name already exists. This is one-off setup
# needing IAM permissions; the main script keeps the call commented out so CI
# credentials do not need them.
ensureVmImportRole() {
# If any role whose name contains "vmimport" exists, assume setup was done.
(AWS iam list-roles | jq -r '.Roles[] | select(.RoleName | contains("vmimport")) | .RoleName' | grep -q "vmimport" && echo "vmimport role found. All good.") || {
echo "Creating vmimport role"
export AWS_PAGER="" # Avoid being dropped to a pager
# Trust policy: let the VM Import service (vmie.amazonaws.com) assume this
# role, scoped by the fixed "vmimport" external ID.
AWS iam create-role --role-name vmimport --assume-role-policy-document file://<(cat <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "vmie.amazonaws.com"
},
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {
"sts:ExternalId": "vmimport"
}
}
}
]
}
EOF
)
# AWS iam attach-role-policy --role-name vmimport --policy-arn arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM
# Inline policy: S3 access to the upload bucket plus the EC2 snapshot
# operations the import service performs on our behalf.
AWS iam put-role-policy --role-name vmimport --policy-name vmimport --policy-document file://<(cat <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetBucketLocation",
"s3:GetBucketAcl",
"s3:GetObject",
"s3:PutObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::$AWS_S3_BUCKET",
"arn:aws:s3:::$AWS_S3_BUCKET/*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:ModifySnapshotAttribute",
"ec2:CopySnapshot",
"ec2:RegisterImage",
"ec2:Describe*"
],
"Resource": "*"
}
]
}
EOF
)
sleep 10 # Wait for the policy and permissions to be effective. This is not ideal but I couldn't find any better way.
}
}
# Upload the given image file to $AWS_S3_BUCKET, unless an object with the
# same basename already exists there (existing objects are kept, not
# overwritten).
# Arguments: $1 - path to the local image file.
uploadImageToS3() {
  local srcPath="$1"
  local objectKey
  objectKey=$(basename "$srcPath")

  if AWS s3 ls "$AWS_S3_BUCKET/$objectKey" > /dev/null 2>&1; then
    echo "File '$objectKey' already exists in S3 bucket '$AWS_S3_BUCKET'."
    return 0
  fi

  echo "File '$objectKey' does not exist in S3 bucket '$AWS_S3_BUCKET'. Uploading now."
  AWS s3 cp "$srcPath" "s3://$AWS_S3_BUCKET/$objectKey"
}
# Poll the given import-snapshot task until it finishes.
# Arguments: $1 - the ImportTaskId to wait for.
# Outputs: progress messages (the waiting message goes to stderr); the LAST
#          stdout line is the resulting SnapshotId, for callers to `tail -1`.
# Exits with 1 when the task ends in a failed/cancelled state.
waitForSnapshotCompletion() {
  local taskID="$1"
  local state
  while :; do
    state=$(AWS ec2 describe-import-snapshot-tasks --import-task-ids "$taskID" --query 'ImportSnapshotTasks[0].SnapshotTaskDetail.Status' --output text)
    case "$state" in
      completed)
        echo "Snapshot import completed."
        break
        ;;
      deleted|cancelling|cancelled)
        echo "Snapshot import failed with status: $state"
        exit 1
        ;;
      *)
        echo "Waiting for snapshot import to complete. Current status: $state" >&2
        sleep 30
        ;;
    esac
  done
  # Emit the snapshot ID as the final stdout line.
  AWS ec2 describe-import-snapshot-tasks --import-task-ids "$taskID" --query 'ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId' --output text
}
# Import the given S3 object as an EBS snapshot, reusing an existing snapshot
# when one is already tagged with this source file.
# Arguments: $1 - S3 key (the image basename previously uploaded to $AWS_S3_BUCKET).
# Outputs: progress messages; the LAST stdout line is the SnapshotId so
#          callers can grab it with `tail -1`.
# Returns 1 when starting the import task fails.
importAsSnapshot() {
local file="$1"
local snapshotID
# Fix: declare taskID local — it previously leaked into the global scope.
local taskID
# Reuse a previous import if a snapshot is already tagged with this file.
snapshotID=$(AWS ec2 describe-snapshots --filters "Name=tag:SourceFile,Values=$file" --query "Snapshots[0].SnapshotId" --output text)
if [ "$snapshotID" != "None" ]; then
echo "Snapshot $snapshotID already exists for file $file"
echo "$snapshotID"
return 0
fi
# Start the import task. The disk-container JSON is fed via process
# substitution; `tee /dev/fd/2` mirrors the task ID to stderr for the logs.
taskID=$(AWS ec2 import-snapshot --description "$file" --disk-container file://<(cat <<EOF
{
"Description": "$file",
"Format": "RAW",
"UserBucket": {
"S3Bucket": "$AWS_S3_BUCKET",
"S3Key": "$file"
}
}
EOF
) --query 'ImportTaskId' --output text | tee /dev/fd/2) || return 1
echo "Snapshot import task started with ID: $taskID"
snapshotID=$(waitForSnapshotCompletion "$taskID" | tail -1 | tee /dev/fd/2)
echo "Adding tag to the snapshot with ID: $snapshotID"
# Tag the snapshot so the next run can find and reuse it.
AWS ec2 create-tags --resources "$snapshotID" --tags "Key=SourceFile,Value=$file"
echo "$snapshotID" # Return the snapshot ID so that we can grab it with `tail -1`
}
# Find an AMI named $imageName in $AWS_REGION or register one from the given
# snapshot, then wait for it, make it public, and fan it out to all regions.
# Arguments: $1 - AMI name, $2 - EBS snapshot ID to register the image from.
checkImageExistsOrCreate() {
  local imageName="$1"
  local snapshotID="$2"
  local imageID
  # Fix: define the description up front and make it local. It is needed by
  # copyToAllRegions in BOTH branches; previously it was only assigned in
  # the "create" branch, so an already-existing image was copied with an
  # empty description (and the variable leaked as a global).
  local description="AMI created from snapshot $snapshotID"

  # Check if the image already exists
  imageID=$(AWS ec2 describe-images --filters "Name=name,Values=$imageName" --query 'Images[0].ImageId' --output text)
  if [ "$imageID" != "None" ]; then
    echo "Image '$imageName' already exists with Image ID: $imageID"
  else
    echo "Image '$imageName' does not exist. Creating from snapshot..."
    # Register a UEFI, ENA-enabled x86_64 AMI backed by the snapshot.
    imageID=$(AWS ec2 register-image \
      --name "$imageName" \
      --description "$description" \
      --architecture x86_64 \
      --root-device-name /dev/xvda \
      --block-device-mappings "[{\"DeviceName\":\"/dev/xvda\",\"Ebs\":{\"SnapshotId\":\"$snapshotID\"}}]" \
      --virtualization-type hvm \
      --boot-mode uefi \
      --ena-support \
      --query 'ImageId' \
      --output text)
    echo "Image '$imageName' created with Image ID: $imageID"
  fi
  waitAMI "$imageID" "$AWS_REGION"
  makeAMIpublic "$imageID" "$AWS_REGION"
  copyToAllRegions "$imageID" "$imageName" "$description"
}
# Function to wait for the AMI to become available in the given region.
# Arguments: $1 - AMI ID, $2 - region to poll in.
# Exits with 1 when the image enters an unexpected (e.g. failed) state.
waitAMI() {
  local amiID="$1"
  local region="$2"
  # Fix: only pass --profile when AWS_PROFILE is set. An empty `--profile ""`
  # makes the aws CLI fail (e.g. in CI, where credentials come from the
  # environment rather than a named profile).
  local -a profileArgs=()
  if [ -n "${AWS_PROFILE:-}" ]; then
    profileArgs=(--profile "$AWS_PROFILE")
  fi
  echo "[$region] Waiting for AMI $amiID to be available"
  while true; do
    status=$(aws "${profileArgs[@]}" ec2 describe-images --region "$region" --image-ids "$amiID" --query "Images[0].State" --output text 2>/dev/null)
    if [[ "$status" == "available" ]]; then
      echo "[$region] AMI $amiID is now available!"
      break
    elif [[ "$status" == "pending" || "$status" == "null" ]]; then
      sleep 10
    else
      echo "[$region] AMI is in an unexpected state: $status. Exiting."
      exit 1
    fi
  done
}
# Make the given AMI publicly launchable in the given region.
# Arguments: $1 - AMI ID, $2 - region the AMI lives in.
makeAMIpublic() {
  local imageID="$1"
  local region="$2"
  # Fix: only pass --profile when AWS_PROFILE is set; an empty `--profile ""`
  # makes the aws CLI fail (CI uses environment credentials, no profile).
  local -a profileArgs=()
  if [ -n "${AWS_PROFILE:-}" ]; then
    profileArgs=(--profile "$AWS_PROFILE")
  fi
  echo "[$region] calling DisableImageBlockPublicAccess"
  # Account-level "block public access" must be lifted before the launch
  # permission change below can take effect. Only the output is silenced;
  # under `set -e` a failure here still aborts the script.
  aws "${profileArgs[@]}" --region "$region" ec2 disable-image-block-public-access > /dev/null 2>&1
  echo "[$region] Making image '$imageID' public..."
  aws "${profileArgs[@]}" --region "$region" ec2 modify-image-attribute --image-id "$imageID" --launch-permission "{\"Add\":[{\"Group\":\"all\"}]}"
  echo "[$region] Image '$imageID' is now public."
}
# Copy the AMI from $AWS_REGION to every other enabled region, tag each copy,
# wait for it to become available, and make it public. Regions are processed
# in parallel, one background job each; `wait` at the end is the barrier.
# Arguments: $1 - source AMI ID, $2 - AMI name, $3 - AMI description.
copyToAllRegions() {
  local imageID="$1"
  local imageName="$2"
  local description="$3"
  # Fix: only pass --profile when AWS_PROFILE is set; an empty `--profile ""`
  # makes the aws CLI fail (CI uses environment credentials, no profile).
  local -a profileArgs=()
  if [ -n "${AWS_PROFILE:-}" ]; then
    profileArgs=(--profile "$AWS_PROFILE")
  fi
  echo "Copying AMI '$imageName ($imageID)' to all regions"
  mapfile -t regions < <(AWS ec2 describe-regions | jq -r '.Regions[].RegionName')
  for reg in "${regions[@]}"; do
    # The source region already has the AMI; skip it.
    if [[ "${AWS_REGION}" == "${reg}" ]]; then
      continue
    fi
    (
      echo "[$reg] Copying AMI '$imageName' to region $reg"
      # Check if the image already exists in this region
      amiCopyID=$(aws "${profileArgs[@]}" --region "$reg" ec2 describe-images --filters "Name=name,Values=$imageName" --query 'Images[0].ImageId' --output text)
      if [ "$amiCopyID" != "None" ]; then
        echo "[$reg] Image '$imageName' already exists with Image ID: $amiCopyID"
      else
        amiCopyID=$(AWS ec2 copy-image \
          --name "${imageName}" \
          --description "${description}" \
          --source-image-id "${imageID}" \
          --source-region "${AWS_REGION}" \
          --region "${reg}" \
          | jq -r '.ImageId'
        )
        echo "[$reg] Tagging Copied AMI ${amiCopyID}"
        # Fix: tag the COPY in its own region. The original passed a
        # malformed "--tags --tags", tagged the SOURCE image ID, and (via
        # the AWS wrapper) targeted the source region.
        aws "${profileArgs[@]}" --region "${reg}" ec2 create-tags --resources "${amiCopyID}" \
          --tags Key=Name,Value="${imageName}" Key=Project,Value=Kairos
      fi
      waitAMI "${amiCopyID}" "${reg}"
      makeAMIpublic "${amiCopyID}" "${reg}"
      echo "[$reg] AMI Copied: ${amiCopyID}"
    ) &
  done
  wait # Wait for all background jobs to finish
}
# ----- Main script -----
# Fix: validate environment and arguments BEFORE deriving anything from $1
# (baseName used to be computed before checkArguments ran).
checkEnvVars
checkArguments "$@"
baseName=$(basename "$1")
# This is a one-off operation and requires additional permissions which we don't need to give to CI.
#ensureVmImportRole
uploadImageToS3 "$1"
# importAsSnapshot prints progress; its LAST stdout line is the snapshot ID.
# `tee /dev/fd/2` keeps the progress visible in the logs as well.
output=$(importAsSnapshot "$baseName" | tee /dev/fd/2)
snapshotID=$(echo "$output" | tail -1)
checkImageExistsOrCreate "$baseName" "$snapshotID"

View File

@ -113,46 +113,7 @@ jobs:
uses: sigstore/cosign-installer@main
- name: Release space from worker
run: |
echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
df -h
echo
sudo apt-get remove -y '^llvm-.*|^libllvm.*' || true
sudo apt-get remove --auto-remove android-sdk-platform-tools || true
sudo apt-get purge --auto-remove android-sdk-platform-tools || true
sudo rm -rf /usr/local/lib/android
sudo apt-get remove -y '^dotnet-.*|^aspnetcore-.*' || true
sudo rm -rf /usr/share/dotnet
sudo apt-get remove -y '^mono-.*' || true
sudo apt-get remove -y '^ghc-.*' || true
sudo apt-get remove -y '.*jdk.*|.*jre.*' || true
sudo apt-get remove -y 'php.*' || true
sudo apt-get remove -y hhvm || true
sudo apt-get remove -y powershell || true
sudo apt-get remove -y firefox || true
sudo apt-get remove -y monodoc-manual || true
sudo apt-get remove -y msbuild || true
sudo apt-get remove -y microsoft-edge-stable || true
sudo apt-get remove -y '^google-.*' || true
sudo apt-get remove -y azure-cli || true
sudo apt-get remove -y '^mongo.*-.*|^postgresql-.*|^mysql-.*|^mssql-.*' || true
sudo apt-get remove -y '^gfortran-.*' || true
sudo apt-get remove -y '^gcc-*' || true
sudo apt-get remove -y '^g++-*' || true
sudo apt-get remove -y '^cpp-*' || true
sudo apt-get autoremove -y
sudo apt-get clean
echo
echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
sudo rm -rfv build || true
sudo rm -rf /usr/local/lib/android # will release about 10 GB if you don't need Android
sudo rm -rf /usr/share/dotnet # will release about 20GB if you don't need .NET
df -h
./.github/release-space-from-ubuntu-runners.sh
- name: Login to Quay Registry
run: echo ${{ secrets.QUAY_PASSWORD }} | docker login -u ${{ secrets.QUAY_USERNAME }} --password-stdin quay.io
- name: Install earthly
@ -249,53 +210,12 @@ jobs:
strategy:
matrix: ${{ fromJson(needs.get-uki-matrix.outputs.matrix) }}
steps:
- name: Release space from worker
run: |
echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
df -h
echo
sudo apt-get remove -y '^llvm-.*|^libllvm.*' || true
sudo apt-get remove --auto-remove android-sdk-platform-tools || true
sudo apt-get purge --auto-remove android-sdk-platform-tools || true
sudo rm -rf /usr/local/lib/android
sudo apt-get remove -y '^dotnet-.*|^aspnetcore-.*' || true
sudo rm -rf /usr/share/dotnet
sudo apt-get remove -y '^mono-.*' || true
sudo apt-get remove -y '^ghc-.*' || true
sudo apt-get remove -y '.*jdk.*|.*jre.*' || true
sudo apt-get remove -y 'php.*' || true
sudo apt-get remove -y hhvm || true
sudo apt-get remove -y powershell || true
sudo apt-get remove -y firefox || true
sudo apt-get remove -y monodoc-manual || true
sudo apt-get remove -y msbuild || true
sudo apt-get remove -y microsoft-edge-stable || true
sudo apt-get remove -y '^google-.*' || true
sudo apt-get remove -y azure-cli || true
sudo apt-get remove -y '^mongo.*-.*|^postgresql-.*|^mysql-.*|^mssql-.*' || true
sudo apt-get remove -y '^gfortran-.*' || true
sudo apt-get remove -y '^gcc-*' || true
sudo apt-get remove -y '^g++-*' || true
sudo apt-get remove -y '^cpp-*' || true
sudo apt-get autoremove -y
sudo apt-get clean
echo
echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
sudo rm -rfv build || true
df -h
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- run: |
git fetch --prune --unshallow
- name: Release space from worker
run: |
sudo rm -rf /usr/local/lib/android # will release about 10 GB if you don't need Android
sudo rm -rf /usr/share/dotnet # will release about 20GB if you don't need .NET
./.github/release-space-from-ubuntu-runners.sh
- name: Set up QEMU
uses: docker/setup-qemu-action@master
with:
@ -354,7 +274,6 @@ jobs:
with:
files: |
build/*.efi
build-standard:
runs-on: ubuntu-latest
needs:
@ -375,44 +294,7 @@ jobs:
uses: sigstore/cosign-installer@main
- name: Release space from worker
run: |
echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
df -h
echo
sudo apt-get remove -y '^llvm-.*|^libllvm.*' || true
sudo apt-get remove --auto-remove android-sdk-platform-tools || true
sudo apt-get purge --auto-remove android-sdk-platform-tools || true
sudo rm -rf /usr/local/lib/android
sudo apt-get remove -y '^dotnet-.*|^aspnetcore-.*' || true
sudo rm -rf /usr/share/dotnet
sudo apt-get remove -y '^mono-.*' || true
sudo apt-get remove -y '^ghc-.*' || true
sudo apt-get remove -y '.*jdk.*|.*jre.*' || true
sudo apt-get remove -y 'php.*' || true
sudo apt-get remove -y hhvm || true
sudo apt-get remove -y powershell || true
sudo apt-get remove -y firefox || true
sudo apt-get remove -y monodoc-manual || true
sudo apt-get remove -y msbuild || true
sudo apt-get remove -y microsoft-edge-stable || true
sudo apt-get remove -y '^google-.*' || true
sudo apt-get remove -y azure-cli || true
sudo apt-get remove -y '^mongo.*-.*|^postgresql-.*|^mysql-.*|^mssql-.*' || true
sudo apt-get remove -y '^gfortran-.*' || true
sudo apt-get remove -y '^gcc-*' || true
sudo apt-get remove -y '^g++-*' || true
sudo apt-get remove -y '^cpp-*' || true
sudo apt-get autoremove -y
sudo apt-get clean
echo
echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
sudo rm -rfv build || true
df -h
./.github/release-space-from-ubuntu-runners.sh
- name: Install earthly
uses: Luet-lab/luet-install-action@cec77490c3f2416d7d07a47cfab04d448641d7ce # v1.1
with:
@ -458,3 +340,80 @@ jobs:
with:
files: |
release/*
build-aws-image:
runs-on: ubuntu-latest
needs:
- build-standard
permissions:
id-token: write # OIDC support
contents: write
actions: read
security-events: write
strategy:
matrix:
include:
# We don't publish AWS images for all combinations so we go hardcoded here
- flavor: ubuntu
flavor_release: 24.04
family: ubuntu
base_image: ubuntu:24.04
variant: standard
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- run: |
git fetch --prune --unshallow
- name: Install kairos-agent (for versioneer)
uses: Luet-lab/luet-install-action@cec77490c3f2416d7d07a47cfab04d448641d7ce # v1.1
with:
repository: quay.io/kairos/packages
packages: system/kairos-agent
- name: Build 🔧
run: |
# TODO: Does "sort" work correctly on k3s versions? Which version do we want?
k3s_version=$(echo '${{ needs.get-standard-matrix.outputs.matrix }}' | \
jq -r '[.[].k3s_version] | unique | sort | .[0]')
version=$(git describe --always --tags --dirty)
containerImage=$(kairos-agent versioneer container-artifact-name \
--flavor ${{ matrix.flavor }} \
--flavor-release ${{ matrix.flavor_release }} \
--variant ${{ matrix.variant }} \
--model generic \
--arch amd64 \
--software-version-prefix k3s \
--registry-and-org quay.io/kairos \
--software-version "$k3s_version" \
--version "$version"
)
echo "Using $containerImage to build a cloud image"
docker run -v /var/run/docker.sock:/var/run/docker.sock --net host \
--privileged \
-v $PWD:/aurora --rm quay.io/kairos/auroraboot \
--debug \
--set "disable_http_server=true" \
--set "container_image=docker:${containerImage}" \
--set "disable_netboot=true" \
--set "disk.raw=true" \
--set "state_dir=/aurora"
# https://github.com/aws-actions/configure-aws-credentials?tab=readme-ov-file#assumerole-with-static-iam-credentials-in-repository-secrets
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
#role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
#role-external-id: ${{ secrets.AWS_ROLE_EXTERNAL_ID }}
role-duration-seconds: 1200
role-session-name: AWSCIPush
- name: Push to AWS
env:
AWS_S3_BUCKET: kairos-cloud-images
AWS_REGION: eu-central-1
run: |
.github/upload-image-to-aws.sh $(ls *.raw)