Compare commits


1 Commit

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| Bassem Dghaidi | 3e37a29c21 | Prevent releases on wrong tag | 2023-03-14 12:42:22 +00:00 |
58 changed files with 494 additions and 2604 deletions

View File

@@ -0,0 +1,45 @@
name: 'E2E ARC Test Action'
description: 'Includes common arc installation, setup and test file run'
inputs:
github-token:
description: 'JWT generated with Github App inputs'
required: true
config-url:
description: "URL of the repo, org or enterprise where the runner scale sets will be registered"
required: true
docker-image-repo:
description: "Local docker image repo for testing"
required: true
docker-image-tag:
description: "Tag of ARC Docker image for testing"
required: true
runs:
using: "composite"
steps:
- name: Install ARC
run: helm install arc --namespace "arc-systems" --create-namespace --set image.tag=${{ inputs.docker-image-tag }} --set image.repository=${{ inputs.docker-image-repo }} ./charts/gha-runner-scale-set-controller
shell: bash
- name: Get datetime
# We use this value later in the runner installation to avoid runner name collisions, which are a risk with hard-coded values.
# A datetime including the first three digits of the nanoseconds is a good option for this; it also aids readability and runner sorting if needed.
run: echo "DATE_TIME=$(date +'%Y-%m-%d-%H-%M-%S-%3N')" >> $GITHUB_ENV
shell: bash
- name: Install runners
run: |
helm install "arc-runner-${{ env.DATE_TIME }}" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="${{ inputs.config-url }}" \
--set githubConfigSecret.github_token="${{ inputs.github-token }}" \
./charts/gha-runner-scale-set \
--debug
kubectl get pods -A
shell: bash
- name: Test ARC scales pods up and down
run: |
export GITHUB_TOKEN="${{ inputs.github-token }}"
export DATE_TIME="${{ env.DATE_TIME }}"
go test ./test_e2e_arc -v
shell: bash
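
For reference, a minimal local sketch of what this composite action runs, using the same chart path and test package; the config URL and token are placeholders:

```bash
# Generate a collision-resistant suffix exactly as the action does.
DATE_TIME=$(date +'%Y-%m-%d-%H-%M-%S-%3N')   # e.g. 2023-03-14-12-42-22-123

# Install a runner scale set with a unique Helm release name.
helm install "arc-runner-${DATE_TIME}" \
  --namespace arc-runners \
  --create-namespace \
  --set githubConfigUrl="https://github.com/<org>/<repo>" \
  --set githubConfigSecret.github_token="<github-app-token>" \
  ./charts/gha-runner-scale-set

# Run the e2e test with the same environment variables the action exports.
GITHUB_TOKEN="<github-app-token>" DATE_TIME="$DATE_TIME" go test ./test_e2e_arc -v
```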

View File

@@ -1,64 +0,0 @@
name: 'Setup ARC E2E Test Action'
description: 'Build controller image, create kind cluster, load the image, and exchange ARC configure token.'
inputs:
github-app-id:
description: 'GitHub App Id for exchange access token'
required: true
github-app-pk:
description: "GitHub App private key for exchange access token"
required: true
github-app-org:
description: 'The organization the GitHub App has installed on'
required: true
docker-image-name:
description: "Local docker image name for building"
required: true
docker-image-tag:
description: "Tag of ARC Docker image for building"
required: true
outputs:
token:
description: 'Token to use for configure ARC'
value: ${{steps.config-token.outputs.token}}
runs:
using: "composite"
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
# failures pushing images to GHCR
version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6
- name: Build controller image
uses: docker/build-push-action@v3
with:
file: Dockerfile
platforms: linux/amd64
load: true
build-args: |
DOCKER_IMAGE_NAME=${{inputs.docker-image-name}}
VERSION=${{inputs.docker-image-tag}}
tags: |
${{inputs.docker-image-name}}:${{inputs.docker-image-tag}}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Create minikube cluster and load image
shell: bash
run: |
minikube start
minikube image load ${{inputs.docker-image-name}}:${{inputs.docker-image-tag}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
with:
application_id: ${{ inputs.github-app-id }}
application_private_key: ${{ inputs.github-app-pk }}
organization: ${{ inputs.github-app-org }}
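
A rough local equivalent of this action's build-and-load steps (buildx with `--load`, then `minikube image load`); the image name and tag are placeholders matching the workflow's defaults:

```bash
IMAGE_NAME=arc-test-image   # placeholder, mirrors the workflow's IMAGE_NAME
IMAGE_TAG=dev               # placeholder, mirrors the workflow's IMAGE_VERSION

# Build the controller image into the local Docker daemon instead of pushing it.
docker buildx build . -f Dockerfile \
  --platform linux/amd64 \
  --build-arg DOCKER_IMAGE_NAME="$IMAGE_NAME" \
  --build-arg VERSION="$IMAGE_TAG" \
  -t "$IMAGE_NAME:$IMAGE_TAG" \
  --load

# Make the image available inside the test cluster.
minikube start
minikube image load "$IMAGE_NAME:$IMAGE_TAG"
```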

View File

@@ -5,730 +5,47 @@ on:
branches:
- master
pull_request:
branches:
- master
workflow_dispatch:
inputs:
target_org:
description: The org of the test repository.
required: true
default: actions-runner-controller
target_repo:
description: The repository to install the ARC.
required: true
default: arc_e2e_test_dummy
env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "dev"
CLUSTER_NAME: e2e-test
RUNNER_VERSION: 2.302.1
IMAGE_REPO: "test/test-image"
jobs:
default-setup:
runs-on: ubuntu-latest
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
setup-steps:
runs-on: [ubuntu-latest]
steps:
- uses: actions/checkout@v3
- name: Resolve inputs
id: resolved_inputs
- name: Add env variables
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
TAG=$(echo "0.0.$GITHUB_SHA")
echo "TAG=$TAG" >> $GITHUB_ENV
echo "IMAGE=$IMAGE_REPO:$TAG" >> $GITHUB_ENV
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
version: latest
- name: Docker Build Test Image
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
DOCKER_CLI_EXPERIMENTAL=enabled DOCKER_BUILDKIT=1 docker buildx build --build-arg RUNNER_VERSION=$RUNNER_VERSION --build-arg TAG=$TAG -t $IMAGE . --load
- name: Create Kind cluster
run: |
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
single-namespace-setup:
runs-on: ubuntu-latest
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
PATH=$(go env GOPATH)/bin:$PATH
kind create cluster --name $CLUSTER_NAME
- name: Load Image to Kind Cluster
run: kind load docker-image $IMAGE --name $CLUSTER_NAME
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
with:
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
kubectl create namespace arc-runners
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
--set flags.watchSingleNamespace=arc-runners \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
dind-mode-setup:
runs-on: ubuntu-latest
env:
WORKFLOW_FILE: arc-test-dind-workflow.yaml
steps:
- uses: actions/checkout@v3
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- uses: ./.github/actions/e2e-arc-test
with:
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set containerMode.type="dind" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
kubernetes-mode-setup:
runs-on: ubuntu-latest
env:
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
steps:
- uses: actions/checkout@v3
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
echo "Install openebs/dynamic-localpv-provisioner"
helm repo add openebs https://openebs.github.io/charts
helm repo update
helm install openebs openebs/openebs -n openebs --create-namespace
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set containerMode.type="kubernetes" \
--set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \
--set containerMode.kubernetesModeWorkVolumeClaim.storageClassName="openebs-hostpath" \
--set containerMode.kubernetesModeWorkVolumeClaim.resources.requests.storage="1Gi" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
auth-proxy-setup:
runs-on: ubuntu-latest
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
docker run -d \
--name squid \
--publish 3128:3128 \
huangtingluo/squid-proxy:latest
kubectl create namespace arc-runners
kubectl create secret generic proxy-auth \
--namespace=arc-runners \
--from-literal=username=github \
--from-literal=password='actions'
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set proxy.https.url="http://host.minikube.internal:3128" \
--set proxy.https.credentialSecretRef="proxy-auth" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
anonymous-proxy-setup:
runs-on: ubuntu-latest
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
docker run -d \
--name squid \
--publish 3128:3128 \
ubuntu/squid:latest
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set proxy.https.url="http://host.minikube.internal:3128" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{ env.WORKFLOW_FILE }}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
github-token: ${{ steps.get_workflow_token.outputs.token }}
config-url: "https://github.com/actions-runner-controller/arc_e2e_test_dummy"
docker-image-repo: $IMAGE_REPO
docker-image-tag: $TAG
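
The retry loops in the jobs above initialize `count` but never increment it, so the ten-iteration timeout can never fire; a corrected sketch of the same wait pattern:

```bash
count=0
while true; do
  POD_NAME=$(kubectl get pods -n arc-systems \
    -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
  if [ -n "$POD_NAME" ]; then
    echo "Pod found: $POD_NAME"
    break
  fi
  if [ "$count" -ge 10 ]; then
    echo "Timeout waiting for the controller pod"
    exit 1
  fi
  count=$((count + 1))   # the increment missing from the loops above
  sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems \
  -l app.kubernetes.io/name=gha-runner-scale-set-controller
```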

View File

@@ -20,4 +20,4 @@ jobs:
uses: golangci/golangci-lint-action@v3
with:
only-new-issues: true
version: v1.51.1
version: v1.49.0

View File

@@ -29,9 +29,6 @@ jobs:
release-controller:
name: Release
runs-on: ubuntu-latest
# gha-runner-scale-set has its own release workflow.
# We don't want to publish a new actions-runner-controller image
# when we release gha-runner-scale-set.
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
steps:
- name: Checkout

View File

@@ -8,47 +8,35 @@ on:
- master
paths-ignore:
- '**.md'
- '.github/actions/**'
- '.github/ISSUE_TEMPLATE/**'
- '.github/workflows/e2e-test-dispatch-workflow.yaml'
- '.github/workflows/e2e-test-linux-vm.yaml'
- '.github/workflows/publish-arc.yaml'
- '.github/workflows/publish-chart.yaml'
- '.github/workflows/publish-runner-scale-set.yaml'
- '.github/workflows/release-runners.yaml'
- '.github/workflows/run-codeql.yaml'
- '.github/workflows/run-first-interaction.yaml'
- '.github/workflows/run-stale.yaml'
- '.github/workflows/update-runners.yaml'
- '.github/workflows/validate-arc.yaml'
- '.github/workflows/validate-chart.yaml'
- '.github/workflows/validate-gha-chart.yaml'
- '.github/workflows/validate-runners.yaml'
- '.github/dependabot.yml'
- '.github/RELEASE_NOTE_TEMPLATE.md'
- '.github/workflows/publish-chart.yaml'
- '.github/workflows/publish-arc.yaml'
- '.github/workflows/runners.yaml'
- '.github/workflows/validate-entrypoint.yaml'
- '.github/renovate.*'
- 'runner/**'
- '.gitignore'
- 'PROJECT'
- 'LICENSE'
- 'Makefile'
# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
permissions:
contents: read
packages: write
env:
# Safeguard to prevent pushing images to registries after build
PUSH_TO_REGISTRIES: true
TARGET_ORG: actions-runner-controller
TARGET_REPO: actions-runner-controller
# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
permissions:
contents: read
jobs:
legacy-canary-build:
name: Build and Publish Legacy Canary Image
canary-build:
name: Build and Publish Canary Image
runs-on: ubuntu-latest
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
TARGET_ORG: actions-runner-controller
TARGET_REPO: actions-runner-controller
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -80,50 +68,3 @@ jobs:
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY
canary-build:
name: Build and Publish gha-runner-scale-set-controller Canary Image
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Normalization is needed because upper case characters are not allowed in the repository name
# and the short sha is needed for image tagging
- name: Resolve parameters
id: resolve_parameters
run: |
echo "INFO: Resolving short sha"
echo "short_sha=$(git rev-parse --short ${{ github.ref }})" >> $GITHUB_OUTPUT
echo "INFO: Normalizing repository name (lowercase)"
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: latest
# Unstable builds - run at your own risk
- name: Build and Push
uses: docker/build-push-action@v3
with:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64
build-args: VERSION=canary-"${{ github.ref }}"
push: ${{ env.PUSH_TO_REGISTRIES }}
tags: |
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary-${{ steps.resolve_parameters.outputs.short_sha }}
cache-from: type=gha
cache-to: type=gha,mode=max
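
Roughly the CLI equivalent of the Build and Push step above, assuming a checkout and the default Actions environment variables; the workflow passes `canary-<ref>` as `VERSION`, the short sha is used here only for brevity:

```bash
# Lower-case the owner and compute the short sha, mirroring the resolve_parameters step.
OWNER=$(echo "$GITHUB_REPOSITORY_OWNER" | tr '[:upper:]' '[:lower:]')
SHORT_SHA=$(git rev-parse --short HEAD)

docker buildx build . -f ./Dockerfile \
  --platform linux/amd64,linux/arm64 \
  --build-arg VERSION="canary-$SHORT_SHA" \
  -t "ghcr.io/$OWNER/gha-runner-scale-set-controller:canary" \
  -t "ghcr.io/$OWNER/gha-runner-scale-set-controller:canary-$SHORT_SHA" \
  --push
```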

View File

@@ -77,7 +77,6 @@ jobs:
permissions:
pull-requests: write
contents: write
actions: write
env:
GH_TOKEN: ${{ github.token }}
CURRENT_VERSION: ${{ needs.check_versions.outputs.current_version }}

View File

@@ -5,7 +5,7 @@ else
endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
RUNNER_VERSION ?= 2.303.0
RUNNER_VERSION ?= 2.302.1
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}

View File

@@ -1,5 +1,4 @@
# ADR 2022-10-17: Produce the runner image for the scaleset client
# ADR 0001: Produce the runner image for the scaleset client
**Date**: 2022-10-17
**Status**: Done
@@ -8,7 +7,6 @@
We aim to provide a similar experience (as close as possible) between self-hosted and GitHub-hosted runners. To achieve this, we are making the following changes to align our self-hosted runner container image with the Ubuntu runners managed by GitHub.
Here are the changes:
- We created a USER `runner(1001)` and a GROUP `docker(123)`
- `sudo` has been installed on the image and the `runner` user will be a passwordless sudoer.
- The runner binary was placed under `/home/runner/` and launched using `/home/runner/run.sh`
@@ -20,33 +18,31 @@ The latest Dockerfile can be found at: https://github.com/actions/runner/blob/ma
# Context
users can bring their own runner images, the contract we require is:
user can bring their own runner images, the contract we have are:
- It must have a runner binary under /actions-runner (/actions-runner/run.sh exists)
- The WORKDIR is set to /actions-runner
- If the user inside the container is root, the ENV RUNNER_ALLOW_RUNASROOT should be set to 1
- It must have a runner binary under `/actions-runner` i.e. `/actions-runner/run.sh` exists
- The `WORKDIR` is set to `/actions-runner`
- If the user inside the container is root, the environment variable `RUNNER_ALLOW_RUNASROOT` should be set to `1`
The existing ARC runner images will not work with the new ARC mode out-of-box for the following reason:
The existing [ARC runner images](https://github.com/orgs/actions-runner-controller/packages?tab=packages&q=actions-runner) will not work with the new ARC mode out-of-box for the following reason:
- The current runner image requires the caller to pass runner configuration info, ex: URL and Config Token
- The current runner image has the runner binary under `/runner` which violates the contract described above
- The current runner image requires caller to pass runner configure info, ex: URL and Config Token
- The current runner image has the runner binary under /runner
- The current runner image requires a special entrypoint script in order to work around some volume mount limitation for setting up DinD.
Since we expose the raw runner PodSpec to our end users, they can modify the helm `values.yaml` to adjust the runner container to their needs.
However, since we expose the raw runner Pod spec to our user, advanced user can modify the helm values.yaml to make everything lines up properly.
# Guiding Principles
- Build image is separated in two stages.
## The first stage (build)
- Reuses the same base image, so it is faster to build.
- Installs utilities needed to download assets (`runner` and `runner-container-hooks`).
- Installs utilities needed to download assets (runner and runner-container-hooks).
- Downloads the runner and stores it into `/actions-runner` directory.
- Downloads the runner-container-hooks and stores it into `/actions-runner/k8s` directory.
- You can use build arguments to control the runner version, the target platform and runner container hooks version.
Preview (the published runner image might vary):
Preview:
```Dockerfile
FROM mcr.microsoft.com/dotnet/runtime-deps:6.0 as build
@@ -68,7 +64,6 @@ RUN curl -f -L -o runner-container-hooks.zip https://github.com/actions/runner-c
```
## The main image:
- Copies assets from the build stage to `/actions-runner`
- Does not provide an entrypoint. The entrypoint should be set within the container definition.
@@ -82,7 +77,6 @@ COPY --from=build /actions-runner .
```
## Example of pod spec with the init container copying assets
```yaml
apiVersion: v1
kind: Pod
@@ -90,20 +84,20 @@ metadata:
name: <name>
spec:
  containers:
    - name: runner
      image: <image>
      command: ["/runner/run.sh"]
      volumeMounts:
        - name: runner
          mountPath: /runner
  initContainers:
    - name: setup
      image: <image>
      command: ["sh", "-c", "cp -r /actions-runner/* /runner/"]
      volumeMounts:
        - name: runner
          mountPath: /runner
  volumes:
    - name: runner
      emptyDir: {}
```
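
A quick, hypothetical way to check a custom runner image against the contract above (runner binary under `/actions-runner`, `WORKDIR` set accordingly); the image name is a placeholder:

```bash
IMAGE=my-org/my-runner:latest   # placeholder for a user-provided runner image

# run.sh must exist under /actions-runner
docker run --rm --entrypoint sh "$IMAGE" -c 'test -x /actions-runner/run.sh && echo "run.sh found"'

# WORKDIR should be /actions-runner
docker inspect --format '{{.Config.WorkingDir}}' "$IMAGE"
```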

View File

@@ -1,4 +1,4 @@
# ADR 2022-10-27: Lifetime of RunnerScaleSet on Service
# ADR 0003: Lifetime of RunnerScaleSet on Service
**Date**: 2022-10-27
@@ -12,9 +12,8 @@ The `RunnerScaleSet` object will represent a set of homogeneous self-hosted runn
A `RunnerScaleSet` client (ARC) needs to communicate with the Actions service via HTTP long-poll in a certain protocol to get a workflow job successfully landed on one of its homogeneous self-hosted runners.
In this ADR, we discuss the following within the context of actions-runner-controller's new scaling mode:
- Who and how to create a RunnerScaleSet on the service?
In this ADR, I want to discuss the following within the context of actions-runner-controller's new scaling mode:
- Who and how to create a RunnerScaleSet on the service?
- Who and how to delete a RunnerScaleSet on the service?
- What will happen to all the runners and jobs when the deletion happens?
@@ -31,19 +30,18 @@ In this ADR, we discuss the following within the context of actions-runner-contr
- When the user patches an existing `AutoScalingRunnerSet`'s RunnerScaleSet-related property, ex: `runnerGroupName`, `runnerWorkDir`, the controller needs to make an HTTP PATCH call to the `_apis/runtime/runnerscalesets/2` endpoint in order to update the object on the service.
- We will put the deployed `AutoScalingRunnerSet` resource in an error state when the user tries to patch the resource with a different `githubConfigUrl`
> Basically, you can't move a deployed `AutoScalingRunnerSet` across GitHub entities, repoA->repoB, repoA->OrgC, etc.
> We evaluated blocking the change up front instead of erroring at runtime, and we decided not to go down this route because it forces us to re-introduce admission webhooks (which require cert-manager).
## RunnerScaleSet deletion
- `AutoScalingRunnerSet` custom resource controller will delete the `RunnerScaleSet` object in the Actions service on any `AutoScalingRunnerSet` resource deletion.
> `AutoScalingRunnerSet` deletion will contain several steps:
>
> - Stop the listener app so no more new jobs coming and no more scaling up/down.
> - Request scale down to 0
> - Force stop all runners
> - Wait for the scale down to 0
> - Delete the `RunnerScaleSet` object from service via REST API
- The deletion is via REST API on Actions service `DELETE _apis/runtime/runnerscalesets/1`
- The deletion needs to use the runner registration token (admin).
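
For illustration, the two service calls discussed above might look like the following; the host, tenant path, and auth header format are assumptions, only the endpoint paths come from this ADR:

```bash
ACTIONS_URL="https://pipelines.actions.githubusercontent.com/<tenant>"   # placeholder host/tenant
ADMIN_TOKEN="<runner registration (admin) token>"                        # placeholder credential

# Update a RunnerScaleSet property such as runnerGroupName (HTTP PATCH).
curl -X PATCH "$ACTIONS_URL/_apis/runtime/runnerscalesets/2" \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"runnerGroupName": "my-group"}'

# Delete a RunnerScaleSet (HTTP DELETE), performed after scaling down to 0.
curl -X DELETE "$ACTIONS_URL/_apis/runtime/runnerscalesets/1" \
  -H "Authorization: Bearer $ADMIN_TOKEN"
```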

View File

@@ -1,5 +1,4 @@
# ADR 2022-11-04: Technical detail about actions-runner-controller repository transfer
# ADR 0004: Technical detail about actions-runner-controller repository transfer
**Date**: 2022-11-04
**Status**: Done
@@ -9,18 +8,17 @@
As part of ARC Private Beta: Repository Migration & Open Sourcing Process, we have decided to transfer the current [actions-runner-controller repository](https://github.com/actions-runner-controller/actions-runner-controller) into the [Actions org](https://github.com/actions).
**Goals:**
- A clear signal that GitHub will start taking over ARC and provide support.
- Since we are going to deprecate the existing auto-scale mode in ARC at some point, we want to have a clear separation between the legacy mode (not supported) and the new mode (supported).
- Avoid disrupting users as much as we can, existing ARC users will not notice any difference after the repository transfer, they can keep upgrading to the newer version of ARC and keep using the legacy mode.
**Challenges**
- The original creator's name (`summerwind`) is all over the place, including some critical parts of ARC:
- The k8s user resource API's full name is `actions.summerwind.dev/v1alpha1/RunnerDeployment`, renaming it to `actions.github.com` is a breaking change and will force the user to rebuild their entire k8s cluster.
- All docker images around ARC (controller + default runner) are published to [dockerhub/summerwind](https://hub.docker.com/u/summerwind)
- The helm chart for ARC is currently hosted on [GitHub pages](https://actions-runner-controller.github.io/actions-runner-controller) for https://github.com/actions-runner-controller/actions-runner-controller, moving the repository means we will break users who install ARC via the helm chart
# Decisions
## APIs group names for k8s custom resources, `actions.summerwind` or `actions.github`
@@ -29,9 +27,8 @@ As part of ARC Private Beta: Repository Migration & Open Sourcing Process, we ha
- For any new resource API we are going to add, those will be named properly under GitHub, ex: `actions.github.com/v1alpha1/AutoScalingRunnerSet`
Benefits:
- A clear separation from existing ARC:
- Easy for the support engineer to triage incoming tickets and figure out whether we need to support the use case from the user
- We won't break existing users when they upgrade to a newer version of ARC after the repository transfer
Based on the spike done by `@nikola-jokic`, we have confidence that we can host multiple resources with different API names under the same repository, and the published ARC controller can handle both resources properly.

View File

@@ -1,8 +1,8 @@
# ADR 2022-12-05: Adding labels to our resources
# ADR 0007: Adding labels to our resources
**Date**: 2022-12-05
**Status**: Superseded [^1]
**Status**: Done
## Context
@@ -20,15 +20,12 @@ Assuming standard logging that would allow us to get all ARC logs by running
```bash
kubectl logs -l 'app.kubernetes.io/part-of=actions-runner-controller'
```
which would be very useful for development to begin with.
The proposal is to add these sets of labels to the pods ARC creates:
#### controller-manager
Labels to be set by the Helm chart:
```yaml
metadata:
labels:
@@ -38,9 +35,7 @@ metadata:
```
#### Listener
Labels to be set by controller at creation:
```yaml
metadata:
labels:
@@ -48,7 +43,7 @@ metadata:
app.kubernetes.io/component: runner-scale-set-listener
app.kubernetes.io/version: "x.x.x"
actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet
# the following labels are to be extracted by the config URL
actions.github.com/enterprise: enterprise
actions.github.com/organization: organization
@@ -56,9 +51,7 @@ metadata:
```
#### Runner
Labels to be set by controller at creation:
```yaml
metadata:
labels:
@@ -85,5 +78,3 @@ Or for example if they're having problems specifically with runners:
This way users don't have to understand ARC moving parts but we still have a
way to target them specifically if we need to.
[^1]: Superseded by [ADR 2023-04-14](2023-04-14-adding-labels-k8s-resources.md)
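
A sketch of how the proposed labels could be used when collecting logs for support; label values follow the examples in this ADR, and the namespace names are taken from the workflows earlier in this diff:

```bash
# Everything ARC-related (in the current namespace), as in the ADR example above.
kubectl logs -l 'app.kubernetes.io/part-of=actions-runner-controller'

# Only the listener pods in the controller namespace.
kubectl logs -n arc-systems -l 'app.kubernetes.io/component=runner-scale-set-listener'

# All pods belonging to one AutoscalingRunnerSet, across namespaces.
kubectl get pods -A -l 'actions.github.com/scale-set-name=scale-set-name'
```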

View File

@@ -1,5 +1,4 @@
# ADR 2022-12-27: Pick the right runner to scale down
# ADR 0008: Pick the right runner to scale down
**Date**: 2022-12-27
**Status**: Done
@@ -8,37 +7,35 @@
- A custom resource `EphemeralRunnerSet` manages a set of `EphemeralRunner` custom resources
- The `EphemeralRunnerSet` has `Replicas` in its `Spec`, and the responsibility of the `EphemeralRunnerSet_controller` is to reconcile a given `EphemeralRunnerSet` to have the same amount of `EphemeralRunners` as the `Spec.Replicas` defined.
- This means the `EphemeralRunnerSet_controller` will scale up the `EphemeralRunnerSet` by creating more `EphemeralRunners` when `Spec.Replicas` is higher than the current amount of `EphemeralRunners`.
- This also means the `EphemeralRunnerSet_controller` will scale down the `EphemeralRunnerSet` by finding existing `EphemeralRunners` to delete when `Spec.Replicas` is less than the current amount of `EphemeralRunners`.

This ADR is about how we can find the right existing `EphemeralRunner` to delete when we need to scale down.

## Current approach
1. `EphemeralRunnerSet_controller` figures out how many `EphemeralRunners` it needs to delete, ex: scaling down from 10 to 2 means we need to delete 8 `EphemeralRunners`.
2. `EphemeralRunnerSet_controller` finds all `EphemeralRunners` that are in the `Running` or `Pending` phase.
> `Pending` means the `EphemeralRunner` is still probably creating and a runner has not yet configured with the Actions service.
> `Running` means the `EphemeralRunner` is created and a runner has probably configured with the Actions service; the runner may sit there idle, or may be actively running a workflow job. We don't have a clear answer for it from the ARC side. (The Actions service knows it for sure.)
3. `EphemeralRunnerSet_controller` makes an HTTP DELETE request to the Actions service for each `EphemeralRunner` from the previous step and asks the Actions service to delete the runner via `RunnerId`. (The `RunnerId` is generated after the runner registers with the Actions service, and is stored on the `EphemeralRunner.Status.RunnerId`.)
> - The HTTP DELETE request looks like the following:
>   `DELETE https://pipelines.actions.githubusercontent.com/WoxlUxJHrKEzIp4Nz3YmrmLlZBonrmj9xCJ1lrzcJ9ZsD1Tnw7/_apis/distributedtask/pools/0/agents/1024`
> - The Actions service will return 2 types of responses:
>   1. 204 (No Content): The runner with Id 1024 has been successfully removed from the service, or the runner with Id 1024 doesn't exist.
>   2. 400 (Bad Request) with a JSON body that contains an error message like `JobStillRunningException`: The service can't remove this runner at this point since it has been assigned to a job request; the client won't be able to remove the runner until the runner finishes its currently assigned job request.
4. `EphemeralRunnerSet_controller` will ignore any deletion error from runners that are still running a job, and keep trying deletion until the amount of `204` responses equals the amount of `EphemeralRunners` it needs to delete.
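
For context, the per-runner delete call above with the two documented responses handled; the URL is the ADR's example, and the auth header format is an assumption:

```bash
status=$(curl -s -o /tmp/delete-runner.json -w '%{http_code}' -X DELETE \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  "https://pipelines.actions.githubusercontent.com/WoxlUxJHrKEzIp4Nz3YmrmLlZBonrmj9xCJ1lrzcJ9ZsD1Tnw7/_apis/distributedtask/pools/0/agents/1024")

case "$status" in
  204) echo "runner removed (or it no longer exists)" ;;
  400) grep -q JobStillRunningException /tmp/delete-runner.json \
         && echo "runner is still running a job; retry after it finishes" ;;
esac
```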
## The problem with the current approach
@@ -71,7 +68,6 @@ this would be a big `NO` from a security point of view since we may not trust th
The nature of the k8s controller-runtime means we might reconcile the resource based on stale cache data.
I think our goal for the solution should be:
- Reduce wasteful HTTP requests on a scale-down as much as we can.
- We can accept that we might make 1 or 2 wasteful requests to Actions service, but we can't accept making 5/10+ of them.
- See if we can meet feature parity with what the RunnerJobHook supports without compromising any security concerns.
@@ -81,11 +77,9 @@ a simple thought is how about we somehow attach some info to the `EphemeralRunne
How about we send this info from the service to the auto-scaling-listener via the existing HTTP long-poll
and let the listener patch the `EphemeralRunner.Status` to indicate it's running a job?
> The listener is normally in a separate namespace with elevated permission and it's something we can trust.
Changes:
- Introduce a new message type `JobStarted` (in addition to the existing `JobAvailable/JobAssigned/JobCompleted`) on the service side, the message is sent when a runner of the `RunnerScaleSet` get assigned to a job,
`RequestId`, `RunnerId`, and `RunnerName` will be included in the message.
- Add `RequestId (int)` to `EphemeralRunner.Status`, this will indicate which job the runner is running.

View File

@@ -1,6 +1,4 @@
# ADR 2023-02-02: Automate updating runner version
**Date**: 2023-02-02
# Automate updating runner version
**Status**: Proposed
@@ -18,7 +16,6 @@ version is updated (and this is currently done manually).
We can have another workflow running on a cadence (hourly seems sensible) and checking for new runner
releases, creating a PR updating `RUNNER_VERSION` in:
- `.github/workflows/release-runners.yaml`
- `Makefile`
- `runner/Makefile`

View File

@@ -1,5 +1,4 @@
# ADR 2023-02-10: Limit Permissions for Service Accounts in Actions-Runner-Controller
# ADR 0007: Limit Permissions for Service Accounts in Actions-Runner-Controller
**Date**: 2023-02-10
**Status**: Pending
@@ -8,7 +7,7 @@
- `actions-runner-controller` is a Kubernetes CRD (with controller) built using https://github.com/kubernetes-sigs/controller-runtime
- [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) has a default cache-based k8s API client.Reader to query the k8s API server more efficiently.
- The cache-based API client requires cluster scope `list` and `watch` permission for any resource the controller may query.
@@ -23,7 +22,6 @@ There are 3 service accounts involved for a working `AutoscalingRunnerSet` based
This should have the lowest privilege (not any `RoleBinding` nor `ClusterRoleBinding`) by default, in the case of `containerMode=kubernetes`, it will get certain write permission with `RoleBinding` to limit the permission to a single namespace.
> References:
>
> - ./charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml
> - ./charts/gha-runner-scale-set/templates/kube_mode_role.yaml
> - ./charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml
@@ -54,7 +52,7 @@ The current `ClusterRole` has the following permissions:
## Limit cluster role permission on Secrets
The cluster scope `List` `Secrets` permission might be a blocker for adopting `actions-runner-controller` for certain customers, as they may have restrictions in their cluster that simply don't allow any service account to have cluster scope `List Secrets` permission.
To help these customers and improve security for `actions-runner-controller` in general, we will try to limit the `ClusterRole` permission of the controller manager's service account down to the following:
@@ -81,10 +79,9 @@ The `Role` and `RoleBinding` creation will happen during the `helm install demo
During `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`, we will store the controller's service account info as labels on the controller `Deployment`.
Ex:
```yaml
actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }}
actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
```
Introduce a new `Role` per `AutoScalingRunnerSet` installation, and a `RoleBinding` that binds the `Role` to the controller's `ServiceAccount`, in the namespace where each `AutoScalingRunnerSet` is deployed, with the following permissions.
@@ -105,9 +102,8 @@ The `gha-runner-scale-set` helm chart will use this service account to properly
The `gha-runner-scale-set` helm chart will also allow customers to explicitly provide the controller service account info, in case the `helm lookup` couldn't locate the right controller `Deployment`.
New sections in `values.yaml` of `gha-runner-scale-set`:
```yaml
## Optional controller service account that needs to have the required Role and RoleBinding
## Optional controller service account that needs to have the required Role and RoleBinding
## to operate this gha-runner-scale-set installation.
## The helm chart will try to find the controller deployment and its service account at installation time.
## In case the helm chart can't find the right service account, you can explicitly pass in the following value
@@ -133,6 +129,5 @@ You will deploy the `AutoScalingRunnerSet` with something like `helm install dem
In this mode, you will end up with a manager `Role` that has all Get/List/Create/Delete/Update/Patch/Watch permissions on the resources we need, and a `RoleBinding` that binds the `Role` to the controller `ServiceAccount` in both the watched single namespace and the controller namespace, e.g. `test-namespace` and `arc-system` in the above example. (A values sketch for enabling this mode follows the list of downsides below.)
The downsides of this mode:
- When you have multiple controllers deployed, they will still use the same version of the CRD, so you will need to make sure every controller you deploy is the same version.
- You can't mix an installation of `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in the same cluster.
- You can't mix an installation of `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in the same cluster.
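As a sketch, opting into this mode through the controller chart's `values.yaml` could look like the following; the namespace value is illustrative:
```yaml
# values.yaml for gha-runner-scale-set-controller (sketch)
flags:
  logLevel: "debug"
  # Restrict the controller to a single namespace; leaving this unset keeps the
  # regular watch-all-cluster-namespaces installation mode.
  watchSingleNamespace: "test-namespace"
```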

View File

@@ -1,89 +0,0 @@
# ADR 2023-04-14: Adding labels to our resources
**Date**: 2023-04-14
**Status**: Done [^1]
## Context
Users need to provide us with logs so that we can help support and troubleshoot their issues. We need a way for our users to filter and retrieve the logs we need.
## Proposal
A good start would be a catch-all label to get all logs that are
ARC-related: one of the [recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/)
is `app.kubernetes.io/part-of` and we can set that for all ARC components
to be `actions-runner-controller`.
Assuming standard logging that would allow us to get all ARC logs by running
```bash
kubectl logs -l 'app.kubernetes.io/part-of=gha-runner-scale-set-controller'
```
which would be very useful for development to begin with.
The proposal is to add these sets of labels to the pods ARC creates:
#### controller-manager
Labels to be set by the Helm chart:
```yaml
metadata:
labels:
app.kubernetes.io/part-of: gha-runner-scale-set-controller
app.kubernetes.io/component: controller-manager
app.kubernetes.io/version: "x.x.x"
```
#### Listener
Labels to be set by controller at creation:
```yaml
metadata:
labels:
app.kubernetes.io/part-of: gha-runner-scale-set-controller
app.kubernetes.io/component: runner-scale-set-listener
app.kubernetes.io/version: "x.x.x"
actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet
# the following labels are to be extracted from the config URL
actions.github.com/enterprise: enterprise
actions.github.com/organization: organization
actions.github.com/repository: repository
```
#### Runner
Labels to be set by controller at creation:
```yaml
metadata:
labels:
app.kubernetes.io/part-of: gha-runner-scale-set-controller
app.kubernetes.io/component: runner
app.kubernetes.io/version: "x.x.x"
actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet
actions.github.com/runner-name: runner-name
actions.github.com/runner-group-name: runner-group-name
# the following labels are to be extracted from the config URL
actions.github.com/enterprise: enterprise
actions.github.com/organization: organization
actions.github.com/repository: repository
```
This would allow us to ask users:
> Can you please send us the logs coming from pods labelled 'app.kubernetes.io/part-of=gha-runner-scale-set-controller'?
Or for example if they're having problems specifically with runners:
> Can you please send us the logs coming from pods labelled 'app.kubernetes.io/component=runner'?
This way users don't have to understand ARC's moving parts, but we still have a
way to target them specifically if we need to.
[^1]: [ADR 2022-12-05](2022-12-05-adding-labels-k8s-resources.md)

View File

@@ -6,13 +6,13 @@
## Context
_What is the issue or background knowledge necessary for future readers
to understand why this ADR was written?_
*What is the issue or background knowledge necessary for future readers
to understand why this ADR was written?*
## Decision
_**What** is the change being proposed? **How** will it be implemented?_
***What** is the change being proposed? / **How** will it be implemented?*
## Consequences
_What becomes easier or more difficult to do because of this change?_
*What becomes easier or more difficult to do because of this change?*

View File

@@ -248,6 +248,7 @@ type AutoscalingRunnerSetStatus struct {
}
func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {
type listenerSpec = AutoscalingRunnerSetSpec
arsSpec := ars.Spec.DeepCopy()
spec := arsSpec
return hash.ComputeTemplateHash(&spec)

View File

@@ -39,7 +39,7 @@ helm.sh/chart: {{ include "gha-runner-scale-set-controller.chart" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/part-of: gha-runner-scale-set-controller
app.kubernetes.io/part-of: {{ .Chart.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $k, $v := .Values.labels }}
{{ $k }}: {{ $v }}
@@ -59,41 +59,25 @@ Create the name of the service account to use
*/}}
{{- define "gha-runner-scale-set-controller.serviceAccountName" -}}
{{- if eq .Values.serviceAccount.name "default"}}
{{- fail "serviceAccount.name cannot be set to 'default'" }}
{{- fail "serviceAccount.name cannot be set to 'default'" }}
{{- end }}
{{- if .Values.serviceAccount.create }}
{{- default (include "gha-runner-scale-set-controller.fullname" .) .Values.serviceAccount.name }}
{{- default (include "gha-runner-scale-set-controller.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- if not .Values.serviceAccount.name }}
{{- fail "serviceAccount.name must be set if serviceAccount.create is false" }}
{{- else }}
{{- .Values.serviceAccount.name }}
{{- end }}
{{- if not .Values.serviceAccount.name }}
{{- fail "serviceAccount.name must be set if serviceAccount.create is false" }}
{{- else }}
{{- .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set-controller.managerClusterRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-role
{{- define "gha-runner-scale-set-controller.managerRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-role
{{- end }}
{{- define "gha-runner-scale-set-controller.managerClusterRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-rolebinding
{{- end }}
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-role
{{- end }}
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-rolebinding
{{- end }}
{{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-role
{{- end }}
{{- define "gha-runner-scale-set-controller.managerListenerRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-rolebinding
{{- define "gha-runner-scale-set-controller.managerRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-rolebinding
{{- end }}
{{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}}
@@ -107,7 +91,7 @@ Create the name of the service account to use
{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
{{- $names := list }}
{{- range $k, $v := . }}
{{- $names = append $names $v.name }}
{{- $names = append $names $v.name }}
{{- end }}
{{- $names | join ","}}
{{- end }}
{{- end }}

View File

@@ -5,11 +5,6 @@ metadata:
namespace: {{ .Release.Namespace }}
labels:
{{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }}
actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }}
actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
{{- if .Values.flags.watchSingleNamespace }}
actions.github.com/controller-watch-single-namespace: {{ .Values.flags.watchSingleNamespace }}
{{- end }}
spec:
replicas: {{ default 1 .Values.replicaCount }}
selector:
@@ -23,7 +18,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
app.kubernetes.io/part-of: gha-runner-scale-set-controller
app.kubernetes.io/part-of: actions-runner-controller
app.kubernetes.io/component: controller-manager
app.kubernetes.io/version: {{ .Chart.Version }}
{{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }}
@@ -56,9 +51,6 @@ spec:
{{- with .Values.flags.logLevel }}
- "--log-level={{ . }}"
{{- end }}
{{- with .Values.flags.watchSingleNamespace }}
- "--watch-single-namespace={{ . }}"
{{- end }}
command:
- "/manager"
env:

View File

@@ -1,4 +1,4 @@
{{- if gt (int (default 1 .Values.replicaCount)) 1 }}
{{- if gt (int (default 1 .Values.replicaCount)) 1 -}}
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role

View File

@@ -1,4 +1,4 @@
{{- if gt (int (default 1 .Values.replicaCount)) 1 }}
{{- if gt (int (default 1 .Values.replicaCount)) 1 -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:

View File

@@ -1,14 +0,0 @@
{{- if empty .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.managerClusterRoleBinding" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "gha-runner-scale-set-controller.managerClusterRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,40 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- patch
- update

View File

@@ -1,8 +1,7 @@
{{- if empty .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "gha-runner-scale-set-controller.managerClusterRoleName" . }}
name: {{ include "gha-runner-scale-set-controller.managerRoleName" . }}
rules:
- apiGroups:
- actions.github.com
@@ -21,7 +20,6 @@ rules:
resources:
- autoscalingrunnersets/finalizers
verbs:
- patch
- update
- apiGroups:
- actions.github.com
@@ -56,7 +54,6 @@ rules:
resources:
- autoscalinglisteners/finalizers
verbs:
- patch
- update
- apiGroups:
- actions.github.com
@@ -95,8 +92,13 @@ rules:
resources:
- ephemeralrunners/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
@@ -110,12 +112,44 @@ rules:
resources:
- pods
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
- configmaps
verbs:
- list
- watch
@@ -124,6 +158,10 @@ rules:
resources:
- rolebindings
verbs:
- create
- delete
- get
- update
- list
- watch
- apiGroups:
@@ -131,6 +169,9 @@ rules:
resources:
- roles
verbs:
- create
- delete
- get
- update
- list
- watch
{{- end }}

View File

@@ -1,12 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
kind: ClusterRoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleBinding" . }}
namespace: {{ .Release.Namespace }}
name: {{ include "gha-runner-scale-set-controller.managerRoleBinding" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . }}
kind: ClusterRole
name: {{ include "gha-runner-scale-set-controller.managerRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}

View File

@@ -1,84 +0,0 @@
{{- if .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners/finalizers
verbs:
- patch
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- list
- watch
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets
verbs:
- list
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets
verbs:
- list
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners
verbs:
- list
- watch
{{- end }}

View File

@@ -1,15 +0,0 @@
{{- if .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,117 +0,0 @@
{{- if .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
namespace: {{ .Values.flags.watchSingleNamespace }}
rules:
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets/finalizers
verbs:
- patch
- update
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners/finalizers
verbs:
- patch
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- list
- watch
{{- end }}

View File

@@ -1,15 +0,0 @@
{{- if .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }}
namespace: {{ .Values.flags.watchSingleNamespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,4 +1,4 @@
{{- if .Values.serviceAccount.create }}
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:

View File

@@ -147,7 +147,7 @@ func TestTemplate_NotCreateServiceAccount_ServiceAccountNotSet(t *testing.T) {
assert.ErrorContains(t, err, "serviceAccount.name must be set if serviceAccount.create is false", "We should get an error because the default service account cannot be used")
}
func TestTemplate_CreateManagerClusterRole(t *testing.T) {
func TestTemplate_CreateManagerRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
@@ -162,23 +162,17 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) {
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role.yaml"})
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
var managerClusterRole rbacv1.ClusterRole
helm.UnmarshalK8SYaml(t, output, &managerClusterRole)
var managerRole rbacv1.ClusterRole
helm.UnmarshalK8SYaml(t, output, &managerRole)
assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name)
assert.Equal(t, 15, len(managerClusterRole.Rules))
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role.yaml in chart", "We should get an error because the template should be skipped")
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_watch_role.yaml in chart", "We should get an error because the template should be skipped")
assert.Empty(t, managerRole.Namespace, "ClusterRole should not have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRole.Name)
assert.Equal(t, 18, len(managerRole.Rules))
}
func TestTemplate_ManagerClusterRoleBinding(t *testing.T) {
func TestTemplate_ManagerRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
@@ -195,80 +189,16 @@ func TestTemplate_ManagerClusterRoleBinding(t *testing.T) {
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role_binding.yaml"})
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
var managerClusterRoleBinding rbacv1.ClusterRoleBinding
helm.UnmarshalK8SYaml(t, output, &managerClusterRoleBinding)
var managerRoleBinding rbacv1.ClusterRoleBinding
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
assert.Empty(t, managerClusterRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-rolebinding", managerClusterRoleBinding.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerClusterRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerClusterRoleBinding.Subjects[0].Namespace)
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role_binding.yaml in chart", "We should get an error because the template should be skipped")
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role_binding.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_watch_role_binding.yaml in chart", "We should get an error because the template should be skipped")
}
func TestTemplate_CreateManagerListenerRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_listener_role.yaml"})
var managerListenerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerListenerRole)
assert.Equal(t, namespaceName, managerListenerRole.Namespace, "Role should have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRole.Name)
assert.Equal(t, 4, len(managerListenerRole.Rules))
assert.Equal(t, "pods", managerListenerRole.Rules[0].Resources[0])
assert.Equal(t, "pods/status", managerListenerRole.Rules[1].Resources[0])
assert.Equal(t, "secrets", managerListenerRole.Rules[2].Resources[0])
assert.Equal(t, "serviceaccounts", managerListenerRole.Rules[3].Resources[0])
}
func TestTemplate_ManagerListenerRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"serviceAccount.create": "true",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_listener_role_binding.yaml"})
var managerListenerRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerListenerRoleBinding)
assert.Equal(t, namespaceName, managerListenerRoleBinding.Namespace, "RoleBinding should have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-rolebinding", managerListenerRoleBinding.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerListenerRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerListenerRoleBinding.Subjects[0].Namespace)
assert.Empty(t, managerRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-rolebinding", managerRoleBinding.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerRoleBinding.Subjects[0].Namespace)
}
func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
@@ -307,10 +237,6 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"])
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
assert.NotContains(t, deployment.Labels, "actions.github.com/controller-watch-single-namespace")
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
@@ -417,7 +343,6 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "bar", deployment.Labels["foo"])
assert.Equal(t, "actions", deployment.Labels["github"])
@@ -610,215 +535,3 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
}
func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
require.NoError(t, err)
chart := new(Chart)
err = yaml.Unmarshal(chartContent, chart)
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"image.tag": "dev",
"flags.watchSingleNamespace": "demo",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"])
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
assert.Equal(t, "demo", deployment.Labels["actions.github.com/controller-watch-single-namespace"])
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName)
assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext)
assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName)
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
assert.NotNil(t, 10, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0)
assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)
managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
}
func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"flags.watchSingleNamespace": "demo",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_cluster_role.yaml in chart", "We should get an error because the template should be skipped")
}
func TestTemplate_WatchSingleNamespace_NotManagerClusterRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"serviceAccount.create": "true",
"flags.watchSingleNamespace": "demo",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role_binding.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_cluster_role_binding.yaml in chart", "We should get an error because the template should be skipped")
}
func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"flags.watchSingleNamespace": "demo",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
var managerSingleNamespaceControllerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRole)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRole.Name)
assert.Equal(t, namespaceName, managerSingleNamespaceControllerRole.Namespace)
assert.Equal(t, 10, len(managerSingleNamespaceControllerRole.Rules))
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role.yaml"})
var managerSingleNamespaceWatchRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRole)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name)
assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace)
assert.Equal(t, 13, len(managerSingleNamespaceWatchRole.Rules))
}
func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"flags.watchSingleNamespace": "demo",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"})
var managerSingleNamespaceControllerRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRoleBinding)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceControllerRoleBinding.Name)
assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceControllerRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Subjects[0].Namespace)
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role_binding.yaml"})
var managerSingleNamespaceWatchRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRoleBinding)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceWatchRoleBinding.Name)
assert.Equal(t, "demo", managerSingleNamespaceWatchRoleBinding.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerSingleNamespaceWatchRoleBinding.Subjects[0].Namespace)
}

View File

@@ -68,7 +68,3 @@ flags:
# Log level can be set here with one of the following values: "debug", "info", "warn", "error".
# Defaults to "debug".
logLevel: "debug"
# Restricts the controller to only watch resources in the desired namespace.
# Defaults to watch all namespaces when unset.
# watchSingleNamespace: ""

View File

@@ -75,15 +75,19 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- define "gha-runner-scale-set.dind-init-container" -}}
{{- range $i, $val := .Values.template.spec.containers }}
{{- if eq $val.name "runner" }}
{{- range $i, $val := .Values.template.spec.containers -}}
{{- if eq $val.name "runner" -}}
image: {{ $val.image }}
{{- if $val.imagePullSecrets }}
imagePullSecrets:
{{ $val.imagePullSecrets | toYaml -}}
{{- end }}
command: ["cp"]
args: ["-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"]
volumeMounts:
- name: dind-externals
mountPath: /home/runner/tmpDir
{{- end }}
{{- end }}
{{- end }}
{{- end }}
@@ -120,7 +124,7 @@ volumeMounts:
{{- $createWorkVolume := 1 }}
{{- range $i, $volume := .Values.template.spec.volumes }}
{{- if eq $volume.name "work" }}
{{- $createWorkVolume = 0 }}
{{- $createWorkVolume = 0 -}}
- {{ $volume | toYaml | nindent 2 }}
{{- end }}
{{- end }}
@@ -134,7 +138,7 @@ volumeMounts:
{{- $createWorkVolume := 1 }}
{{- range $i, $volume := .Values.template.spec.volumes }}
{{- if eq $volume.name "work" }}
{{- $createWorkVolume = 0 }}
{{- $createWorkVolume = 0 -}}
- {{ $volume | toYaml | nindent 2 }}
{{- end }}
{{- end }}
@@ -156,28 +160,25 @@ volumeMounts:
{{- end }}
{{- define "gha-runner-scale-set.non-runner-containers" -}}
{{- range $i, $container := .Values.template.spec.containers }}
{{- if ne $container.name "runner" }}
- {{ $container | toYaml | nindent 2 }}
{{- end }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.non-runner-non-dind-containers" -}}
{{- range $i, $container := .Values.template.spec.containers }}
{{- if and (ne $container.name "runner") (ne $container.name "dind") }}
- {{ $container | toYaml | nindent 2 }}
{{- range $i, $container := .Values.template.spec.containers -}}
{{- if ne $container.name "runner" -}}
- name: {{ $container.name }}
{{- range $key, $val := $container }}
{{- if ne $key "name" }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.dind-runner-container" -}}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- range $i, $container := .Values.template.spec.containers }}
{{- if eq $container.name "runner" }}
{{- range $i, $container := .Values.template.spec.containers -}}
{{- if eq $container.name "runner" -}}
{{- range $key, $val := $container }}
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
{{ $key }}: {{ $val | toYaml | nindent 2 }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- $setDockerHost := 1 }}
@@ -194,24 +195,29 @@ env:
{{- with $container.env }}
{{- range $i, $env := . }}
{{- if eq $env.name "DOCKER_HOST" }}
{{- $setDockerHost = 0 }}
{{- $setDockerHost = 0 -}}
{{- end }}
{{- if eq $env.name "DOCKER_TLS_VERIFY" }}
{{- $setDockerTlsVerify = 0 }}
{{- $setDockerTlsVerify = 0 -}}
{{- end }}
{{- if eq $env.name "DOCKER_CERT_PATH" }}
{{- $setDockerCertPath = 0 }}
{{- $setDockerCertPath = 0 -}}
{{- end }}
{{- if eq $env.name "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS" }}
{{- $setRunnerWaitDocker = 0 }}
{{- $setRunnerWaitDocker = 0 -}}
{{- end }}
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
{{- $setNodeExtraCaCerts = 0 }}
{{- $setNodeExtraCaCerts = 0 -}}
{{- end }}
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
{{- $setRunnerUpdateCaCerts = 0 }}
{{- $setRunnerUpdateCaCerts = 0 -}}
{{- end }}
- name: {{ $env.name }}
{{- range $envKey, $envVal := $env }}
{{- if ne $envKey "name" }}
{{ $envKey }}: {{ $envVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
- {{ $env | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $setDockerHost }}
@@ -248,15 +254,20 @@ volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
{{- if eq $volMount.name "work" }}
{{- $mountWork = 0 }}
{{- $mountWork = 0 -}}
{{- end }}
{{- if eq $volMount.name "dind-cert" }}
{{- $mountDindCert = 0 }}
{{- $mountDindCert = 0 -}}
{{- end }}
{{- if eq $volMount.name "github-server-tls-cert" }}
{{- $mountGitHubServerTLS = 0 }}
{{- $mountGitHubServerTLS = 0 -}}
{{- end }}
- name: {{ $volMount.name }}
{{- range $mountKey, $mountVal := $volMount }}
{{- if ne $mountKey "name" }}
{{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
- {{ $volMount | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $mountWork }}
@@ -279,11 +290,11 @@ volumeMounts:
{{- define "gha-runner-scale-set.kubernetes-mode-runner-container" -}}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- range $i, $container := .Values.template.spec.containers }}
{{- if eq $container.name "runner" }}
{{- range $i, $container := .Values.template.spec.containers -}}
{{- if eq $container.name "runner" -}}
{{- range $key, $val := $container }}
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
{{ $key }}: {{ $val | toYaml | nindent 2 }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- $setContainerHooks := 1 }}
@@ -299,21 +310,26 @@ env:
{{- with $container.env }}
{{- range $i, $env := . }}
{{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }}
{{- $setContainerHooks = 0 }}
{{- $setContainerHooks = 0 -}}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }}
{{- $setPodName = 0 }}
{{- $setPodName = 0 -}}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }}
{{- $setRequireJobContainer = 0 }}
{{- $setRequireJobContainer = 0 -}}
{{- end }}
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
{{- $setNodeExtraCaCerts = 0 }}
{{- $setNodeExtraCaCerts = 0 -}}
{{- end }}
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
{{- $setRunnerUpdateCaCerts = 0 }}
{{- $setRunnerUpdateCaCerts = 0 -}}
{{- end }}
- name: {{ $env.name }}
{{- range $envKey, $envVal := $env }}
{{- if ne $envKey "name" }}
{{ $envKey }}: {{ $envVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
- {{ $env | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $setContainerHooks }}
@@ -347,12 +363,17 @@ volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
{{- if eq $volMount.name "work" }}
{{- $mountWork = 0 }}
{{- $mountWork = 0 -}}
{{- end }}
{{- if eq $volMount.name "github-server-tls-cert" }}
{{- $mountGitHubServerTLS = 0 }}
{{- $mountGitHubServerTLS = 0 -}}
{{- end }}
- name: {{ $volMount.name }}
{{- range $mountKey, $mountVal := $volMount }}
{{- if ne $mountKey "name" }}
{{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
- {{ $volMount | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $mountWork }}
@@ -370,14 +391,14 @@ volumeMounts:
{{- define "gha-runner-scale-set.default-mode-runner-containers" -}}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- range $i, $container := .Values.template.spec.containers }}
{{- if ne $container.name "runner" }}
{{- range $i, $container := .Values.template.spec.containers -}}
{{- if ne $container.name "runner" -}}
- {{ $container | toYaml | nindent 2 }}
{{- else }}
- name: {{ $container.name }}
{{- range $key, $val := $container }}
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
{{ $key }}: {{ $val | toYaml | nindent 4 }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- $setNodeExtraCaCerts := 0 }}
@@ -390,12 +411,17 @@ volumeMounts:
{{- with $container.env }}
{{- range $i, $env := . }}
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
{{- $setNodeExtraCaCerts = 0 }}
{{- $setNodeExtraCaCerts = 0 -}}
{{- end }}
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
{{- $setRunnerUpdateCaCerts = 0 }}
{{- $setRunnerUpdateCaCerts = 0 -}}
{{- end }}
- name: {{ $env.name }}
{{- range $envKey, $envVal := $env }}
{{- if ne $envKey "name" }}
{{ $envKey }}: {{ $envVal | toYaml | nindent 10 }}
{{- end }}
{{- end }}
- {{ $env | toYaml | nindent 6 }}
{{- end }}
{{- end }}
{{- if $setNodeExtraCaCerts }}
@@ -414,9 +440,14 @@ volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
{{- if eq $volMount.name "github-server-tls-cert" }}
{{- $mountGitHubServerTLS = 0 }}
{{- $mountGitHubServerTLS = 0 -}}
{{- end }}
- name: {{ $volMount.name }}
{{- range $mountKey, $mountVal := $volMount }}
{{- if ne $mountKey "name" }}
{{ $mountKey }}: {{ $mountVal | toYaml | nindent 10 }}
{{- end }}
{{- end }}
- {{ $volMount | toYaml | nindent 6 }}
{{- end }}
{{- end }}
{{- if $mountGitHubServerTLS }}
@@ -427,125 +458,3 @@ volumeMounts:
{{- end }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.managerRoleName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-manager-role
{{- end }}
{{- define "gha-runner-scale-set.managerRoleBinding" -}}
{{- include "gha-runner-scale-set.fullname" . }}-manager-role-binding
{{- end }}
{{- define "gha-runner-scale-set.managerServiceAccountName" -}}
{{- $searchControllerDeployment := 1 }}
{{- if .Values.controllerServiceAccount }}
{{- if .Values.controllerServiceAccount.name }}
{{- $searchControllerDeployment = 0 }}
{{- .Values.controllerServiceAccount.name }}
{{- end }}
{{- end }}
{{- if eq $searchControllerDeployment 1 }}
{{- $multiNamespacesCounter := 0 }}
{{- $singleNamespaceCounter := 0 }}
{{- $controllerDeployment := dict }}
{{- $singleNamespaceControllerDeployments := dict }}
{{- $managerServiceAccountName := "" }}
{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
{{- if kindIs "map" $deployment.metadata.labels }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }}
{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}
{{- else }}
{{- $multiNamespacesCounter = add $multiNamespacesCounter 1 }}
{{- $controllerDeployment = $deployment }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
{{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
{{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if gt $multiNamespacesCounter 1 }}
{{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if eq $multiNamespacesCounter 1 }}
{{- with $controllerDeployment.metadata }}
{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
{{- end }}
{{- else if gt $singleNamespaceCounter 0 }}
{{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }}
{{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }}
{{- with $controllerDeployment.metadata }}
{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
{{- end }}
{{- else }}
{{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- end }}
{{- if eq $managerServiceAccountName "" }}
{{- fail "No service account name found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- $managerServiceAccountName }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.managerServiceAccountNamespace" -}}
{{- $searchControllerDeployment := 1 }}
{{- if .Values.controllerServiceAccount }}
{{- if .Values.controllerServiceAccount.namespace }}
{{- $searchControllerDeployment = 0 }}
{{- .Values.controllerServiceAccount.namespace }}
{{- end }}
{{- end }}
{{- if eq $searchControllerDeployment 1 }}
{{- $multiNamespacesCounter := 0 }}
{{- $singleNamespaceCounter := 0 }}
{{- $controllerDeployment := dict }}
{{- $singleNamespaceControllerDeployments := dict }}
{{- $managerServiceAccountNamespace := "" }}
{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
{{- if kindIs "map" $deployment.metadata.labels }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }}
{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}
{{- else }}
{{- $multiNamespacesCounter = add $multiNamespacesCounter 1 }}
{{- $controllerDeployment = $deployment }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
{{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
{{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if gt $multiNamespacesCounter 1 }}
{{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if eq $multiNamespacesCounter 1 }}
{{- with $controllerDeployment.metadata }}
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
{{- end }}
{{- else if gt $singleNamespaceCounter 0 }}
{{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }}
{{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }}
{{- with $controllerDeployment.metadata }}
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
{{- end }}
{{- else }}
{{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- end }}
{{- if eq $managerServiceAccountNamespace "" }}
{{- fail "No service account namespace found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- $managerServiceAccountNamespace }}
{{- end }}
{{- end }}

View File

@@ -36,21 +36,17 @@ spec:
{{- if .Values.proxy.http }}
http:
url: {{ .Values.proxy.http.url }}
{{- if .Values.proxy.http.credentialSecretRef }}
credentialSecretRef: {{ .Values.proxy.http.credentialSecretRef }}
{{- end }}
{{- end }}
{{ end }}
{{- if .Values.proxy.https }}
https:
url: {{ .Values.proxy.https.url }}
{{- if .Values.proxy.https.credentialSecretRef }}
credentialSecretRef: {{ .Values.proxy.https.credentialSecretRef }}
{{- end }}
{{- end }}
{{ end }}
{{- if and .Values.proxy.noProxy (kindIs "slice" .Values.proxy.noProxy) }}
noProxy: {{ .Values.proxy.noProxy | toYaml | nindent 6}}
{{- end }}
{{- end }}
{{ end }}
{{ end }}
{{- if and (or (kindIs "int64" .Values.minRunners) (kindIs "float64" .Values.minRunners)) (or (kindIs "int64" .Values.maxRunners) (kindIs "float64" .Values.maxRunners)) }}
{{- if gt .Values.minRunners .Values.maxRunners }}
@@ -111,7 +107,7 @@ spec:
{{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }}
- name: dind
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }}
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
{{- else if eq .Values.containerMode.type "kubernetes" }}
- name: runner
{{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }}

View File

@@ -1,59 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set.managerRoleName" . }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- create
- delete
- get
- patch
- update
{{- if .Values.githubServerTLS }}
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
{{- end }}

View File

@@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set.managerRoleBinding" . }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "gha-runner-scale-set.managerRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set.managerServiceAccountName" . | nindent 4 }}
namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }}

View File

@@ -27,10 +27,8 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -62,8 +60,6 @@ func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
"githubConfigSecret.github_app_id": "10",
"githubConfigSecret.github_app_installation_id": "100",
"githubConfigSecret.github_app_private_key": "private_key",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -91,11 +87,9 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAuthInput(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "",
"githubConfigSecret.github_token": "",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "",
"githubConfigSecret.github_token": "",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -118,10 +112,8 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "10",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "10",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -144,10 +136,8 @@ func TestTemplateNotRenderedGitHubSecretWithPredefinedSecret(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secret",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secret",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -168,10 +158,8 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -202,11 +190,9 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -262,11 +248,9 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"template.spec.serviceAccountName": "test-service-account",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"template.spec.serviceAccountName": "test-service-account",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -293,10 +277,8 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -340,11 +322,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"runnerScaleSetName": "test-runner-scale-set-name",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"runnerScaleSetName": "test-runner-scale-set-name",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -395,8 +375,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) {
"template.metadata.labels.test2": "test2",
"template.metadata.annotations.test3": "test3",
"template.metadata.annotations.test4": "test4",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -436,11 +414,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_MaxRunnersValidationError(t *testi
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "-1",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "-1",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -463,12 +439,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinRunnersValidationError(t *testi
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "1",
"minRunners": "-1",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "1",
"minRunners": "-1",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -491,12 +465,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationError(t *te
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "0",
"minRunners": "1",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "0",
"minRunners": "1",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -519,12 +491,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationSameValue(t
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "0",
"minRunners": "0",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "0",
"minRunners": "0",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -550,11 +520,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMin(t
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"minRunners": "5",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"minRunners": "5",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -580,11 +548,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "5",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "5",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -639,10 +605,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -673,10 +635,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -709,10 +667,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_K8S_ExtraVolumes(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -741,11 +695,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "dind",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "dind",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -832,11 +784,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -889,10 +839,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T)
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -923,10 +871,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_ErrorOnEmptyPredefinedSecret(t *te
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -949,15 +895,13 @@ func TestTemplateRenderedWithProxy(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"proxy.http.url": "http://proxy.example.com",
"proxy.http.credentialSecretRef": "http-secret",
"proxy.https.url": "https://proxy.example.com",
"proxy.https.credentialSecretRef": "https-secret",
"proxy.noProxy": "{example.com,example.org}",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
"proxy.http.url": "http://proxy.example.com",
"proxy.http.credentialSecretRef": "http-secret",
"proxy.https.url": "https://proxy.example.com",
"proxy.https.credentialSecretRef": "https-secret",
"proxy.noProxy": "{example.com,example.org}",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -1017,8 +961,6 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"githubServerTLS.runnerMountPath": "/runner/mount/path",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -1076,8 +1018,6 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"githubServerTLS.runnerMountPath": "/runner/mount/path/",
"containerMode.type": "dind",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -1135,8 +1075,6 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"githubServerTLS.runnerMountPath": "/runner/mount/path",
"containerMode.type": "kubernetes",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -1194,8 +1132,6 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubConfigSecret": "pre-defined-secrets",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -1248,9 +1184,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubConfigSecret": "pre-defined-secrets",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"containerMode.type": "dind",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"containerMode.type": "dind",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -1303,9 +1237,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubConfigSecret": "pre-defined-secrets",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"containerMode.type": "kubernetes",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"containerMode.type": "kubernetes",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -1361,10 +1293,8 @@ func TestTemplateNamingConstraints(t *testing.T) {
require.NoError(t, err)
setValues := map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "",
}
tt := map[string]struct {
@@ -1409,10 +1339,8 @@ func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions/",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubConfigUrl": "https://github.com/actions/",
"githubConfigSecret.github_token": "gh_token12345",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -1426,265 +1354,3 @@ func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) {
assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
}
func TestTemplate_CreateManagerRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
var managerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerRole)
assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
assert.Equal(t, 5, len(managerRole.Rules))
}
func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "test",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
var managerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerRole)
assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
assert.Equal(t, 6, len(managerRole.Rules))
assert.Equal(t, "configmaps", managerRole.Rules[5].Resources[0])
}
func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
var managerRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name)
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name)
assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name)
assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace)
}
func TestTemplateRenderedAutoScalingRunnerSet_ExtraContainers(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values_extra_containers.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug")
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
assert.Equal(t, "other", ars.Spec.Template.Spec.Containers[1].Name, "Container name should be other")
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[1].Resources.Limits.Cpu().String(), "CPU Limit should be set")
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[1].Resources.Limits.Memory().String(), "Memory Limit should be set")
assert.Equal(t, "SOME_ENV", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "SOME_ENV should be set")
assert.Equal(t, "SOME_VALUE", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "SOME_ENV should be set to `SOME_VALUE`")
assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set")
assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`")
assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work")
assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work")
assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be work")
assert.Equal(t, corev1.DNSNone, ars.Spec.Template.Spec.DNSPolicy, "DNS Policy should be None")
assert.Equal(t, "192.0.2.1", ars.Spec.Template.Spec.DNSConfig.Nameservers[0], "DNS Nameserver should be set")
}
func TestTemplateRenderedAutoScalingRunnerSet_ExtraPodSpec(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values_extra_pod_spec.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 containers")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
assert.Equal(t, corev1.DNSNone, ars.Spec.Template.Spec.DNSPolicy, "DNS Policy should be None")
assert.Equal(t, "192.0.2.1", ars.Spec.Template.Spec.DNSConfig.Nameservers[0], "DNS Nameserver should be set")
}
func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values_dind_merge_spec.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug")
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "DOCKER_HOST should be set")
assert.Equal(t, "tcp://localhost:9999", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "DOCKER_HOST should be set to `tcp://localhost:9999`")
assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set")
assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`")
assert.Equal(t, "DOCKER_TLS_VERIFY", ars.Spec.Template.Spec.Containers[0].Env[2].Name, "DOCKER_TLS_VERIFY should be set")
assert.Equal(t, "1", ars.Spec.Template.Spec.Containers[0].Env[2].Value, "DOCKER_TLS_VERIFY should be set to `1`")
assert.Equal(t, "DOCKER_CERT_PATH", ars.Spec.Template.Spec.Containers[0].Env[3].Name, "DOCKER_CERT_PATH should be set")
assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[0].Env[3].Value, "DOCKER_CERT_PATH should be set to `/certs/client`")
assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work")
assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work")
assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
}
func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values_k8s_merge_spec.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug")
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 containers")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "ACTIONS_RUNNER_CONTAINER_HOOKS should be set")
assert.Equal(t, "/k8s/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "ACTIONS_RUNNER_CONTAINER_HOOKS should be set to `/k8s/index.js`")
assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set")
assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`")
assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[2].Name, "ACTIONS_RUNNER_POD_NAME should be set")
assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[3].Name, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER should be set")
assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work")
assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work")
assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
}

View File

@@ -2,7 +2,4 @@ githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
maxRunners: 10
minRunners: 5
controllerServiceAccount:
name: "arc"
namespace: "arc-system"
minRunners: 5

View File

@@ -1,31 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
template:
spec:
containers:
- name: runner
image: runner-image:latest
env:
- name: DOCKER_HOST
value: tcp://localhost:9999
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: work
mountPath: /work
- name: others
mountPath: /others
resources:
limits:
memory: "64Mi"
cpu: "250m"
volumes:
- name: work
hostPath:
path: /data
type: Directory
containerMode:
type: dind

View File

@@ -1,46 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
template:
spec:
containers:
- name: runner
image: runner-image:latest
env:
- name: SOME_ENV
value: SOME_VALUE
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: work
mountPath: /work
- name: others
mountPath: /others
resources:
limits:
memory: "64Mi"
cpu: "250m"
- name: other
image: other-image:latest
volumeMounts:
- name: work
mountPath: /work
- name: others
mountPath: /others
resources:
limits:
memory: "64Mi"
cpu: "250m"
volumes:
- name: work
hostPath:
path: /data
type: Directory
dnsPolicy: "None"
dnsConfig:
nameservers:
- 192.0.2.1
containerMode:
type: none

View File

@@ -1,12 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
template:
spec:
containers:
- name: runner
image: runner-image:latest
dnsPolicy: "None"
dnsConfig:
nameservers:
- 192.0.2.1

View File

@@ -1,31 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
template:
spec:
containers:
- name: runner
image: runner-image:latest
env:
- name: ACTIONS_RUNNER_CONTAINER_HOOKS
value: /k8s/index.js
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: work
mountPath: /work
- name: others
mountPath: /others
resources:
limits:
memory: "64Mi"
cpu: "250m"
volumes:
- name: work
hostPath:
path: /data
type: Directory
containerMode:
type: kubernetes

View File

@@ -68,29 +68,25 @@ githubConfigSecret:
# key: ca.pem
# runnerMountPath: /usr/local/share/ca-certificates/
containerMode:
type: "" ## type can be set to dind or kubernetes
## the following is required when containerMode.type=kubernetes
# kubernetesModeWorkVolumeClaim:
# accessModes: ["ReadWriteOnce"]
# # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to provide dynamic provision volume with storageClassName: openebs-hostpath
# storageClassName: "dynamic-blob-storage"
# resources:
# requests:
# storage: 1Gi
## template is the PodSpec for each runner Pod
template:
## template.spec will be modified if you change the container mode
spec:
containers:
- name: runner
image: ghcr.io/actions/actions-runner:latest
command: ["/home/runner/run.sh"]
containerMode:
type: "" ## type can be set to dind or kubernetes
## with containerMode.type=dind, we will populate the template.spec with following pod spec
## template:
## spec:
## initContainers:
## - name: init-dind-externals
## - name: initExternalsInternalVolume
## image: ghcr.io/actions/actions-runner:latest
## command: ["cp", "-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"]
## volumeMounts:
## - name: dind-externals
## - name: externalsInternal
## mountPath: /home/runner/tmpDir
## containers:
## - name: runner
@@ -103,9 +99,9 @@ template:
## - name: DOCKER_CERT_PATH
## value: /certs/client
## volumeMounts:
## - name: work
## - name: workingDirectoryInternal
## mountPath: /home/runner/_work
## - name: dind-cert
## - name: dinDInternal
## mountPath: /certs/client
## readOnly: true
## - name: dind
@@ -113,18 +109,18 @@ template:
## securityContext:
## privileged: true
## volumeMounts:
## - name: work
## mountPath: /home/runner/_work
## - name: dind-cert
## mountPath: /certs/client
## - name: dind-externals
## mountPath: /home/runner/externals
## - mountPath: /certs/client
## name: dinDInternal
## - mountPath: /home/runner/_work
## name: workingDirectoryInternal
## - mountPath: /home/runner/externals
## name: externalsInternal
## volumes:
## - name: work
## - name: dinDInternal
## emptyDir: {}
## - name: dind-cert
## - name: workingDirectoryInternal
## emptyDir: {}
## - name: dind-externals
## - name: externalsInternal
## emptyDir: {}
######################################################################################################
## with containerMode.type=kubernetes, we will populate the template.spec with following pod spec
@@ -155,18 +151,13 @@ template:
## resources:
## requests:
## storage: 1Gi
spec:
containers:
- name: runner
image: ghcr.io/actions/actions-runner:latest
command: ["/home/runner/run.sh"]
## Optional controller service account that needs to have required Role and RoleBinding
## to operate this gha-runner-scale-set installation.
## The helm chart will try to find the controller deployment and its service account at installation time.
## In case the helm chart can't find the right service account, you can explicitly pass in the following value
## to help it finish RoleBinding with the right service account.
## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly.
# controllerServiceAccount:
# namespace: arc-system
# name: test-arc-gha-runner-scale-set-controller
## the following is required when containerMode.type=kubernetes
kubernetesModeWorkVolumeClaim:
accessModes: ["ReadWriteOnce"]
# For testing, use https://github.com/rancher/local-path-provisioner to provide dynamic provision volume
# TODO: remove before release
storageClassName: "dynamic-blob-storage"
resources:
requests:
storage: 1Gi

View File

@@ -144,7 +144,7 @@ func TestCustomerServerRootCA(t *testing.T) {
client, err := newActionsClientFromConfig(config, creds)
require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, "test")
_, err = client.GetRunnerScaleSet(ctx, "test")
require.NoError(t, err)
assert.True(t, serverCalledSuccessfully)
}

View File

@@ -86,7 +86,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
}
if !done {
log.Info("Waiting for resources to be deleted before removing finalizer")
return ctrl.Result{Requeue: true}, nil
return ctrl.Result{}, nil
}
log.Info("Removing finalizer")
@@ -204,7 +204,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return r.createRoleBindingForListener(ctx, autoscalingListener, listenerRole, serviceAccount, log)
}
// Create a secret containing proxy config if specified
// Create a secret containing proxy config if specifiec
if autoscalingListener.Spec.Proxy != nil {
proxySecret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: proxyListenerSecretName(autoscalingListener)}, proxySecret); err != nil {
@@ -299,6 +299,7 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error
For(&v1alpha1.AutoscalingListener{}).
Owns(&corev1.Pod{}).
Owns(&corev1.ServiceAccount{}).
Owns(&corev1.Secret{}).
Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
@@ -502,7 +503,7 @@ func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Con
}
logger.Info("Created listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
return ctrl.Result{Requeue: true}, nil
return ctrl.Result{}, nil
}
func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
@@ -541,7 +542,7 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
logger.Info("Created listener proxy secret", "namespace", newProxySecret.Namespace, "name", newProxySecret.Name)
return ctrl.Result{Requeue: true}, nil
return ctrl.Result{}, nil
}
func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
@@ -557,7 +558,7 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
}
logger.Info("Updated listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash)
return ctrl.Result{Requeue: true}, nil
return ctrl.Result{}, nil
}
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
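The AutoscalingListenerReconciler hunks above pair two ways of picking up newly created or updated listener secrets: an explicit return ctrl.Result{Requeue: true}, nil after each write, versus adding Owns(&corev1.Secret{}) to the controller builder and returning an empty result so the owned-object watch re-enqueues the parent. A minimal controller-runtime sketch of the watch-driven variant follows; the reconciler, parent type, and secret naming are invented for illustration and are not the project's actual AutoscalingListener code.

package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ExampleReconciler owns the Secrets it creates instead of requeueing,
// mirroring the Owns(&corev1.Secret{}) pattern shown in the diff above.
type ExampleReconciler struct {
	client.Client
}

func (r *ExampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// A real reconciler would first check whether the Secret already exists.
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      req.Name + "-mirror", // hypothetical naming scheme
			Namespace: req.Namespace,
			// The real controller also sets an OwnerReference to the parent so
			// the Owns() watch below maps Secret events back to this request.
		},
	}
	if err := r.Create(ctx, secret); err != nil {
		return ctrl.Result{}, err
	}
	// No explicit Requeue: the create event on the owned Secret re-enqueues us.
	return ctrl.Result{}, nil
}

func (r *ExampleReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Pod{}). // stand-in for the real custom resource
		Owns(&corev1.Secret{}).
		Complete(r)
}

Both approaches converge on the same state; the watch-driven form simply avoids a busy requeue when nothing else has changed.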

View File

@@ -316,29 +316,24 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set")
return ctrl.Result{}, err
}
runnerGroupId := 1
if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 {
runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup)
if err != nil {
logger.Error(err, "Failed to get runner group by name", "runnerGroup", autoscalingRunnerSet.Spec.RunnerGroup)
return ctrl.Result{}, err
}
runnerGroupId = int(runnerGroup.ID)
}
runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, runnerGroupId, autoscalingRunnerSet.Spec.RunnerScaleSetName)
runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, autoscalingRunnerSet.Spec.RunnerScaleSetName)
if err != nil {
logger.Error(err, "Failed to get runner scale set from Actions service",
"runnerGroupId",
strconv.Itoa(runnerGroupId),
"runnerScaleSetName",
autoscalingRunnerSet.Spec.RunnerScaleSetName)
logger.Error(err, "Failed to get runner scale set from Actions service")
return ctrl.Result{}, err
}
runnerGroupId := 1
if runnerScaleSet == nil {
if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 {
runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup)
if err != nil {
logger.Error(err, "Failed to get runner group by name", "runnerGroup", autoscalingRunnerSet.Spec.RunnerGroup)
return ctrl.Result{}, err
}
runnerGroupId = int(runnerGroup.ID)
}
runnerScaleSet, err = actionsClient.CreateRunnerScaleSet(
ctx,
&actions.RunnerScaleSet{
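The interleaved lines in this AutoscalingRunnerSetReconciler hunk are easier to read as control flow: one variant resolves the runner group first and passes its id into GetRunnerScaleSet, the other looks the scale set up by name alone and resolves the group only on the create path. Below is a sketch of the name-first ordering, using the ActionsService methods that appear in this diff; the RunnerScaleSet field names at the end are assumptions for illustration, not the exact struct definition.

package sketch

import (
	"context"

	"github.com/actions/actions-runner-controller/github/actions"
)

// lookupOrCreateScaleSet sketches the name-first ordering: the runner group is
// consulted only when no scale set with the given name exists yet.
func lookupOrCreateScaleSet(ctx context.Context, c actions.ActionsService, name, group string) (*actions.RunnerScaleSet, error) {
	scaleSet, err := c.GetRunnerScaleSet(ctx, name)
	if err != nil {
		return nil, err
	}
	if scaleSet != nil {
		return scaleSet, nil // already registered; no group lookup needed
	}

	runnerGroupID := 1 // default group when none is configured
	if group != "" {
		rg, err := c.GetRunnerGroupByName(ctx, group)
		if err != nil {
			return nil, err
		}
		runnerGroupID = int(rg.ID)
	}

	// Field names below are illustrative assumptions.
	return c.CreateRunnerScaleSet(ctx, &actions.RunnerScaleSet{
		Name:          name,
		RunnerGroupId: runnerGroupID,
	})
}

The upside of this ordering is that an existing scale set short-circuits before any runner-group API call is made.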

View File

@@ -107,7 +107,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
if !done {
log.Info("Waiting for ephemeral runner owned resources to be deleted")
return ctrl.Result{Requeue: true}, nil
return ctrl.Result{}, nil
}
done, err = r.cleanupContainerHooksResources(ctx, ephemeralRunner, log)
@@ -643,7 +643,7 @@ func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1
}
log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
return ctrl.Result{Requeue: true}, nil
return ctrl.Result{}, nil
}
// updateRunStatusFromPod is responsible for updating non-exiting statuses.
@@ -792,6 +792,7 @@ func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.EphemeralRunner{}).
Owns(&corev1.Pod{}).
Owns(&corev1.Secret{}).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Named("ephemeral-runner-controller").
Complete(r)

View File

@@ -1141,13 +1141,12 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status")
currentRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, currentRunnerSet)
updatedRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, updatedRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
updatedRunnerSet := currentRunnerSet.DeepCopy()
updatedRunnerSet.Spec.Replicas = 0
err = k8sClient.Patch(ctx, updatedRunnerSet, client.MergeFrom(currentRunnerSet))
err = k8sClient.Update(ctx, updatedRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// wait for server to be called

View File

@@ -31,7 +31,7 @@ const (
//go:generate mockery --inpackage --name=ActionsService
type ActionsService interface {
GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*RunnerScaleSet, error)
GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*RunnerScaleSet, error)
GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int) (*RunnerScaleSet, error)
GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*RunnerGroup, error)
CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error)
@@ -285,8 +285,8 @@ func (c *Client) NewActionsServiceRequest(ctx context.Context, method, path stri
return req, nil
}
func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*RunnerScaleSet, error) {
path := fmt.Sprintf("/%s?runnerGroupId=%d&name=%s", scaleSetEndpoint, runnerGroupId, runnerScaleSetName)
func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*RunnerScaleSet, error) {
path := fmt.Sprintf("/%s?name=%s", scaleSetEndpoint, runnerScaleSetName)
req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err

View File

@@ -34,7 +34,7 @@ func TestGetRunnerScaleSet(t *testing.T) {
client, err := actions.NewClient(server.configURLForOrg("my-org"), auth)
require.NoError(t, err)
got, err := client.GetRunnerScaleSet(ctx, 1, scaleSetName)
got, err := client.GetRunnerScaleSet(ctx, scaleSetName)
require.NoError(t, err)
assert.Equal(t, want, got)
})
@@ -50,7 +50,7 @@ func TestGetRunnerScaleSet(t *testing.T) {
client, err := actions.NewClient(server.configURLForOrg("my-org"), auth)
require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, scaleSetName)
_, err = client.GetRunnerScaleSet(ctx, scaleSetName)
require.NoError(t, err)
expectedPath := "/tenant/123/_apis/runtime/runnerscalesets"
@@ -67,7 +67,7 @@ func TestGetRunnerScaleSet(t *testing.T) {
client, err := actions.NewClient(server.configURLForOrg("my-org"), auth)
require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, scaleSetName)
_, err = client.GetRunnerScaleSet(ctx, scaleSetName)
assert.NotNil(t, err)
})
@@ -80,7 +80,7 @@ func TestGetRunnerScaleSet(t *testing.T) {
client, err := actions.NewClient(server.configURLForOrg("my-org"), auth)
require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, scaleSetName)
_, err = client.GetRunnerScaleSet(ctx, scaleSetName)
assert.NotNil(t, err)
})
@@ -102,7 +102,7 @@ func TestGetRunnerScaleSet(t *testing.T) {
)
require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, scaleSetName)
_, err = client.GetRunnerScaleSet(ctx, scaleSetName)
assert.NotNil(t, err)
expectedRetry := retryMax + 1
assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry)
@@ -118,7 +118,7 @@ func TestGetRunnerScaleSet(t *testing.T) {
client, err := actions.NewClient(server.configURLForOrg("my-org"), auth)
require.NoError(t, err)
got, err := client.GetRunnerScaleSet(ctx, 1, scaleSetName)
got, err := client.GetRunnerScaleSet(ctx, scaleSetName)
require.NoError(t, err)
assert.Equal(t, want, got)
})
@@ -133,7 +133,7 @@ func TestGetRunnerScaleSet(t *testing.T) {
client, err := actions.NewClient(server.configURLForOrg("my-org"), auth)
require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, scaleSetName)
_, err = client.GetRunnerScaleSet(ctx, scaleSetName)
require.NotNil(t, err)
assert.Equal(t, wantErr.Error(), err.Error())
})

View File

@@ -215,7 +215,7 @@ func (f *FakeClient) applyDefaults() {
f.getRunnerByNameResult.RunnerReference = defaultRunnerReference
}
func (f *FakeClient) GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*actions.RunnerScaleSet, error) {
func (f *FakeClient) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*actions.RunnerScaleSet, error) {
return f.getRunnerScaleSetResult.RunnerScaleSet, f.getRunnerScaleSetResult.err
}

View File

@@ -263,13 +263,13 @@ func (_m *MockActionsService) GetRunnerGroupByName(ctx context.Context, runnerGr
return r0, r1
}
// GetRunnerScaleSet provides a mock function with given fields: ctx, runnerGroupId, runnerScaleSetName
func (_m *MockActionsService) GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*RunnerScaleSet, error) {
ret := _m.Called(ctx, runnerGroupId, runnerScaleSetName)
// GetRunnerScaleSet provides a mock function with given fields: ctx, runnerScaleSetName
func (_m *MockActionsService) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*RunnerScaleSet, error) {
ret := _m.Called(ctx, runnerScaleSetName)
var r0 *RunnerScaleSet
if rf, ok := ret.Get(0).(func(context.Context, int, string) *RunnerScaleSet); ok {
r0 = rf(ctx, runnerGroupId, runnerScaleSetName)
if rf, ok := ret.Get(0).(func(context.Context, string) *RunnerScaleSet); ok {
r0 = rf(ctx, runnerScaleSetName)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerScaleSet)
@@ -277,8 +277,8 @@ func (_m *MockActionsService) GetRunnerScaleSet(ctx context.Context, runnerGroup
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok {
r1 = rf(ctx, runnerGroupId, runnerScaleSetName)
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, runnerScaleSetName)
} else {
r1 = ret.Error(1)
}
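With the mock regenerated for the new signature, a testify-style expectation moves from three arguments (ctx, runnerGroupId, name) to two (ctx, name). A hedged sketch of stubbing the two-argument form, assuming the mock lives in the same package as ActionsService as the --inpackage directive above suggests; the scale set name is made up for illustration.

package actions

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestGetRunnerScaleSetMockSketch(t *testing.T) {
	m := &MockActionsService{}
	// Expect the two-argument call shape: (ctx, runnerScaleSetName).
	m.On("GetRunnerScaleSet", mock.Anything, "my-scale-set").
		Return(&RunnerScaleSet{}, nil)

	got, err := m.GetRunnerScaleSet(context.Background(), "my-scale-set")
	require.NoError(t, err)
	require.NotNil(t, got)
	m.AssertExpectations(t)
}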

main.go (28 changed lines)
View File

@@ -32,13 +32,10 @@ import (
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/kelseyhightower/envconfig"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
// +kubebuilder:scaffold:imports
)
@@ -91,7 +88,6 @@ func main() {
namespace string
logLevel string
logFormat string
watchSingleNamespace string
autoScalerImagePullSecrets stringSlice
@@ -128,7 +124,6 @@ func main() {
flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited all the runners created by the controller. See https://github.com/actions/actions-runner-controller/issues/321 for more information")
flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.")
flag.StringVar(&watchSingleNamespace, "watch-single-namespace", "", "Restrict to watch for custom resources in a single namespace.")
flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
flag.StringVar(&logFormat, "log-format", "text", `The log format. Valid options are "text" and "json". Defaults to "text"`)
flag.BoolVar(&autoScalingRunnerSetOnly, "auto-scaling-runner-set-only", false, "Make controller only reconcile AutoRunnerScaleSet object.")
@@ -152,37 +147,19 @@ func main() {
ctrl.SetLogger(log)
managerNamespace := ""
var newCache cache.NewCacheFunc
if autoScalingRunnerSetOnly {
// We don't support metrics for AutoRunnerScaleSet for now
metricsAddr = "0"
managerNamespace = os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE")
if managerNamespace == "" {
log.Error(err, "unable to obtain manager pod namespace")
os.Exit(1)
}
if len(watchSingleNamespace) > 0 {
newCache = cache.MultiNamespacedCacheBuilder([]string{managerNamespace, watchSingleNamespace})
}
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
NewCache: newCache,
MetricsBindAddress: metricsAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: leaderElectionId,
Port: port,
SyncPeriod: &syncPeriod,
Namespace: namespace,
ClientDisableCacheFor: []client.Object{
&corev1.Secret{},
&corev1.ConfigMap{},
},
})
if err != nil {
log.Error(err, "unable to start manager")
@@ -195,6 +172,11 @@ func main() {
log.Error(err, "unable to obtain listener image")
os.Exit(1)
}
managerNamespace := os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE")
if managerNamespace == "" {
log.Error(err, "unable to obtain manager pod namespace")
os.Exit(1)
}
actionsMultiClient := actions.NewMultiClient(
"actions-runner-controller/"+build.Version,

View File

@@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)
RUNNER_VERSION ?= 2.303.0
RUNNER_VERSION ?= 2.302.1
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0
DOCKER_VERSION ?= 20.10.23

View File

@@ -1 +1 @@
2.303.0
2.302.1

View File

@@ -41,7 +41,7 @@ var (
testResultCMNamePrefix = "test-result-"
RunnerVersion = "2.303.0"
RunnerVersion = "2.302.1"
)
// If you're willing to run this test via VS Code "run test" or "debug test",

View File

@@ -11,7 +11,6 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
@@ -93,24 +92,17 @@ func TestARCJobs(t *testing.T) {
)
t.Run("Get available pods during job run", func(t *testing.T) {
c := http.Client{}
targetArcName := os.Getenv("ARC_NAME")
require.NotEmpty(t, targetArcName, "ARC_NAME environment variable is required for this test to run. (e.g. arc-e2e-test)")
targetWorkflow := os.Getenv("WORKFLOW_FILE")
require.NotEmpty(t, targetWorkflow, "WORKFLOW_FILE environment variable is required for this test to run. (e.g. e2e_test.yml)")
ght := os.Getenv("GITHUB_TOKEN")
require.NotEmpty(t, ght, "GITHUB_TOKEN environment variable is required for this test to run.")
dateTime := os.Getenv("DATE_TIME")
// We are triggering manually a workflow that already exists in the repo.
// This workflow is expected to spin up a number of runner pods matching the runners value set in podCountsByType.
url := "https://api.github.com/repos/actions-runner-controller/arc_e2e_test_dummy/actions/workflows/" + targetWorkflow + "/dispatches"
jsonStr := []byte(fmt.Sprintf(`{"ref":"main", "inputs":{"arc_name":"%s"}}`, targetArcName))
url := "https://api.github.com/repos/actions-runner-controller/arc_e2e_test_dummy/actions/workflows/e2e-test-dispatch-workflow.yaml/dispatches"
jsonStr := []byte(fmt.Sprintf(`{"ref":"main", "inputs":{"date_time":"%s"}}`, dateTime))
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
if err != nil {
t.Fatal(err)
}
ght := os.Getenv("GITHUB_TOKEN")
req.Header.Add("Accept", "application/vnd.github+json")
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ght))
req.Header.Add("X-GitHub-Api-Version", "2022-11-28")