Compare commits

1 commit

Author: Bassem Dghaidi
SHA1: 3e37a29c21
Message: Prevent releases on wrong tag
Date: 2023-03-14 12:42:22 +00:00

139 changed files with 1425 additions and 6053 deletions

View File

@@ -0,0 +1,45 @@
name: 'E2E ARC Test Action'
description: 'Common ARC installation, setup, and E2E test run'
inputs:
github-token:
description: 'JWT generated with GitHub App inputs'
required: true
config-url:
description: "URL of the repo, org or enterprise where the runner scale sets will be registered"
required: true
docker-image-repo:
description: "Local docker image repo for testing"
required: true
docker-image-tag:
description: "Tag of ARC Docker image for testing"
required: true
runs:
using: "composite"
steps:
- name: Install ARC
run: helm install arc --namespace "arc-systems" --create-namespace --set image.tag=${{ inputs.docker-image-tag }} --set image.repository=${{ inputs.docker-image-repo }} ./charts/gha-runner-scale-set-controller
shell: bash
- name: Get datetime
# We use this value later in the runner installation to avoid runner name collisions, which are a risk with hard-coded values.
# A datetime with millisecond precision (%3N) is a good option for this, and it also aids readability and runner sorting if needed.
run: echo "DATE_TIME=$(date +'%Y-%m-%d-%H-%M-%S-%3N')" >> $GITHUB_ENV
shell: bash
- name: Install runners
run: |
helm install "arc-runner-${{ env.DATE_TIME }}" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="${{ inputs.config-url }}" \
--set githubConfigSecret.github_token="${{ inputs.github-token }}" \
./charts/gha-runner-scale-set \
--debug
kubectl get pods -A
shell: bash
- name: Test ARC scales pods up and down
run: |
export GITHUB_TOKEN="${{ inputs.github-token }}"
export DATE_TIME="${{ env.DATE_TIME }}"
go test ./test_e2e_arc -v
shell: bash
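
For reference, a caller job wires up this action's four inputs the way the rewritten workflow further down in this compare does; a minimal sketch (the token step and secret names mirror that workflow, and the ${{ env.* }} expressions are a cleaner stand-in for the literal $IMAGE_REPO/$TAG indirection used there):

- name: Get Token
  id: get_workflow_token
  uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
  with:
    application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
    application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
    organization: ${{ env.TARGET_ORG }}
- uses: ./.github/actions/e2e-arc-test
  with:
    github-token: ${{ steps.get_workflow_token.outputs.token }}
    config-url: "https://github.com/actions-runner-controller/arc_e2e_test_dummy"
    docker-image-repo: ${{ env.IMAGE_REPO }}
    docker-image-tag: ${{ env.TAG }}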

View File

@@ -1,160 +0,0 @@
name: 'Execute and Assert ARC E2E Test Action'
description: 'Queue an E2E test workflow run and assert that it succeeds'
inputs:
auth-token:
description: 'GitHub access token to queue workflow run'
required: true
repo-owner:
description: "The repository owner name that has the test workflow file, ex: actions"
required: true
repo-name:
description: "The repository name that has the test workflow file, ex: test"
required: true
workflow-file:
description: 'The file name of the workflow yaml, ex: test.yml'
required: true
arc-name:
description: 'The name of the configured gha-runner-scale-set'
required: true
arc-namespace:
description: 'The namespace of the configured gha-runner-scale-set'
required: true
arc-controller-namespace:
description: 'The namespace of the configured gha-runner-scale-set-controller'
required: true
runs:
using: "composite"
steps:
- name: Queue test workflow
shell: bash
id: queue_workflow
run: |
queue_time=`date +%FT%TZ`
echo "queue_time=$queue_time" >> $GITHUB_OUTPUT
curl -X POST https://api.github.com/repos/${{inputs.repo-owner}}/${{inputs.repo-name}}/actions/workflows/${{inputs.workflow-file}}/dispatches \
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token ${{inputs.auth-token}}" \
-d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'
- name: Fetch workflow run & job ids
uses: actions/github-script@v6
id: query_workflow
with:
script: |
// Try to find the workflow run triggered by the previous step using the workflow_dispatch event.
// - Find recently created workflow runs in the test repository
// - For each workflow run, list its workflow jobs and see if a job's labels contain `inputs.arc-name`
// - Since inputs.arc-name should be unique per e2e workflow run, once we find the job with the label, we have found the workflow run we just triggered.
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms))
}
const owner = '${{inputs.repo-owner}}'
const repo = '${{inputs.repo-name}}'
const workflow_id = '${{inputs.workflow-file}}'
let workflow_run_id = 0
let workflow_job_id = 0
let workflow_run_html_url = ""
let count = 0
while (count++<12) {
await sleep(10 * 1000);
let listRunResponse = await github.rest.actions.listWorkflowRuns({
owner: owner,
repo: repo,
workflow_id: workflow_id,
created: '>${{steps.queue_workflow.outputs.queue_time}}'
})
if (listRunResponse.data.total_count > 0) {
console.log(`Found some new workflow runs for ${workflow_id}`)
for (let i = 0; i<listRunResponse.data.total_count; i++) {
let workflowRun = listRunResponse.data.workflow_runs[i]
console.log(`Check if workflow run ${workflowRun.id} is triggered by us.`)
let listJobResponse = await github.rest.actions.listJobsForWorkflowRun({
owner: owner,
repo: repo,
run_id: workflowRun.id
})
console.log(`Workflow run ${workflowRun.id} has ${listJobResponse.data.total_count} jobs.`)
if (listJobResponse.data.total_count > 0) {
for (let j = 0; j<listJobResponse.data.total_count; j++) {
let workflowJob = listJobResponse.data.jobs[j]
console.log(`Check if workflow job ${workflowJob.id} is triggered by us.`)
console.log(JSON.stringify(workflowJob.labels));
if (workflowJob.labels.includes('${{inputs.arc-name}}')) {
console.log(`Workflow job ${workflowJob.id} (Run id: ${workflowJob.run_id}) is triggered by us.`)
workflow_run_id = workflowJob.run_id
workflow_job_id = workflowJob.id
workflow_run_html_url = workflowRun.html_url
break
}
}
}
if (workflow_job_id > 0) {
break;
}
}
}
if (workflow_job_id > 0) {
break;
}
}
if (workflow_job_id == 0) {
core.setFailed(`Can't find a workflow run or workflow job triggered with 'runs-on: ${{inputs.arc-name}}'`)
} else {
core.setOutput('workflow_run', workflow_run_id);
core.setOutput('workflow_job', workflow_job_id);
core.setOutput('workflow_run_url', workflow_run_html_url);
}
- name: Generate summary about the triggered workflow run
shell: bash
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Triggered workflow run** |
|:--------------------------:|
| ${{steps.query_workflow.outputs.workflow_run_url}} |
EOF
- name: Wait for workflow to finish successfully
uses: actions/github-script@v6
with:
script: |
// Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms))
}
const owner = '${{inputs.repo-owner}}'
const repo = '${{inputs.repo-name}}'
const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
let count = 0
while (count++<10) {
await sleep(30 * 1000);
let getRunResponse = await github.rest.actions.getWorkflowRun({
owner: owner,
repo: repo,
run_id: workflow_run_id
})
console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
if (getRunResponse.data.status == 'completed') {
if ( getRunResponse.data.conclusion == 'success') {
console.log(`Workflow run finished properly.`)
return
} else {
core.setFailed(`The triggered workflow run finished with result ${getRunResponse.data.conclusion}`)
return
}
}
}
core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)
- name: Gather logs and cleanup
shell: bash
if: always()
run: |
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-namespace}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
kubectl logs deployment/arc-gha-runner-scale-set-controller -n ${{inputs.arc-controller-namespace}}
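
Before its removal, this action was consumed from each e2e job roughly as follows (a sketch assembled from the workflow diff further down; `steps.setup` refers to the setup-arc-e2e action that follows, and `steps.install_arc` to the helm install step in those jobs):

- name: Test ARC E2E
  uses: ./.github/actions/execute-assert-arc-e2e
  timeout-minutes: 10
  with:
    auth-token: ${{ steps.setup.outputs.token }}
    repo-owner: actions-runner-controller
    repo-name: arc_e2e_test_dummy
    workflow-file: arc-test-workflow.yaml
    arc-name: ${{ steps.install_arc.outputs.ARC_NAME }}
    arc-namespace: "arc-runners"
    arc-controller-namespace: "arc-systems"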

View File

@@ -1,63 +0,0 @@
name: 'Setup ARC E2E Test Action'
description: 'Build the controller image, create a minikube cluster, load the image, and exchange the ARC configuration token.'
inputs:
app-id:
description: 'GitHub App ID used to exchange for an access token'
required: true
app-pk:
description: "GitHub App private key used to exchange for an access token"
required: true
image-name:
description: "Local docker image name for building"
required: true
image-tag:
description: "Tag of ARC Docker image for building"
required: true
target-org:
description: "The test organization for ARC e2e test"
required: true
outputs:
token:
description: 'Token to use for configuring ARC'
value: ${{steps.config-token.outputs.token}}
runs:
using: "composite"
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 has a bug causing intermittent
# failures when pushing images to GHCR
version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6
- name: Build controller image
uses: docker/build-push-action@v3
with:
file: Dockerfile
platforms: linux/amd64
load: true
build-args: |
DOCKER_IMAGE_NAME=${{inputs.image-name}}
VERSION=${{inputs.image-tag}}
tags: |
${{inputs.image-name}}:${{inputs.image-tag}}
no-cache: true
- name: Create minikube cluster and load image
shell: bash
run: |
minikube start
minikube image load ${{inputs.image-name}}:${{inputs.image-tag}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
with:
application_id: ${{ inputs.app-id }}
application_private_key: ${{ inputs.app-pk }}
organization: ${{ inputs.target-org}}
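
The action's single output is the short-lived installation token produced by the config-token step; callers pick it up through the step id, as in this sketch taken from the old workflow's usage (the helm release name here is illustrative):

- uses: ./.github/actions/setup-arc-e2e
  id: setup
  with:
    app-id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
    app-pk: ${{ secrets.E2E_TESTS_ACCESS_PK }}
    image-name: ${{ env.IMAGE_NAME }}
    image-tag: ${{ env.IMAGE_VERSION }}
    target-org: ${{ env.TARGET_ORG }}
- name: Install runner scale set with the exchanged token
  run: |
    helm install arc-runner ./charts/gha-runner-scale-set \
      --namespace arc-runners --create-namespace \
      --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{ env.TARGET_REPO }}" \
      --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}"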

View File

@@ -0,0 +1,16 @@
name: ARC Reusable Workflow
on:
workflow_dispatch:
inputs:
date_time:
description: 'Datetime for runner name uniqueness, format: %Y-%m-%d-%H-%M-%S-%3N, example: 2023-02-14-13-00-16-791'
required: true
jobs:
arc-runner-job:
strategy:
fail-fast: false
matrix:
job: [1, 2, 3]
runs-on: arc-runner-${{ inputs.date_time }}
steps:
- run: echo "Hello World!" >> $GITHUB_STEP_SUMMARY

View File

@@ -5,701 +5,47 @@ on:
+    branches:
+      - master
+  pull_request:
+  workflow_dispatch:
+env:
+  TARGET_ORG: actions-runner-controller
+  CLUSTER_NAME: e2e-test
+  RUNNER_VERSION: 2.302.1
+  IMAGE_REPO: "test/test-image"
+jobs:
+  setup-steps:
+    runs-on: [ubuntu-latest]
+    steps:
+      - uses: actions/checkout@v3
+      - name: Add env variables
+        run: |
+          TAG=$(echo "0.0.$GITHUB_SHA")
+          echo "TAG=$TAG" >> $GITHUB_ENV
+          echo "IMAGE=$IMAGE_REPO:$TAG" >> $GITHUB_ENV
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: latest
+      - name: Docker Build Test Image
+        run: |
+          DOCKER_CLI_EXPERIMENTAL=enabled DOCKER_BUILDKIT=1 docker buildx build --build-arg RUNNER_VERSION=$RUNNER_VERSION --build-arg TAG=$TAG -t $IMAGE . --load
+      - name: Create Kind cluster
+        run: |
+          PATH=$(go env GOPATH)/bin:$PATH
+          kind create cluster --name $CLUSTER_NAME
+      - name: Load Image to Kind Cluster
+        run: kind load docker-image $IMAGE --name $CLUSTER_NAME
+      - name: Get Token
+        id: get_workflow_token
+        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
+        with:
+          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
+          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
+          organization: ${{ env.TARGET_ORG }}
+      - uses: ./.github/actions/e2e-arc-test
+        with:
+          github-token: ${{ steps.get_workflow_token.outputs.token }}
+          config-url: "https://github.com/actions-runner-controller/arc_e2e_test_dummy"
+          docker-image-repo: $IMAGE_REPO
+          docker-image-tag: $TAG
branches:
- master
pull_request:
branches:
- master
workflow_dispatch:
permissions:
contents: read
env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "0.4.0"
jobs:
default-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
single-namespace-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
kubectl create namespace arc-runners
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
--set flags.watchSingleNamespace=arc-runners \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
dind-mode-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: arc-test-dind-workflow.yaml
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set containerMode.type="dind" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
kubernetes-mode-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
echo "Install openebs/dynamic-localpv-provisioner"
helm repo add openebs https://openebs.github.io/charts
helm repo update
helm install openebs openebs/openebs -n openebs --create-namespace
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
kubectl wait --timeout=30s --for=condition=ready pod -n openebs -l name=openebs-localpv-provisioner
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set containerMode.type="kubernetes" \
--set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \
--set containerMode.kubernetesModeWorkVolumeClaim.storageClassName="openebs-hostpath" \
--set containerMode.kubernetesModeWorkVolumeClaim.resources.requests.storage="1Gi" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
auth-proxy-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
docker run -d \
--name squid \
--publish 3128:3128 \
huangtingluo/squid-proxy:latest
kubectl create namespace arc-runners
kubectl create secret generic proxy-auth \
--namespace=arc-runners \
--from-literal=username=github \
--from-literal=password='actions'
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set proxy.https.url="http://host.minikube.internal:3128" \
--set proxy.https.credentialSecretRef="proxy-auth" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
anonymous-proxy-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
docker run -d \
--name squid \
--publish 3128:3128 \
ubuntu/squid:latest
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set proxy.https.url="http://host.minikube.internal:3128" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
self-signed-ca-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
docker run -d \
--rm \
--name mitmproxy \
--publish 8080:8080 \
-v ${{ github.workspace }}/mitmproxy:/home/mitmproxy/.mitmproxy \
mitmproxy/mitmproxy:latest \
mitmdump
count=0
while true; do
if [ -f "${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem" ]; then
echo "CA cert generated"
cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for mitmproxy generate its CA cert"
exit 1
fi
sleep 1
count=$((count+1))
done
sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
kubectl create namespace arc-runners
kubectl -n arc-runners create configmap ca-cert --from-file="${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt"
kubectl -n arc-runners get configmap ca-cert -o yaml
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set proxy.https.url="http://host.minikube.internal:8080" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
--set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert" \
--set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.crt" \
--set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
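
Every job in this removed workflow repeats the same sixty-second pod-polling loop before the kubectl wait; folded into a single reusable step it would look like this (a sketch, the function name and step wiring are mine):

- name: Wait for pod by label
  shell: bash
  run: |
    wait_for_pod() {
      local namespace="$1" label="$2" count=0
      # Poll for up to 60 seconds until a pod with the label exists
      while true; do
        POD_NAME=$(kubectl get pods -n "$namespace" -l "$label" -o name)
        if [ -n "$POD_NAME" ]; then
          echo "Pod found: $POD_NAME"
          break
        fi
        if [ "$count" -ge 60 ]; then
          echo "Timeout waiting for pod with label $label"
          exit 1
        fi
        sleep 1
        count=$((count+1))
      done
      # Then block until the pod reports ready
      kubectl wait --timeout=30s --for=condition=ready pod -n "$namespace" -l "$label"
    }
    wait_for_pod "arc-systems" "app.kubernetes.io/name=gha-runner-scale-set-controller"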

View File

@@ -1,80 +0,0 @@
name: Go
on:
push:
branches:
- master
paths:
- '.github/workflows/go.yaml'
- '**.go'
- 'go.mod'
- 'go.sum'
pull_request:
paths:
- '.github/workflows/go.yaml'
- '**.go'
- 'go.mod'
- 'go.sum'
permissions:
contents: read
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
cache: false
- name: fmt
run: go fmt ./...
- name: Check diff
run: git diff --exit-code
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
cache: false
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
with:
only-new-issues: true
version: v1.51.1
generate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
cache: false
- name: Generate
run: make generate
- name: Check diff
run: git diff --exit-code
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
- run: make manifests
- name: Check diff
run: git diff --exit-code
- name: Install kubebuilder
run: |
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
- name: Run go tests
run: |
go test -short `go list ./... | grep -v ./test_e2e_arc`

.github/workflows/golangci-lint.yaml
View File

@@ -0,0 +1,23 @@
name: golangci-lint
on:
push:
branches:
- master
pull_request:
permissions:
contents: read
pull-requests: read
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: 1.19
- uses: actions/checkout@v3
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
with:
only-new-issues: true
version: v1.49.0

View File

@@ -29,9 +29,6 @@ jobs:
  release-controller:
    name: Release
    runs-on: ubuntu-latest
-    # gha-runner-scale-set has its own release workflow.
-    # We don't want to publish a new actions-runner-controller image
-    # when we release gha-runner-scale-set.
    if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
    steps:
      - name: Checkout
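
The guard that survives this hunk is what the commit message refers to: a gha-runner-scale-set chart tag must not trigger a controller release. Illustratively (the v-prefixed tag is an assumption about the repo's controller tagging scheme):

# release_tag_name: v0.27.2                    -> !startsWith(...) is true  -> job runs
# release_tag_name: gha-runner-scale-set-0.4.0 -> !startsWith(...) is false -> job is skipped
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}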

View File

@@ -8,47 +8,35 @@ on:
      - master
  paths-ignore:
    - '**.md'
-    - '.github/actions/**'
    - '.github/ISSUE_TEMPLATE/**'
-    - '.github/workflows/e2e-test-dispatch-workflow.yaml'
-    - '.github/workflows/e2e-test-linux-vm.yaml'
-    - '.github/workflows/publish-arc.yaml'
-    - '.github/workflows/publish-chart.yaml'
-    - '.github/workflows/publish-runner-scale-set.yaml'
-    - '.github/workflows/release-runners.yaml'
-    - '.github/workflows/run-codeql.yaml'
-    - '.github/workflows/run-first-interaction.yaml'
-    - '.github/workflows/run-stale.yaml'
-    - '.github/workflows/update-runners.yaml'
-    - '.github/workflows/validate-arc.yaml'
    - '.github/workflows/validate-chart.yaml'
-    - '.github/workflows/validate-gha-chart.yaml'
-    - '.github/workflows/validate-runners.yaml'
-    - '.github/dependabot.yml'
-    - '.github/RELEASE_NOTE_TEMPLATE.md'
-    - '.github/renovate.*'
+    - '.github/workflows/publish-chart.yaml'
+    - '.github/workflows/publish-arc.yaml'
+    - '.github/workflows/runners.yaml'
+    - '.github/workflows/validate-entrypoint.yaml'
    - 'runner/**'
    - '.gitignore'
    - 'PROJECT'
    - 'LICENSE'
    - 'Makefile'
-# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
-permissions:
-  contents: read
-  packages: write
env:
  # Safeguard to prevent pushing images to registries after build
  PUSH_TO_REGISTRIES: true
-  TARGET_ORG: actions-runner-controller
-  TARGET_REPO: actions-runner-controller
+# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
+permissions:
+  contents: read
jobs:
-  legacy-canary-build:
-    name: Build and Publish Legacy Canary Image
+  canary-build:
+    name: Build and Publish Canary Image
    runs-on: ubuntu-latest
    env:
      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
+      TARGET_ORG: actions-runner-controller
+      TARGET_REPO: actions-runner-controller
    steps:
      - name: Checkout
        uses: actions/checkout@v3
@@ -80,50 +68,3 @@ jobs:
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
          echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY
-  canary-build:
-    name: Build and Publish gha-runner-scale-set-controller Canary Image
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v2
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-      # Normalization is needed because upper case characters are not allowed in the repository name
-      # and the short sha is needed for image tagging
-      - name: Resolve parameters
-        id: resolve_parameters
-        run: |
-          echo "INFO: Resolving short sha"
-          echo "short_sha=$(git rev-parse --short ${{ github.ref }})" >> $GITHUB_OUTPUT
-          echo "INFO: Normalizing repository name (lowercase)"
-          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-        with:
-          version: latest
-      # Unstable builds - run at your own risk
-      - name: Build and Push
-        uses: docker/build-push-action@v3
-        with:
-          context: .
-          file: ./Dockerfile
-          platforms: linux/amd64,linux/arm64
-          build-args: VERSION=canary-"${{ github.ref }}"
-          push: ${{ env.PUSH_TO_REGISTRIES }}
-          tags: |
-            ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary
-            ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary-${{ steps.resolve_parameters.outputs.short_sha }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
View File

@@ -5,28 +5,22 @@ name: Publish Helm Chart
on:
  push:
    branches:
      - master
    paths:
      - 'charts/**'
      - '.github/workflows/publish-chart.yaml'
      - '!charts/actions-runner-controller/docs/**'
      - '!charts/gha-runner-scale-set-controller/**'
      - '!charts/gha-runner-scale-set/**'
      - '!**.md'
  workflow_dispatch:
-    inputs:
-      force:
-        description: 'Force publish even if the chart version is not bumped'
-        type: boolean
-        required: true
-        default: false
env:
  KUBE_SCORE_VERSION: 1.10.0
  HELM_VERSION: v3.8.0
permissions:
-  contents: write
+  contents: read
jobs:
  lint-chart:
@@ -35,86 +29,91 @@ jobs:
    outputs:
      publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Helm
        uses: azure/setup-helm@v3.4
        with:
          version: ${{ env.HELM_VERSION }}
      - name: Set up kube-score
        run: |
          wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
          chmod 755 kube-score
      - name: Kube-score generated manifests
-        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
+        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
+          --ignore-test pod-networkpolicy
+          --ignore-test deployment-has-poddisruptionbudget
+          --ignore-test deployment-has-host-podantiaffinity
+          --ignore-test container-security-context
+          --ignore-test pod-probes
+          --ignore-test container-image-tag
+          --enable-optional-test container-security-context-privileged
+          --enable-optional-test container-security-context-readonlyrootfilesystem
      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
      - uses: actions/setup-python@v4
        with:
-          python-version: '3.11'
+          python-version: '3.7'
      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.3.1
      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
          if [[ -n "$changed" ]]; then
            echo "::set-output name=changed::true"
          fi
      - name: Run chart-testing (lint)
        run: |
          ct lint --config charts/.ci/ct-config.yaml
      - name: Create kind cluster
        if: steps.list-changed.outputs.changed == 'true'
        uses: helm/kind-action@v1.4.0
      # We need cert-manager already installed in the cluster because we assume the CRDs exist
      - name: Install cert-manager
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          helm repo add jetstack https://charts.jetstack.io --force-update
          helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
      - name: Run chart-testing (install)
        if: steps.list-changed.outputs.changed == 'true'
        run: ct install --config charts/.ci/ct-config.yaml
      # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
      - name: Check if Chart Publish is Needed
        id: publish-chart-step
        run: |
          CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
          NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
          RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
          LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
          echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
          echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
-          # Always publish if force is true
-          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
+          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION ]]; then
            echo "publish=true" >> $GITHUB_OUTPUT
          else
            echo "publish=false" >> $GITHUB_OUTPUT
          fi
      - name: Job summary
        run: |
          echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
          echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
          echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
  publish-chart:
    if: needs.lint-chart.outputs.publish-chart == 'true'
@@ -122,86 +121,85 @@
    name: Publish Chart
    runs-on: ubuntu-latest
    permissions:
      contents: write # for helm/chart-releaser-action to push chart release and create a release
    env:
      CHART_TARGET_ORG: actions-runner-controller
      CHART_TARGET_REPO: actions-runner-controller.github.io
      CHART_TARGET_BRANCH: master
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
      - name: Get Token
        id: get_workflow_token
        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
          organization: ${{ env.CHART_TARGET_ORG }}
      - name: Install chart-releaser
        uses: helm/chart-releaser-action@v1.4.1
        with:
          install_only: true
          install_dir: ${{ github.workspace }}/bin
      - name: Package and upload release assets
        run: |
          cr package \
            ${{ github.workspace }}/charts/actions-runner-controller/ \
            --package-path .cr-release-packages
          cr upload \
            --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
            --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
            --package-path .cr-release-packages \
            --token ${{ secrets.GITHUB_TOKEN }}
      - name: Generate updated index.yaml
        run: |
          cr index \
            --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
            --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
            --index-path ${{ github.workspace }}/index.yaml \
-            --push \
            --pages-branch 'gh-pages' \
            --pages-index-path 'index.yaml'
      # Chart Release was never intended to publish to a different repo
      # this workaround is intended to move the index.yaml to the target repo
      # where the github pages are hosted
-      - name: Checkout target repository
+      - name: Checkout pages repository
        uses: actions/checkout@v3
        with:
          repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
          path: ${{ env.CHART_TARGET_REPO }}
          ref: ${{ env.CHART_TARGET_BRANCH }}
          token: ${{ steps.get_workflow_token.outputs.token }}
      - name: Copy index.yaml
        run: |
          cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
-      - name: Commit and push to target repository
+      - name: Commit and push
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
          git add .
          git commit -m "Update index.yaml"
          git push
        working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
      - name: Job summary
        run: |
          echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
-          echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
+          echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY

View File

@@ -36,7 +36,7 @@ permissions:
  packages: write
jobs:
  build-push-image:
    name: Build and push controller image
    runs-on: ubuntu-latest
    steps:
@@ -46,14 +46,7 @@
        # If inputs.ref is empty, it'll resolve to the default branch
        ref: ${{ inputs.ref }}
-      - name: Check chart versions
-        # Binary version and chart versions need to match.
-        # In case of an upgrade, the controller will try to clean up
-        # resources with older versions that should have been cleaned up
-        # during the upgrade process
-        run: ./hack/check-gh-chart-versions.sh ${{ inputs.release_tag_name }}
      - name: Resolve parameters
        id: resolve_parameters
        run: |
          resolvedRef="${{ inputs.ref }}"
@@ -74,7 +67,7 @@
        uses: docker/setup-buildx-action@v2
        with:
          # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
          # BuildKit v0.11 has a bug causing intermittent
          # failures when pushing images to GHCR
          version: v0.9.1
          driver-opts: image=moby/buildkit:v0.10.6
@@ -122,7 +115,7 @@
        # If inputs.ref is empty, it'll resolve to the default branch
        ref: ${{ inputs.ref }}
      - name: Resolve parameters
        id: resolve_parameters
        run: |
          resolvedRef="${{ inputs.ref }}"
@@ -133,7 +126,7 @@
          echo "INFO: Resolving short SHA for $resolvedRef"
          echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
          echo "INFO: Normalizing repository name (lowercase)"
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
      - name: Set up Helm
        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
@@ -170,7 +163,7 @@
        # If inputs.ref is empty, it'll resolve to the default branch
        ref: ${{ inputs.ref }}
      - name: Resolve parameters
        id: resolve_parameters
        run: |
          resolvedRef="${{ inputs.ref }}"
@@ -181,7 +174,7 @@
          echo "INFO: Resolving short SHA for $resolvedRef"
          echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
          echo "INFO: Normalizing repository name (lowercase)"
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
      - name: Set up Helm
        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5

View File

@@ -1,4 +1,4 @@
-name: Release Runner Images
+name: Runners
 # Revert to https://github.com/actions-runner-controller/releases#releases
 # for details on why we use this approach
@@ -18,6 +18,7 @@ env:
   TARGET_ORG: actions-runner-controller
   TARGET_WORKFLOW: release-runners.yaml
   DOCKER_VERSION: 20.10.23
+  RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0
 jobs:
   build-runners:
@@ -26,12 +27,10 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Get runner version
-        id: versions
+        id: runner_version
         run: |
-          runner_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
-          container_hooks_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
-          echo runner_version=$runner_current_version >> $GITHUB_OUTPUT
-          echo container_hooks_version=$container_hooks_current_version >> $GITHUB_OUTPUT
+          version=$(echo -n $(cat runner/VERSION))
+          echo runner_version=$version >> $GITHUB_OUTPUT
       - name: Get Token
         id: get_workflow_token
@@ -43,8 +42,7 @@ jobs:
       - name: Trigger Build And Push Runner Images To Registries
         env:
-          RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
-          CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
+          RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }}
         run: |
           # Authenticate
           gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
@@ -53,21 +51,20 @@ jobs:
           gh workflow run ${{ env.TARGET_WORKFLOW }} -R ${{ env.TARGET_ORG }}/releases \
             -f runner_version=${{ env.RUNNER_VERSION }} \
             -f docker_version=${{ env.DOCKER_VERSION }} \
-            -f runner_container_hooks_version=${{ env.CONTAINER_HOOKS_VERSION }} \
+            -f runner_container_hooks_version=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }} \
             -f sha='${{ github.sha }}' \
             -f push_to_registries=${{ env.PUSH_TO_REGISTRIES }}
       - name: Job summary
         env:
-          RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
-          CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
+          RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }}
         run: |
           echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
           echo "- runner_version: ${{ env.RUNNER_VERSION }}" >> $GITHUB_STEP_SUMMARY
           echo "- docker_version: ${{ env.DOCKER_VERSION }}" >> $GITHUB_STEP_SUMMARY
-          echo "- runner_container_hooks_version: ${{ env.CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
+          echo "- runner_container_hooks_version: ${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
           echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
           echo "- push_to_registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY

View File

@@ -16,34 +16,21 @@ jobs:
     env:
       GH_TOKEN: ${{ github.token }}
     outputs:
-      runner_current_version: ${{ steps.runner_versions.outputs.runner_current_version }}
-      runner_latest_version: ${{ steps.runner_versions.outputs.runner_latest_version }}
-      container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
-      container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
+      current_version: ${{ steps.versions.outputs.current_version }}
+      latest_version: ${{ steps.versions.outputs.latest_version }}
     steps:
       - uses: actions/checkout@v3
-      - name: Get runner current and latest versions
-        id: runner_versions
+      - name: Get current and latest versions
+        id: versions
         run: |
-          CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
+          CURRENT_VERSION=$(echo -n $(cat runner/VERSION))
           echo "Current version: $CURRENT_VERSION"
-          echo runner_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
+          echo current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
           LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1)
           echo "Latest version: $LATEST_VERSION"
-          echo runner_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
+          echo latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
-      - name: Get container-hooks current and latest versions
-        id: container_hooks_versions
-        run: |
-          CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
-          echo "Current version: $CURRENT_VERSION"
-          echo container_hooks_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
-          LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner-container-hooks | grep -oP '(?<=v)[0-9.]+' | head -1)
-          echo "Latest version: $LATEST_VERSION"
-          echo container_hooks_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
   # check_pr checks if a PR for the same update already exists. It only runs if
   # runner latest version != our current version. If no existing PR is found,
@@ -51,7 +38,7 @@ jobs:
   check_pr:
     runs-on: ubuntu-latest
     needs: check_versions
-    if: needs.check_versions.outputs.runner_current_version != needs.check_versions.outputs.runner_latest_version || needs.check_versions.outputs.container_hooks_current_version != needs.check_versions.outputs.container_hooks_latest_version
+    if: needs.check_versions.outputs.current_version != needs.check_versions.outputs.latest_version
     outputs:
       pr_name: ${{ steps.pr_name.outputs.pr_name }}
     env:
@@ -59,36 +46,17 @@ jobs:
     steps:
       - name: debug
         run:
-          echo "RUNNER_CURRENT_VERSION=${{ needs.check_versions.outputs.runner_current_version }}"
-          echo "RUNNER_LATEST_VERSION=${{ needs.check_versions.outputs.runner_latest_version }}"
-          echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
-          echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
+          echo ${{ needs.check_versions.outputs.current_version }}
+          echo ${{ needs.check_versions.outputs.latest_version }}
       - uses: actions/checkout@v3
       - name: PR Name
         id: pr_name
         env:
-          RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
-          RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
-          CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
-          CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
-        # Generate a PR name with the following title:
-        # Updates: runner to v2.304.0 and container-hooks to v0.3.1
+          LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }}
         run: |
-          RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
-          CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"
-          PR_NAME="Updates:"
-          if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
-          then
-            PR_NAME="$PR_NAME $RUNNER_MESSAGE"
-          fi
-          if [ "$CONTAINER_HOOKS_CURRENT_VERSION" != "$CONTAINER_HOOKS_LATEST_VERSION" ]
-          then
-            PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
-          fi
+          PR_NAME="Update runner to version ${LATEST_VERSION}"
           result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
           if [ -z "$result" ]
           then
@@ -109,32 +77,23 @@ jobs:
     permissions:
       pull-requests: write
       contents: write
-      actions: write
     env:
       GH_TOKEN: ${{ github.token }}
-      RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
-      RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
-      CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
-      CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
+      CURRENT_VERSION: ${{ needs.check_versions.outputs.current_version }}
+      LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }}
       PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
     steps:
       - uses: actions/checkout@v3
       - name: New branch
-        run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
+        run: git checkout -b update-runner-$LATEST_VERSION
       - name: Update files
         run: |
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/VERSION
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/Makefile
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" Makefile
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" test/e2e/e2e_test.go
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/VERSION
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/Makefile
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" Makefile
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" test/e2e/e2e_test.go
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e-test-linux-vm.yaml
       - name: Commit changes
         run: |
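
The whole scheduled job boils down to a compare-and-bump. A local dry run of the version check, reusing the workflow's own `gh` invocation and assuming the bare `runner/VERSION` format, would be:

```bash
# Local dry run of the scheduled check (requires an authenticated gh CLI).
CURRENT_VERSION=$(cat runner/VERSION)
LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner \
  | grep -oP '(?<=v)[0-9.]+' | head -1)

if [ "$CURRENT_VERSION" != "$LATEST_VERSION" ]; then
  echo "Update available: $CURRENT_VERSION -> $LATEST_VERSION"
else
  echo "Already up to date ($CURRENT_VERSION)"
fi
```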

.github/workflows/validate-arc.yaml vendored Normal file
View File

@@ -0,0 +1,60 @@
name: Validate ARC
on:
pull_request:
branches:
- master
paths-ignore:
- '**.md'
- '.github/ISSUE_TEMPLATE/**'
- '.github/workflows/publish-canary.yaml'
- '.github/workflows/validate-chart.yaml'
- '.github/workflows/publish-chart.yaml'
- '.github/workflows/runners.yaml'
- '.github/workflows/publish-arc.yaml'
- '.github/workflows/validate-entrypoint.yaml'
- '.github/renovate.*'
- 'runner/**'
- '.gitignore'
- 'PROJECT'
- 'LICENSE'
- 'Makefile'
permissions:
contents: read
jobs:
test-controller:
name: Test ARC
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set-up Go
uses: actions/setup-go@v3
with:
go-version: '1.19'
check-latest: false
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Install kubebuilder
run: |
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
- name: Run tests
run: |
make test
- name: Verify manifests are up-to-date
run: |
make manifests
git diff --exit-code
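
The same validation can be reproduced locally before opening a PR (assuming kubebuilder is installed under `/usr/local/kubebuilder` as in the workflow):

```bash
# Reproduce the workflow's checks locally.
make test              # run the controller test suite
make manifests         # regenerate CRD manifests
git diff --exit-code   # fail if the regenerated manifests differ from what is committed
```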

View File

@@ -71,7 +71,7 @@ jobs:
git clone https://github.com/helm/chart-testing git clone https://github.com/helm/chart-testing
cd chart-testing cd chart-testing
unset CT_CONFIG_DIR unset CT_CONFIG_DIR
goreleaser build --clean --skip-validate goreleaser build --clean --skip-validate
./dist/chart-testing_linux_amd64_v1/ct version ./dist/chart-testing_linux_amd64_v1/ct version
echo 'Adding ct directory to PATH...' echo 'Adding ct directory to PATH...'
echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH" echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH"
@@ -107,7 +107,7 @@ jobs:
load: true load: true
build-args: | build-args: |
DOCKER_IMAGE_NAME=test-arc DOCKER_IMAGE_NAME=test-arc
VERSION=dev VERSION=dev
tags: | tags: |
test-arc:dev test-arc:dev
cache-from: type=gha cache-from: type=gha

.gitignore vendored
View File

@@ -35,5 +35,3 @@ bin
 .DS_STORE
 /test-assets
-/.tools

View File

@@ -5,7 +5,7 @@ else
 endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
-RUNNER_VERSION ?= 2.304.0
+RUNNER_VERSION ?= 2.302.1
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -202,7 +202,7 @@ generate: controller-gen
 # Run shellcheck on runner scripts
 shellcheck: shellcheck-install
-	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh
+	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh
 docker-buildx:
	export DOCKER_CLI_EXPERIMENTAL=enabled ;\

View File

@@ -6,14 +6,17 @@
 ## People
-`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with the GitHub Actions team, external maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions).
+`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions), mostly in their spare time.
-If you think the project is awesome and is adding value to your business, please consider directly sponsoring [community maintainers](https://github.com/sponsors/actions-runner-controller) and individual contributors via GitHub Sponsors.
+If you think the project is awesome and it's becoming a basis for your important business, consider [sponsoring us](https://github.com/sponsors/actions-runner-controller)!
 In case you are already the employer of one of contributors, sponsoring via GitHub Sponsors might not be an option. Just support them in other means!
-We don't currently have [any sponsors dedicated to this project yet](https://github.com/sponsors/actions-runner-controller).
-See [the sponsorship dashboard](https://github.com/sponsors/actions-runner-controller) for the former and the current sponsors.
+However, [HelloFresh](https://www.hellofreshgroup.com/en/) has recently started sponsoring @mumoshu for this project along with his other works. A part of their sponsorship will enable @mumoshu to add an E2E test to keep ARC even more reliable on AWS. Thank you for your sponsorship!
+[<img src="https://user-images.githubusercontent.com/22009/170898715-07f02941-35ec-418b-8cd4-251b422fa9ac.png" width="219" height="71" />](https://careers.hellofresh.com/)
 ## Status

View File

@@ -61,9 +61,6 @@ if [ "${tool}" == "helm" ]; then
    flags+=( --set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
    flags+=( --set actionsMetricsServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
  fi
-  if [ "${WATCH_NAMESPACE}" != "" ]; then
-    flags+=( --set watchNamespace=${WATCH_NAMESPACE} --set singleNamespace=true)
-  fi
  if [ "${CHART_VERSION}" != "" ]; then
    flags+=( --version ${CHART_VERSION})
  fi
@@ -72,9 +69,6 @@ if [ "${tool}" == "helm" ]; then
    flags+=( --set githubWebhookServer.logFormat=${LOG_FORMAT})
    flags+=( --set actionsMetricsServer.logFormat=${LOG_FORMAT})
  fi
-  if [ "${ADMISSION_WEBHOOKS_TIMEOUT}" != "" ]; then
-    flags+=( --set admissionWebHooks.timeoutSeconds=${ADMISSION_WEBHOOKS_TIMEOUT})
-  fi
  if [ -n "${CREATE_SECRETS_USING_HELM}" ]; then
    if [ -z "${WEBHOOK_GITHUB_TOKEN}" ]; then
      echo 'Failed deploying secret "actions-metrics-server" using helm. Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2
@@ -83,10 +77,6 @@ if [ "${tool}" == "helm" ]; then
    flags+=( --set actionsMetricsServer.secret.create=true)
    flags+=( --set actionsMetricsServer.secret.github_token=${WEBHOOK_GITHUB_TOKEN})
  fi
-  if [ -n "${GITHUB_WEBHOOK_SERVER_ENV_NAME}" ] && [ -n "${GITHUB_WEBHOOK_SERVER_ENV_VALUE}" ]; then
-    flags+=( --set githubWebhookServer.env[0].name=${GITHUB_WEBHOOK_SERVER_ENV_NAME})
-    flags+=( --set githubWebhookServer.env[0].value=${GITHUB_WEBHOOK_SERVER_ENV_VALUE})
-  fi
  set -vx
@@ -102,7 +92,6 @@ if [ "${tool}" == "helm" ]; then
    --set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
    --set actionsMetricsServer.podAnnotations.test-id=${TEST_ID} \
    ${flags[@]} --set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
-    --set image.dindSidecarRepositoryAndTag=${DIND_SIDECAR_REPOSITORY_AND_TAG} \
    -f ${VALUES_FILE}
  set +v
  # To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`

View File

@@ -6,10 +6,6 @@ OP=${OP:-apply}
 RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
-# See https://github.com/actions/actions-runner-controller/issues/2123
-kubectl delete secret generic docker-config || :
-kubectl create secret generic docker-config --from-file .dockerconfigjson=<(jq -M 'del(.aliases)' $HOME/.docker/config.json) --type=kubernetes.io/dockerconfigjson || :
 cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f -
 if [ -n "${TEST_REPO}" ]; then

View File

@@ -95,24 +95,6 @@ spec:
          # that part is created by dockerd.
          mountPath: /home/runner/.local
          readOnly: false
-        # See https://github.com/actions/actions-runner-controller/issues/2123
-        # Be sure to omit the "aliases" field from the config.json.
-        # Otherwise you may encounter nasty errors like:
-        #   $ docker build
-        #   docker: 'buildx' is not a docker command.
-        #   See 'docker --help'
-        # due to the incompatibility between your host docker config.json and the runner environment.
-        # That is, your host dockcer config.json might contain this:
-        #   "aliases": {
-        #     "builder": "buildx"
-        #   }
-        # And this results in the above error when the runner does not have buildx installed yet.
-        - name: docker-config
-          mountPath: /home/runner/.docker/config.json
-          subPath: config.json
-          readOnly: true
-        - name: docker-config-root
-          mountPath: /home/runner/.docker
      volumes:
        - name: rootless-dind-work-dir
          ephemeral:
@@ -123,15 +105,6 @@ spec:
              resources:
                requests:
                  storage: 3Gi
-        - name: docker-config
-          # Refer to .dockerconfigjson/.docker/config.json
-          secret:
-            secretName: docker-config
-            items:
-              - key: .dockerconfigjson
-                path: config.json
-        - name: docker-config-root
-          emptyDir: {}
      #
      # Non-standard working directory
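
The comments removed above document why these mounts existed: a host `config.json` carrying a `buildx` alias breaks `docker build` inside the runner. The companion script change earlier in this diff created the sanitized secret these mounts consumed; for reference, that one-liner strips the `aliases` key before creating the secret:

```bash
# From the script change earlier in this diff: create the docker-config secret
# with the "aliases" key stripped, so the runner never inherits the host's
# buildx alias.
kubectl delete secret generic docker-config || :
kubectl create secret generic docker-config \
  --from-file .dockerconfigjson=<(jq -M 'del(.aliases)' "$HOME/.docker/config.json") \
  --type=kubernetes.io/dockerconfigjson || :
```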

View File

@@ -1,5 +1,4 @@
-# ADR 2022-10-17: Produce the runner image for the scaleset client
+# ADR 0001: Produce the runner image for the scaleset client
**Date**: 2022-10-17
**Status**: Done
@@ -8,7 +7,6 @@
We aim to provide an similar experience (as close as possible) between self-hosted and GitHub-hosted runners. To achieve this, we are making the following changes to align our self-hosted runner container image with the Ubuntu runners managed by GitHub.
Here are the changes:
- We created a USER `runner(1001)` and a GROUP `docker(123)`
- `sudo` has been on the image and the `runner` will be a passwordless sudoer.
- The runner binary was placed placed under `/home/runner/` and launched using `/home/runner/run.sh`
@@ -20,33 +18,31 @@ The latest Dockerfile can be found at: https://github.com/actions/runner/blob/ma
# Context
-users can bring their own runner images, the contract we require is:
+user can bring their own runner images, the contract we have are:
-- It must have a runner binary under `/actions-runner` i.e. `/actions-runner/run.sh` exists
-- The `WORKDIR` is set to `/actions-runner`
-- If the user inside the container is root, the environment variable `RUNNER_ALLOW_RUNASROOT` should be set to `1`
+- It must have a runner binary under /actions-runner (/actions-runner/run.sh exists)
+- The WORKDIR is set to /actions-runner
+- If the user inside the container is root, the ENV RUNNER_ALLOW_RUNASROOT should be set to 1
-The existing [ARC runner images](https://github.com/orgs/actions-runner-controller/packages?tab=packages&q=actions-runner) will not work with the new ARC mode out-of-box for the following reason:
+The existing ARC runner images will not work with the new ARC mode out-of-box for the following reason:
-- The current runner image requires the caller to pass runner configuration info, ex: URL and Config Token
-- The current runner image has the runner binary under `/runner` which violates the contract described above
+- The current runner image requires caller to pass runner configure info, ex: URL and Config Token
+- The current runner image has the runner binary under /runner
- The current runner image requires a special entrypoint script in order to work around some volume mount limitation for setting up DinD.
-Since we expose the raw runner PodSpec to our end users, they can modify the helm `values.yaml` to adjust the runner container to their needs.
+However, since we expose the raw runner Pod spec to our user, advanced user can modify the helm values.yaml to make everything lines up properly.
# Guiding Principles
- Build image is separated in two stages.
## The first stage (build)
- Reuses the same base image, so it is faster to build.
-- Installs utilities needed to download assets (`runner` and `runner-container-hooks`).
+- Installs utilities needed to download assets (runner and runner-container-hooks).
- Downloads the runner and stores it into `/actions-runner` directory.
- Downloads the runner-container-hooks and stores it into `/actions-runner/k8s` directory.
- You can use build arguments to control the runner version, the target platform and runner container hooks version.
-Preview (the published runner image might vary):
+Preview:
```Dockerfile
FROM mcr.microsoft.com/dotnet/runtime-deps:6.0 as build
@@ -68,7 +64,6 @@ RUN curl -f -L -o runner-container-hooks.zip https://github.com/actions/runner-c
```
## The main image:
- Copies assets from the build stage to `/actions-runner`
- Does not provide an entrypoint. The entrypoint should be set within the container definition.
@@ -82,7 +77,6 @@ COPY --from=build /actions-runner .
```
## Example of pod spec with the init container copying assets
```yaml
apiVersion: v1
kind: Pod
@@ -90,20 +84,20 @@ metadata:
  name: <name>
spec:
  containers:
    - name: runner
      image: <image>
      command: ["/runner/run.sh"]
      volumeMounts:
        - name: runner
          mountPath: /runner
  initContainers:
    - name: setup
      image: <image>
      command: ["sh", "-c", "cp -r /actions-runner/* /runner/"]
      volumeMounts:
        - name: runner
          mountPath: /runner
  volumes:
    - name: runner
      emptyDir: {}
```
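
Since the ADR leaves the build arguments open, a build invocation might look like the following; the argument names mirror the variables used elsewhere in this diff and are assumptions about the actual Dockerfile:

```bash
# Hypothetical build of the scale set runner image; build-arg names are
# assumptions based on the variables used elsewhere in this diff.
docker build \
  --build-arg RUNNER_VERSION=2.304.0 \
  --build-arg RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 \
  -t my-org/actions-runner:dev .
```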

View File

@@ -1,4 +1,4 @@
-# ADR 2022-10-27: Lifetime of RunnerScaleSet on Service
+# ADR 0003: Lifetime of RunnerScaleSet on Service
**Date**: 2022-10-27
@@ -12,9 +12,8 @@ The `RunnerScaleSet` object will represent a set of homogeneous self-hosted runn
A `RunnerScaleSet` client (ARC) needs to communicate with the Actions service via HTTP long-poll in a certain protocol to get a workflow job successfully landed on one of its homogeneous self-hosted runners.
-In this ADR, we discuss the following within the context of actions-runner-controller's new scaling mode:
+In this ADR, I want to discuss the following within the context of actions-runner-controller's new scaling mode:
- Who and how to create a RunnerScaleSet on the service?
- Who and how to delete a RunnerScaleSet on the service?
- What will happen to all the runners and jobs when the deletion happens?
@@ -31,19 +30,18 @@ In this ADR, we discuss the following within the context of actions-runner-contr
- When the user patch existing `AutoScalingRunnerSet`'s RunnerScaleSet related properly, ex: `runnerGroupName`, `runnerWorkDir`, the controller needs to make an HTTP PATCH call to the `_apis/runtime/runnerscalesets/2` endpoint in order to update the object on the service.
- We will put the deployed `AutoScalingRunnerSet` resource in an error state when the user tries to patch the resource with a different `githubConfigUrl`
  > Basically, you can't move a deployed `AutoScalingRunnerSet` across GitHub entity, repoA->repoB, repoA->OrgC, etc.
  > We evaluated blocking the change before instead of erroring at runtime and that we decided not to go down this route because it forces us to re-introduce admission webhooks (require cert-manager).
## RunnerScaleSet deletion
- `AutoScalingRunnerSet` custom resource controller will delete the `RunnerScaleSet` object in the Actions service on any `AutoScalingRunnerSet` resource deletion.
  > `AutoScalingRunnerSet` deletion will contain several steps:
  > - Stop the listener app so no more new jobs coming and no more scaling up/down.
  > - Request scale down to 0
  > - Force stop all runners
  > - Wait for the scale down to 0
  > - Delete the `RunnerScaleSet` object from service via REST API
- The deletion is via REST API on Actions service `DELETE _apis/runtime/runnerscalesets/1`
- The deletion needs to use the runner registration token (admin).
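
Putting the endpoints mentioned above together, the service-side lifecycle amounts to a handful of REST calls. A sketch, where the base URL, scale set ids, request body, and auth handling are illustrative placeholders rather than the exact wire format:

```bash
# Illustrative only: base URL, scale set ids, JSON body, and token handling
# are placeholders, not the exact protocol.
BASE="https://pipelines.actions.githubusercontent.com/<registration-token-scope>/_apis/runtime/runnerscalesets"

# Patch mutable RunnerScaleSet properties, e.g. the runner group:
curl -X PATCH "$BASE/2" \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"runnerGroupName": "my-group"}'

# Delete the RunnerScaleSet once runners have drained to zero:
curl -X DELETE "$BASE/1" -H "Authorization: Bearer $ADMIN_TOKEN"
```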

View File

@@ -1,5 +1,4 @@
-# ADR 2022-11-04: Technical detail about actions-runner-controller repository transfer
+# ADR 0004: Technical detail about actions-runner-controller repository transfer
**Date**: 2022-11-04
**Status**: Done
@@ -9,18 +8,17 @@
As part of ARC Private Beta: Repository Migration & Open Sourcing Process, we have decided to transfer the current [actions-runner-controller repository](https://github.com/actions-runner-controller/actions-runner-controller) into the [Actions org](https://github.com/actions).
**Goals:**
- A clear signal that GitHub will start taking over ARC and provide support.
- Since we are going to deprecate the existing auto-scale mode in ARC at some point, we want to have a clear separation between the legacy mode (not supported) and the new mode (supported).
- Avoid disrupting users as much as we can, existing ARC users will not notice any difference after the repository transfer, they can keep upgrading to the newer version of ARC and keep using the legacy mode.
**Challenges**
- The original creator's name (`summerwind`) is all over the place, including some critical parts of ARC:
  - The k8s user resource API's full name is `actions.summerwind.dev/v1alpha1/RunnerDeployment`, renaming it to `actions.github.com` is a breaking change and will force the user to rebuild their entire k8s cluster.
  - All docker images around ARC (controller + default runner) is published to [dockerhub/summerwind](https://hub.docker.com/u/summerwind)
  - The helm chart for ARC is currently hosted on [GitHub pages](https://actions-runner-controller.github.io/actions-runner-controller) for https://github.com/actions-runner-controller/actions-runner-controller, moving the repository means we will break users who install ARC via the helm chart
# Decisions
## APIs group names for k8s custom resources, `actions.summerwind` or `actions.github`
@@ -29,9 +27,8 @@ As part of ARC Private Beta: Repository Migration & Open Sourcing Process, we ha
- For any new resource API we are going to add, those will be named properly under GitHub, ex: `actions.github.com/v1alpha1/AutoScalingRunnerSet`
Benefits:
- A clear separation from existing ARC:
  - Easy for the support engineer to triage income tickets and figure out whether we need to support the use case from the user
  - We won't break existing users when they upgrade to a newer version of ARC after the repository transfer
Based on the spike done by `@nikola-jokic`, we have confidence that we can host multiple resources with different API names under the same repository, and the published ARC controller can handle both resources properly.

View File

@@ -1,8 +1,8 @@
-# ADR 2022-12-05: Adding labels to our resources
+# ADR 0007: Adding labels to our resources
**Date**: 2022-12-05
-**Status**: Superceded [^1]
+**Status**: Done
## Context
@@ -20,15 +20,12 @@ Assuming standard logging that would allow us to get all ARC logs by running
```bash
kubectl logs -l 'app.kubernetes.io/part-of=actions-runner-controller'
```
which would be very useful for development to begin with.
The proposal is to add these sets of labels to the pods ARC creates:
#### controller-manager
Labels to be set by the Helm chart:
```yaml
metadata:
  labels:
@@ -38,9 +35,7 @@ metadata:
```
#### Listener
Labels to be set by controller at creation:
```yaml
metadata:
  labels:
@@ -48,7 +43,7 @@ metadata:
    app.kubernetes.io/component: runner-scale-set-listener
    app.kubernetes.io/version: "x.x.x"
    actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet
    # the following labels are to be extracted by the config URL
    actions.github.com/enterprise: enterprise
    actions.github.com/organization: organization
@@ -56,9 +51,7 @@ metadata:
```
#### Runner
Labels to be set by controller at creation:
```yaml
metadata:
  labels:
@@ -85,5 +78,3 @@ Or for example if they're having problems specifically with runners:
This way users don't have to understand ARC moving parts but we still have a
way to target them specifically if we need to.
-[^1]: Superseded by [ADR 2023-03-14](2023-03-14-adding-labels-k8s-resources.md)
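
With the labels above applied consistently, targeting a single component or scale set from the command line is a one-liner; the namespace below is an assumption:

```bash
# Logs from every listener pod in the controller namespace (namespace assumed):
kubectl logs -n arc-systems -l 'app.kubernetes.io/component=runner-scale-set-listener'

# All pods belonging to one scale set, across namespaces:
kubectl get pods -A -l 'actions.github.com/scale-set-name=scale-set-name'
```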

View File

@@ -1,5 +1,4 @@
-# ADR 2022-12-27: Pick the right runner to scale down
+# ADR 0008: Pick the right runner to scale down
**Date**: 2022-12-27
**Status**: Done
@@ -8,37 +7,35 @@
- A custom resource `EphemeralRunnerSet` manage a set of custom resource `EphemeralRunners`
- The `EphemeralRunnerSet` has `Replicas` in its `Spec`, and the responsibility of the `EphemeralRunnerSet_controller` is to reconcile a given `EphemeralRunnerSet` to have
  the same amount of `EphemeralRunners` as the `Spec.Replicas` defined.
  - This means the `EphemeralRunnerSet_controller` will scale up the `EphemeralRunnerSet` by creating more `EphemeralRunner` in the case of the `Spec.Replicas` is higher than
    the current amount of `EphemeralRunners`.
  - This also means the `EphemeralRunnerSet_controller` will scale down the `EphemeralRunnerSet` by finding some existing `EphemeralRunner` to delete in the case of
    the `Spec.Replicas` is less than the current amount of `EphemeralRunners`.
This ADR is about how can we find the right existing `EphemeralRunner` to delete when we need to scale down.
## Current approach
1. `EphemeralRunnerSet_controller` figure out how many `EphemeralRunner` it needs to delete, ex: need to scale down from 10 to 2 means we need to delete 8 `EphemeralRunner`
2. `EphemeralRunnerSet_controller` find all `EphemeralRunner` that is in the `Running` or `Pending` phase.
   > `Pending` means the `EphemeralRunner` is still probably creating and a runner has not yet configured with the Actions service.
   > `Running` means the `EphemeralRunner` is created and a runner has probably configured with Actions service, the runner may sit there idle,
   > or maybe actively running a workflow job. We don't have a clear answer for it from the ARC side. (Actions service knows it for sure)
3. `EphemeralRunnerSet_controller` make an HTTP DELETE request to the Actions service for each `EphemeralRunner` from the previous step and ask the Actions service to delete the runner via `RunnerId`.
   (The `RunnerId` is generated after the runner registered with the Actions service, and stored on the `EphemeralRunner.Status.RunnerId`)
   > - The HTTP DELETE request looks like the following:
   >   `DELETE https://pipelines.actions.githubusercontent.com/WoxlUxJHrKEzIp4Nz3YmrmLlZBonrmj9xCJ1lrzcJ9ZsD1Tnw7/_apis/distributedtask/pools/0/agents/1024`
   >   The Actions service will return 2 types of responses:
   >   1. 204 (No Content): The runner with Id 1024 has been successfully removed from the service or the runner with Id 1024 doesn't exist.
   >   2. 400 (Bad Request) with JSON body that contains an error message like `JobStillRunningException`: The service can't remove this runner at this point since it has been
   >      assigned to a job request, the client won't be able to remove the runner until the runner finishes its current assigned job request.
4. `EphemeralRunnerSet_controller` will ignore any deletion error from runners that are still running a job, and keep trying deletion until the amount of `204` equals the amount of
   `EphemeralRunner` needs to delete.
## The problem with the current approach
@@ -71,7 +68,6 @@ this would be a big `NO` from a security point of view since we may not trust th
The nature of the k8s controller-runtime means we might reconcile the resource base on stale cache data.
I think our goal for the solution should be:
- Reduce wasteful HTTP requests on a scale-down as much as we can.
  - We can accept that we might make 1 or 2 wasteful requests to Actions service, but we can't accept making 5/10+ of them.
- See if we can meet feature parity with what the RunnerJobHook support with compromise any security concerns.
@@ -81,11 +77,9 @@ a simple thought is how about we somehow attach some info to the `EphemeralRunne
How about we send this info from the service to the auto-scaling-listener via the existing HTTP long-poll
and let the listener patch the `EphemeralRunner.Status` to indicate it's running a job?
> The listener is normally in a separate namespace with elevated permission and it's something we can trust.
Changes:
- Introduce a new message type `JobStarted` (in addition to the existing `JobAvailable/JobAssigned/JobCompleted`) on the service side, the message is sent when a runner of the `RunnerScaleSet` get assigned to a job,
  `RequestId`, `RunnerId`, and `RunnerName` will be included in the message.
- Add `RequestId (int)` to `EphemeralRunner.Status`, this will indicate which job the runner is running.
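
Once `RequestId` lands on the status, idle runners become directly observable, which is exactly what the scale-down path needs. A quick check along those lines, where the status field path is an assumption based on the `RequestId` proposal above:

```bash
# Hypothetical: list runners with the job request they are bound to; the
# .status.jobRequestId path is an assumption based on the RequestId proposal.
kubectl get ephemeralrunners -n arc-runners \
  -o custom-columns='NAME:.metadata.name,PHASE:.status.phase,JOB:.status.jobRequestId'
# Rows with an empty JOB column are idle runners, the safe picks for scale-down.
```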

View File

@@ -1,8 +1,6 @@
-# ADR 2023-02-02: Automate updating runner version
-**Date**: 2023-02-02
-**Status**: Done
+# Automate updating runner version
+**Status**: Proposed
## Context
@@ -18,7 +16,6 @@ version is updated (and this is currently done manually).
We can have another workflow running on a cadence (hourly seems sensible) and checking for new runner
releases, creating a PR updating `RUNNER_VERSION` in:
- `.github/workflows/release-runners.yaml`
- `Makefile`
- `runner/Makefile`

View File

@@ -1,14 +1,13 @@
-# ADR 2023-02-10: Limit Permissions for Service Accounts in Actions-Runner-Controller
+# ADR 0007: Limit Permissions for Service Accounts in Actions-Runner-Controller
**Date**: 2023-02-10
-**Status**: Superceded [^1]
+**Status**: Pending
## Context
- `actions-runner-controller` is a Kubernetes CRD (with controller) built using https://github.com/kubernetes-sigs/controller-runtime
- [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) has a default cache based k8s API client.Reader to make query k8s API server more efficiency.
- The cache-based API client requires cluster scope `list` and `watch` permission for any resource the controller may query.
@@ -23,7 +22,6 @@ There are 3 service accounts involved for a working `AutoscalingRunnerSet` based
  This should have the lowest privilege (not any `RoleBinding` nor `ClusterRoleBinding`) by default, in the case of `containerMode=kubernetes`, it will get certain write permission with `RoleBinding` to limit the permission to a single namespace.
  > References:
  > - ./charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml
  > - ./charts/gha-runner-scale-set/templates/kube_mode_role.yaml
  > - ./charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml
@@ -54,7 +52,7 @@ The current `ClusterRole` has the following permissions:
## Limit cluster role permission on Secrets
The cluster scope `List` `Secrets` permission might be a blocker for adopting `actions-runner-controller` for certain customers as they may have certain restriction in their cluster that simply doesn't allow any service account to have cluster scope `List Secrets` permission.
To help these customers and improve security for `actions-runner-controller` in general, we will try to limit the `ClusterRole` permission of the controller manager's service account down to the following:
@@ -81,10 +79,9 @@ The `Role` and `RoleBinding` creation will happen during the `helm install demo
During `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`, we will store the controller's service account info as labels on the controller `Deployment`.
Ex:
```yaml
actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }}
actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
```
Introduce a new `Role` per `AutoScalingRunnerSet` installation and `RoleBinding` the `Role` with the controller's `ServiceAccount` in the namespace that each `AutoScalingRunnerSet` deployed with the following permission.
@@ -105,9 +102,8 @@ The `gha-runner-scale-set` helm chart will use this service account to properly
The `gha-runner-scale-set` helm chart will also allow customers to explicitly provide the controller service account info, in case the `helm lookup` couldn't locate the right controller `Deployment`.
New sections in `values.yaml` of `gha-runner-scale-set`:
```yaml
## Optional controller service account that needs to have required Role and RoleBinding
## to operate this gha-runner-scale-set installation.
## The helm chart will try to find the controller deployment and its service account at installation time.
## In case the helm chart can't find the right service account, you can explicitly pass in the following value
```
@@ -133,8 +129,5 @@ You will deploy the `AutoScalingRunnerSet` with something like `helm install dem
In this mode, you will end up with a manager `Role` that has all Get/List/Create/Delete/Update/Patch/Watch permissions on resources we need, and a `RoleBinding` to bind the `Role` with the controller `ServiceAccount` in the watched single namespace and the controller namespace, ex: `test-namespace` and `arc-system` in the above example.
The downside of this mode:
- When you have multiple controllers deployed, they will still use the same version of the CRD. So you will need to make sure every controller you deployed has to be the same version as each other.
- You can't mismatch install both `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster.
-[^1]: Superseded by [ADR 2023-04-11](2023-04-11-limit-manager-role-permission.md)
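
For the explicit fallback described above, an install of the runner scale set chart that bypasses the `helm lookup` might look like this; the value names are an assumption consistent with the `values.yaml` section described above, and the release, namespace, and config URL are illustrative:

```bash
# Pass the controller service account explicitly when helm lookup can't find it.
# Value names are assumed; release names, namespaces, and URL are illustrative.
helm install demo \
  oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set \
  --namespace test-namespace --create-namespace \
  --set githubConfigUrl="https://github.com/my-org/my-repo" \
  --set controllerServiceAccount.namespace=arc-system \
  --set controllerServiceAccount.name=demo-gha-runner-scale-set-controller
```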

View File

@@ -0,0 +1,18 @@
# Title
<!-- ADR titles should typically be imperative sentences. -->
**Status**: (Proposed|Accepted|Rejected|Superceded|Deprecated)
## Context
*What is the issue or background knowledge necessary for future readers
to understand why this ADR was written?*
## Decision
*What is the change being proposed? / How will it be implemented?*
## Consequences
*What becomes easier or more difficult to do because of this change?*

View File

@@ -52,9 +52,6 @@ type AutoscalingListenerSpec struct {
	// Required
	Image string `json:"image,omitempty"`
-	// Required
-	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
	// Required
	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`

View File

@@ -248,6 +248,7 @@ type AutoscalingRunnerSetStatus struct {
} }
func (ars *AutoscalingRunnerSet) ListenerSpecHash() string { func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {
type listenerSpec = AutoscalingRunnerSetSpec
arsSpec := ars.Spec.DeepCopy() arsSpec := ars.Spec.DeepCopy()
spec := arsSpec spec := arsSpec
return hash.ComputeTemplateHash(&spec) return hash.ComputeTemplateHash(&spec)

View File

@@ -77,11 +77,6 @@ type RunnerDeploymentStatus struct {
// +kubebuilder:object:root=true // +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=rdeploy // +kubebuilder:resource:shortName=rdeploy
// +kubebuilder:subresource:status // +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.enterprise",name=Enterprise,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.organization",name=Organization,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.repository",name=Repository,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.group",name=Group,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.labels",name=Labels,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number // +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number // +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
// +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number // +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number

View File

@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.23.3 version: 0.22.0
# Used as the default manager tag value when no tag property is provided in the values.yaml # Used as the default manager tag value when no tag property is provided in the values.yaml
appVersion: 0.27.4 appVersion: 0.27.0
home: https://github.com/actions/actions-runner-controller home: https://github.com/actions/actions-runner-controller

View File

@@ -46,7 +46,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `metrics.port` | Set port of metrics service | 8443 | | `metrics.port` | Set port of metrics service | 8443 |
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true | | `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy | | `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 | | `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
| `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | | | `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | | | `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
| `fullnameOverride` | Override the full resource names | | | `fullnameOverride` | Override the full resource names | |
@@ -102,11 +102,8 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | | | `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | | | `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | | | `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` |
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | | | `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` | | `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false | | `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | | | `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` | | `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
@@ -118,9 +115,9 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | | | `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
| `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text | | `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
| `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false | | `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the actions-metrics-server | false | | `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
| `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false | | `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false |
| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | actions-metrics-server | | `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
| `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | | | `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | | | `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | |
| `actionsMetricsServer.nameOverride` | Override the resource name prefix | | | `actionsMetricsServer.nameOverride` | Override the resource name prefix | |
@@ -138,20 +135,17 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | | | `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | |
| `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | | | `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | |
| `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | | | `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | |
| `actionsMetricsServer.terminationGracePeriodSeconds` | Set the actionsMetricsServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `actionsMetricsServer.lifecycle` | Set the actionsMetricsServer pod lifecycle hooks | `{}` |
| `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | | | `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | |
| `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` | | `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
| `actionsMetricsServer.service.loadBalancerSourceRanges` | Set actionsMetricsServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false | | `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false |
| `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | | | `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | |
| `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` | | `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | | | `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | |
| `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | | | `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | |
| `actionsMetrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false | | `actionsMetrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | | | `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
| `actionsMetrics.port` | Set port of actions metrics service | 8443 | | `actionsMetrics.port` | Set port of actions metrics service | 8443 |
| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true | | `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy | | `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 | | `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
| `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | | | `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |

View File

@@ -17,21 +17,6 @@ spec:
scope: Namespaced scope: Namespaced
versions: versions:
- additionalPrinterColumns: - additionalPrinterColumns:
- jsonPath: .spec.template.spec.enterprise
name: Enterprise
type: string
- jsonPath: .spec.template.spec.organization
name: Organization
type: string
- jsonPath: .spec.template.spec.repository
name: Repository
type: string
- jsonPath: .spec.template.spec.group
name: Group
type: string
- jsonPath: .spec.template.spec.labels
name: Labels
type: string
- jsonPath: .spec.replicas - jsonPath: .spec.replicas
name: Desired name: Desired
type: number type: number

View File

@@ -50,12 +50,6 @@ spec:
{{- end }} {{- end }}
command: command:
- "/actions-metrics-server" - "/actions-metrics-server"
{{- if .Values.actionsMetricsServer.lifecycle }}
{{- with .Values.actionsMetricsServer.lifecycle }}
lifecycle:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
env: env:
- name: GITHUB_WEBHOOK_SECRET_TOKEN - name: GITHUB_WEBHOOK_SECRET_TOKEN
valueFrom: valueFrom:
@@ -148,7 +142,7 @@ spec:
securityContext: securityContext:
{{- toYaml .Values.securityContext | nindent 12 }} {{- toYaml .Values.securityContext | nindent 12 }}
{{- end }} {{- end }}
terminationGracePeriodSeconds: {{ .Values.actionsMetricsServer.terminationGracePeriodSeconds }} terminationGracePeriodSeconds: 10
{{- with .Values.actionsMetricsServer.nodeSelector }} {{- with .Values.actionsMetricsServer.nodeSelector }}
nodeSelector: nodeSelector:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}

View File

@@ -1,90 +0,0 @@
{{- if .Values.actionsMetricsServer.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
rules:
- apiGroups:
- actions.summerwind.dev
resources:
- horizontalrunnerautoscalers
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- horizontalrunnerautoscalers/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- horizontalrunnerautoscalers/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.summerwind.dev
resources:
- runnersets
verbs:
- get
- list
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnerdeployments
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnerdeployments/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnerdeployments/status
verbs:
- get
- patch
- update
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
{{- end }}

View File

@@ -1,14 +0,0 @@
{{- if .Values.actionsMetricsServer.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller-actions-metrics-server.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -5,7 +5,7 @@ metadata:
name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }} name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }}
namespace: {{ .Release.Namespace }} namespace: {{ .Release.Namespace }}
labels: labels:
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }} {{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- if .Values.actionsMetricsServer.service.annotations }} {{- if .Values.actionsMetricsServer.service.annotations }}
annotations: annotations:
{{ toYaml .Values.actionsMetricsServer.service.annotations | nindent 4 }} {{ toYaml .Values.actionsMetricsServer.service.annotations | nindent 4 }}
@@ -23,10 +23,4 @@ spec:
{{- end }} {{- end }}
selector: selector:
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }} {{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
{{- if .Values.actionsMetricsServer.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $ip := .Values.actionsMetricsServer.service.loadBalancerSourceRanges }}
- {{ $ip -}}
{{- end }}
{{- end }}
{{- end }} {{- end }}

View File

@@ -117,14 +117,10 @@ spec:
name: {{ include "actions-runner-controller.secretName" . }} name: {{ include "actions-runner-controller.secretName" . }}
optional: true optional: true
{{- end }} {{- end }}
{{- if kindIs "slice" .Values.githubWebhookServer.env }}
{{- toYaml .Values.githubWebhookServer.env | nindent 8 }}
{{- else }}
{{- range $key, $val := .Values.githubWebhookServer.env }} {{- range $key, $val := .Values.githubWebhookServer.env }}
- name: {{ $key }} - name: {{ $key }}
value: {{ $val | quote }} value: {{ $val | quote }}
{{- end }} {{- end }}
{{- end }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}" image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
name: github-webhook-server name: github-webhook-server
imagePullPolicy: {{ .Values.image.pullPolicy }} imagePullPolicy: {{ .Values.image.pullPolicy }}

View File

@@ -250,6 +250,14 @@ rules:
- patch - patch
- update - update
- watch - watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
{{- if .Values.runner.statusUpdateHook.enabled }} {{- if .Values.runner.statusUpdateHook.enabled }}
- apiGroups: - apiGroups:
- "" - ""
@@ -303,4 +311,11 @@ rules:
- list - list
- create - create
- delete - delete
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
{{- end }} {{- end }}

View File

@@ -1,21 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
{{- if .Values.scope.singleNamespace }}
kind: RoleBinding
{{- else }}
kind: ClusterRoleBinding
{{- end }}
metadata:
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
{{- if .Values.scope.singleNamespace }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}

View File

@@ -1,24 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
{{- if .Values.scope.singleNamespace }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
metadata:
creationTimestamp: null
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
{{- if .Values.rbac.allowGrantingKubernetesContainerModePermissions }}
{{/* These permissions are required by ARC to create RBAC resources for the runner pod to use the kubernetes container mode. */}}
{{/* See https://github.com/actions/actions-runner-controller/pull/1268/files#r917331632 */}}
- create
- delete
{{- end }}

View File

@@ -44,7 +44,6 @@ webhooks:
resources: resources:
- runners - runners
sideEffects: None sideEffects: None
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
- admissionReviewVersions: - admissionReviewVersions:
- v1beta1 - v1beta1
{{- if .Values.scope.singleNamespace }} {{- if .Values.scope.singleNamespace }}
@@ -75,7 +74,6 @@ webhooks:
resources: resources:
- runnerdeployments - runnerdeployments
sideEffects: None sideEffects: None
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
- admissionReviewVersions: - admissionReviewVersions:
- v1beta1 - v1beta1
{{- if .Values.scope.singleNamespace }} {{- if .Values.scope.singleNamespace }}
@@ -106,7 +104,6 @@ webhooks:
resources: resources:
- runnerreplicasets - runnerreplicasets
sideEffects: None sideEffects: None
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
- admissionReviewVersions: - admissionReviewVersions:
- v1beta1 - v1beta1
{{- if .Values.scope.singleNamespace }} {{- if .Values.scope.singleNamespace }}
@@ -139,7 +136,6 @@ webhooks:
objectSelector: objectSelector:
matchLabels: matchLabels:
"actions-runner-controller/inject-registration-token": "true" "actions-runner-controller/inject-registration-token": "true"
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
--- ---
apiVersion: admissionregistration.k8s.io/v1 apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration kind: ValidatingWebhookConfiguration
@@ -181,7 +177,6 @@ webhooks:
resources: resources:
- runners - runners
sideEffects: None sideEffects: None
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
- admissionReviewVersions: - admissionReviewVersions:
- v1beta1 - v1beta1
{{- if .Values.scope.singleNamespace }} {{- if .Values.scope.singleNamespace }}
@@ -212,7 +207,6 @@ webhooks:
resources: resources:
- runnerdeployments - runnerdeployments
sideEffects: None sideEffects: None
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
- admissionReviewVersions: - admissionReviewVersions:
- v1beta1 - v1beta1
{{- if .Values.scope.singleNamespace }} {{- if .Values.scope.singleNamespace }}
@@ -244,7 +238,6 @@ webhooks:
- runnerreplicasets - runnerreplicasets
sideEffects: None sideEffects: None
{{ if not (or (hasKey .Values.admissionWebHooks "caBundle") .Values.certManagerEnabled) }} {{ if not (or (hasKey .Values.admissionWebHooks "caBundle") .Values.certManagerEnabled) }}
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
--- ---
apiVersion: v1 apiVersion: v1
kind: Secret kind: Secret

View File

@@ -47,7 +47,6 @@ authSecret:
#github_basicauth_username: "" #github_basicauth_username: ""
#github_basicauth_password: "" #github_basicauth_password: ""
# http(s) should be specified for dockerRegistryMirror, e.g.: dockerRegistryMirror="https://<your-docker-registry-mirror>"
dockerRegistryMirror: "" dockerRegistryMirror: ""
image: image:
repository: "summerwind/actions-runner-controller" repository: "summerwind/actions-runner-controller"
@@ -280,19 +279,6 @@ githubWebhookServer:
# queueLimit: 100 # queueLimit: 100
terminationGracePeriodSeconds: 10 terminationGracePeriodSeconds: 10
lifecycle: {} lifecycle: {}
# specify additional environment variables for the webhook server pod.
# It's possible to specify either key vale pairs e.g.:
# my_env_var: "some value"
# my_other_env_var: "other value"
# or a list of complete environment variable definitions e.g.:
# - name: GITHUB_WEBHOOK_SECRET_TOKEN
# valueFrom:
# secretKeyRef:
# key: GITHUB_WEBHOOK_SECRET_TOKEN
# name: prod-gha-controller-webhook-token
# optional: true
env: {}
actionsMetrics: actionsMetrics:
serviceAnnotations: {} serviceAnnotations: {}
@@ -360,7 +346,6 @@ actionsMetricsServer:
protocol: TCP protocol: TCP
name: http name: http
#nodePort: someFixedPortForUseWithTerraformCdkCfnEtc #nodePort: someFixedPortForUseWithTerraformCdkCfnEtc
loadBalancerSourceRanges: []
ingress: ingress:
enabled: false enabled: false
ingressClassName: "" ingressClassName: ""
@@ -390,5 +375,4 @@ actionsMetricsServer:
# - secretName: chart-example-tls # - secretName: chart-example-tls
# hosts: # hosts:
# - chart-example.local # - chart-example.local
terminationGracePeriodSeconds: 10
lifecycle: {}

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0 version: 0.3.0
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using. # follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes. # It is recommended to use it with quotes.
appVersion: "0.4.0" appVersion: "0.3.0"
home: https://github.com/actions/actions-runner-controller home: https://github.com/actions/actions-runner-controller

View File

@@ -80,9 +80,6 @@ spec:
image: image:
description: Required description: Required
type: string type: string
imagePullPolicy:
description: Required
type: string
imagePullSecrets: imagePullSecrets:
description: Required description: Required
items: items:

View File

@@ -39,7 +39,7 @@ helm.sh/chart: {{ include "gha-runner-scale-set-controller.chart" . }}
{{- if .Chart.AppVersion }} {{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }} {{- end }}
app.kubernetes.io/part-of: gha-runner-scale-set-controller app.kubernetes.io/part-of: {{ .Chart.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $k, $v := .Values.labels }} {{- range $k, $v := .Values.labels }}
{{ $k }}: {{ $v }} {{ $k }}: {{ $v }}
@@ -59,41 +59,25 @@ Create the name of the service account to use
*/}} */}}
{{- define "gha-runner-scale-set-controller.serviceAccountName" -}} {{- define "gha-runner-scale-set-controller.serviceAccountName" -}}
{{- if eq .Values.serviceAccount.name "default"}} {{- if eq .Values.serviceAccount.name "default"}}
{{- fail "serviceAccount.name cannot be set to 'default'" }} {{- fail "serviceAccount.name cannot be set to 'default'" }}
{{- end }} {{- end }}
{{- if .Values.serviceAccount.create }} {{- if .Values.serviceAccount.create }}
{{- default (include "gha-runner-scale-set-controller.fullname" .) .Values.serviceAccount.name }} {{- default (include "gha-runner-scale-set-controller.fullname" .) .Values.serviceAccount.name }}
{{- else }} {{- else }}
{{- if not .Values.serviceAccount.name }} {{- if not .Values.serviceAccount.name }}
{{- fail "serviceAccount.name must be set if serviceAccount.create is false" }} {{- fail "serviceAccount.name must be set if serviceAccount.create is false" }}
{{- else }} {{- else }}
{{- .Values.serviceAccount.name }} {{- .Values.serviceAccount.name }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- define "gha-runner-scale-set-controller.managerClusterRoleName" -}} {{- define "gha-runner-scale-set-controller.managerRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-role {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-role
{{- end }} {{- end }}
{{- define "gha-runner-scale-set-controller.managerClusterRoleBinding" -}} {{- define "gha-runner-scale-set-controller.managerRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-rolebinding {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-rolebinding
{{- end }}
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-role
{{- end }}
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-rolebinding
{{- end }}
{{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-role
{{- end }}
{{- define "gha-runner-scale-set-controller.managerListenerRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-rolebinding
{{- end }} {{- end }}
{{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}} {{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}}
@@ -107,7 +91,7 @@ Create the name of the service account to use
{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}} {{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
{{- $names := list }} {{- $names := list }}
{{- range $k, $v := . }} {{- range $k, $v := . }}
{{- $names = append $names $v.name }} {{- $names = append $names $v.name }}
{{- end }} {{- end }}
{{- $names | join ","}} {{- $names | join ","}}
{{- end }} {{- end }}

View File

@@ -5,11 +5,6 @@ metadata:
namespace: {{ .Release.Namespace }} namespace: {{ .Release.Namespace }}
labels: labels:
{{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }} {{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }}
actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }}
actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
{{- if .Values.flags.watchSingleNamespace }}
actions.github.com/controller-watch-single-namespace: {{ .Values.flags.watchSingleNamespace }}
{{- end }}
spec: spec:
replicas: {{ default 1 .Values.replicaCount }} replicas: {{ default 1 .Values.replicaCount }}
selector: selector:
@@ -23,7 +18,7 @@ spec:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
labels: labels:
app.kubernetes.io/part-of: gha-runner-scale-set-controller app.kubernetes.io/part-of: actions-runner-controller
app.kubernetes.io/component: controller-manager app.kubernetes.io/component: controller-manager
app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/version: {{ .Chart.Version }}
{{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }} {{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }}
@@ -56,9 +51,6 @@ spec:
{{- with .Values.flags.logLevel }} {{- with .Values.flags.logLevel }}
- "--log-level={{ . }}" - "--log-level={{ . }}"
{{- end }} {{- end }}
{{- with .Values.flags.watchSingleNamespace }}
- "--watch-single-namespace={{ . }}"
{{- end }}
command: command:
- "/manager" - "/manager"
env: env:
@@ -68,11 +60,14 @@ spec:
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
- name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
value: "{{ .Values.image.pullPolicy | default "IfNotPresent" }}"
{{- with .Values.env }} {{- with .Values.env }}
{{- if kindIs "slice" . }} {{- if kindIs "slice" .Values.env }}
{{- toYaml . | nindent 8 }} {{- toYaml .Values.env | nindent 8 }}
{{- else }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- with .Values.resources }} {{- with .Values.resources }}

View File

@@ -1,4 +1,4 @@
{{- if gt (int (default 1 .Values.replicaCount)) 1 }} {{- if gt (int (default 1 .Values.replicaCount)) 1 -}}
# permissions to do leader election. # permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: Role kind: Role

View File

@@ -1,4 +1,4 @@
{{- if gt (int (default 1 .Values.replicaCount)) 1 }} {{- if gt (int (default 1 .Values.replicaCount)) 1 -}}
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding kind: RoleBinding
metadata: metadata:

View File

@@ -1,14 +0,0 @@
{{- if empty .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.managerClusterRoleBinding" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "gha-runner-scale-set-controller.managerClusterRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,40 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- patch
- update

View File

@@ -1,8 +1,7 @@
{{- if empty .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole kind: ClusterRole
metadata: metadata:
name: {{ include "gha-runner-scale-set-controller.managerClusterRoleName" . }} name: {{ include "gha-runner-scale-set-controller.managerRoleName" . }}
rules: rules:
- apiGroups: - apiGroups:
- actions.github.com - actions.github.com
@@ -21,7 +20,6 @@ rules:
resources: resources:
- autoscalingrunnersets/finalizers - autoscalingrunnersets/finalizers
verbs: verbs:
- patch
- update - update
- apiGroups: - apiGroups:
- actions.github.com - actions.github.com
@@ -56,7 +54,6 @@ rules:
resources: resources:
- autoscalinglisteners/finalizers - autoscalinglisteners/finalizers
verbs: verbs:
- patch
- update - update
- apiGroups: - apiGroups:
- actions.github.com - actions.github.com
@@ -78,13 +75,6 @@ rules:
- get - get
- patch - patch
- update - update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/finalizers
verbs:
- patch
- update
- apiGroups: - apiGroups:
- actions.github.com - actions.github.com
resources: resources:
@@ -102,8 +92,13 @@ rules:
resources: resources:
- ephemeralrunners/finalizers - ephemeralrunners/finalizers
verbs: verbs:
- create
- delete
- get
- list
- patch - patch
- update - update
- watch
- apiGroups: - apiGroups:
- actions.github.com - actions.github.com
resources: resources:
@@ -117,12 +112,44 @@ rules:
resources: resources:
- pods - pods
verbs: verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list - list
- watch - watch
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
- serviceaccounts - configmaps
verbs: verbs:
- list - list
- watch - watch
@@ -131,6 +158,10 @@ rules:
resources: resources:
- rolebindings - rolebindings
verbs: verbs:
- create
- delete
- get
- update
- list - list
- watch - watch
- apiGroups: - apiGroups:
@@ -138,7 +169,9 @@ rules:
resources: resources:
- roles - roles
verbs: verbs:
- create
- delete
- get
- update
- list - list
- watch - watch
- patch
{{- end }}

View File

@@ -1,12 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding kind: ClusterRoleBinding
metadata: metadata:
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleBinding" . }} name: {{ include "gha-runner-scale-set-controller.managerRoleBinding" . }}
namespace: {{ .Release.Namespace }}
roleRef: roleRef:
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io
kind: Role kind: ClusterRole
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . }} name: {{ include "gha-runner-scale-set-controller.managerRoleName" . }}
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }} name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}

View File

@@ -1,84 +0,0 @@
{{- if .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners/finalizers
verbs:
- patch
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- list
- watch
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets
verbs:
- list
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets
verbs:
- list
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners
verbs:
- list
- watch
{{- end }}

View File

@@ -1,15 +0,0 @@
{{- if .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,125 +0,0 @@
{{- if .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
namespace: {{ .Values.flags.watchSingleNamespace }}
rules:
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets/finalizers
verbs:
- patch
- update
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/finalizers
verbs:
- patch
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners/finalizers
verbs:
- patch
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- list
- watch
- patch
{{- end }}

View File

@@ -1,15 +0,0 @@
{{- if .Values.flags.watchSingleNamespace }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }}
namespace: {{ .Values.flags.watchSingleNamespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,4 +1,4 @@
{{- if .Values.serviceAccount.create }} {{- if .Values.serviceAccount.create -}}
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:

View File

@@ -147,7 +147,7 @@ func TestTemplate_NotCreateServiceAccount_ServiceAccountNotSet(t *testing.T) {
assert.ErrorContains(t, err, "serviceAccount.name must be set if serviceAccount.create is false", "We should get an error because the default service account cannot be used") assert.ErrorContains(t, err, "serviceAccount.name must be set if serviceAccount.create is false", "We should get an error because the default service account cannot be used")
} }
func TestTemplate_CreateManagerClusterRole(t *testing.T) { func TestTemplate_CreateManagerRole(t *testing.T) {
t.Parallel() t.Parallel()
// Path to the helm chart we will test // Path to the helm chart we will test
@@ -162,23 +162,17 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) {
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role.yaml"}) output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
var managerClusterRole rbacv1.ClusterRole var managerRole rbacv1.ClusterRole
helm.UnmarshalK8SYaml(t, output, &managerClusterRole) helm.UnmarshalK8SYaml(t, output, &managerRole)
assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace") assert.Empty(t, managerRole.Namespace, "ClusterRole should not have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name) assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRole.Name)
assert.Equal(t, 16, len(managerClusterRole.Rules)) assert.Equal(t, 18, len(managerRole.Rules))
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role.yaml in chart", "We should get an error because the template should be skipped")
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_watch_role.yaml in chart", "We should get an error because the template should be skipped")
} }
func TestTemplate_ManagerClusterRoleBinding(t *testing.T) { func TestTemplate_ManagerRoleBinding(t *testing.T) {
t.Parallel() t.Parallel()
// Path to the helm chart we will test // Path to the helm chart we will test
@@ -195,80 +189,16 @@ func TestTemplate_ManagerClusterRoleBinding(t *testing.T) {
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role_binding.yaml"}) output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
var managerClusterRoleBinding rbacv1.ClusterRoleBinding var managerRoleBinding rbacv1.ClusterRoleBinding
helm.UnmarshalK8SYaml(t, output, &managerClusterRoleBinding) helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
assert.Empty(t, managerClusterRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace") assert.Empty(t, managerRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-rolebinding", managerClusterRoleBinding.Name) assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-rolebinding", managerRoleBinding.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRoleBinding.RoleRef.Name) assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerClusterRoleBinding.Subjects[0].Name) assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerClusterRoleBinding.Subjects[0].Namespace) assert.Equal(t, namespaceName, managerRoleBinding.Subjects[0].Namespace)
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role_binding.yaml in chart", "We should get an error because the template should be skipped")
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role_binding.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_watch_role_binding.yaml in chart", "We should get an error because the template should be skipped")
}
func TestTemplate_CreateManagerListenerRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_listener_role.yaml"})
var managerListenerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerListenerRole)
assert.Equal(t, namespaceName, managerListenerRole.Namespace, "Role should have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRole.Name)
assert.Equal(t, 4, len(managerListenerRole.Rules))
assert.Equal(t, "pods", managerListenerRole.Rules[0].Resources[0])
assert.Equal(t, "pods/status", managerListenerRole.Rules[1].Resources[0])
assert.Equal(t, "secrets", managerListenerRole.Rules[2].Resources[0])
assert.Equal(t, "serviceaccounts", managerListenerRole.Rules[3].Resources[0])
}
func TestTemplate_ManagerListenerRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"serviceAccount.create": "true",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_listener_role_binding.yaml"})
var managerListenerRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerListenerRoleBinding)
assert.Equal(t, namespaceName, managerListenerRoleBinding.Namespace, "RoleBinding should have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-rolebinding", managerListenerRoleBinding.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerListenerRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerListenerRoleBinding.Subjects[0].Namespace)
} }
func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
@@ -307,10 +237,6 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"])
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
assert.NotContains(t, deployment.Labels, "actions.github.com/controller-watch-single-namespace")
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, int32(1), *deployment.Spec.Replicas) assert.Equal(t, int32(1), *deployment.Spec.Replicas)
@@ -349,16 +275,13 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value) assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources) assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext) assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
@@ -393,8 +316,6 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
"imagePullSecrets[0].name": "dockerhub", "imagePullSecrets[0].name": "dockerhub",
"nameOverride": "gha-runner-scale-set-controller-override", "nameOverride": "gha-runner-scale-set-controller-override",
"fullnameOverride": "gha-runner-scale-set-controller-fullname-override", "fullnameOverride": "gha-runner-scale-set-controller-fullname-override",
"env[0].name": "ENV_VAR_NAME_1",
"env[0].value": "ENV_VAR_VALUE_1",
"serviceAccount.name": "gha-runner-scale-set-controller-sa", "serviceAccount.name": "gha-runner-scale-set-controller-sa",
"podAnnotations.foo": "bar", "podAnnotations.foo": "bar",
"podSecurityContext.fsGroup": "1000", "podSecurityContext.fsGroup": "1000",
@@ -422,7 +343,6 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "bar", deployment.Labels["foo"]) assert.Equal(t, "bar", deployment.Labels["foo"])
assert.Equal(t, "actions", deployment.Labels["github"]) assert.Equal(t, "actions", deployment.Labels["github"])
@@ -437,9 +357,6 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"]) assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"]) assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1) assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name) assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName) assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName)
@@ -475,16 +392,10 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value) assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "Always", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
@@ -624,264 +535,3 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
} }
func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
require.NoError(t, err)
chart := new(Chart)
err = yaml.Unmarshal(chartContent, chart)
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"image.tag": "dev",
"flags.watchSingleNamespace": "demo",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"])
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
assert.Equal(t, "demo", deployment.Labels["actions.github.com/controller-watch-single-namespace"])
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName)
assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext)
assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName)
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0)
assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)
managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
}
func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"env[0].Name": "ENV_VAR_NAME_1",
"env[0].Value": "ENV_VAR_VALUE_1",
"env[1].Name": "ENV_VAR_NAME_2",
"env[1].ValueFrom.SecretKeyRef.Key": "ENV_VAR_NAME_2",
"env[1].ValueFrom.SecretKeyRef.Name": "secret-name",
"env[1].ValueFrom.SecretKeyRef.Optional": "true",
"env[2].Name": "ENV_VAR_NAME_3",
"env[2].Value": "",
"env[3].Name": "ENV_VAR_NAME_4",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7)
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].Name)
assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Name)
assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Key)
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Optional)
assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[5].Name)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].Value)
assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[6].Name)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[6].ValueFrom)
}
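For reference, the indexed --set flags in this test correspond to an env block in values.yaml shaped like the sketch below; the names and the secret are placeholders lifted from the test itself. The capitalized keys still decode onto the Kubernetes EnvVar fields because the YAML-to-JSON unmarshalling matches field names case-insensitively, and the controller appends these entries after its built-in environment variables, which is why the assertions start at Env[3].

# Sketch of the equivalent values.yaml input (test placeholders, not defaults)
env:
  - Name: "ENV_VAR_NAME_1"
    Value: "ENV_VAR_VALUE_1"          # plain key/value entry
  - Name: "ENV_VAR_NAME_2"
    ValueFrom:
      SecretKeyRef:                   # pulled from an existing Secret
        Name: secret-name
        Key: ENV_VAR_NAME_2
        Optional: true
  - Name: "ENV_VAR_NAME_3"
    Value: ""                         # explicitly empty value
  - Name: "ENV_VAR_NAME_4"            # name only, no value or valueFrom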
func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"flags.watchSingleNamespace": "demo",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_cluster_role.yaml in chart", "We should get an error because the template should be skipped")
}
func TestTemplate_WatchSingleNamespace_NotManagerClusterRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"serviceAccount.create": "true",
"flags.watchSingleNamespace": "demo",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role_binding.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_cluster_role_binding.yaml in chart", "We should get an error because the template should be skipped")
}
func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"flags.watchSingleNamespace": "demo",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
var managerSingleNamespaceControllerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRole)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRole.Name)
assert.Equal(t, namespaceName, managerSingleNamespaceControllerRole.Namespace)
assert.Equal(t, 10, len(managerSingleNamespaceControllerRole.Rules))
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role.yaml"})
var managerSingleNamespaceWatchRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRole)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name)
assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace)
assert.Equal(t, 14, len(managerSingleNamespaceWatchRole.Rules))
}
func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"flags.watchSingleNamespace": "demo",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"})
var managerSingleNamespaceControllerRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRoleBinding)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceControllerRoleBinding.Name)
assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceControllerRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Subjects[0].Namespace)
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role_binding.yaml"})
var managerSingleNamespaceWatchRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRoleBinding)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceWatchRoleBinding.Name)
assert.Equal(t, "demo", managerSingleNamespaceWatchRoleBinding.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerSingleNamespaceWatchRoleBinding.Subjects[0].Namespace)
}
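Taken together, these tests pin the controller to a single namespace. A minimal sketch of the values they exercise (the "demo" namespace is the test fixture): with this flag set, the chart renders namespaced Roles and RoleBindings in the controller namespace and in the watched namespace instead of a ClusterRole/ClusterRoleBinding.

# Sketch: restrict the controller to one namespace
flags:
  logLevel: "debug"
  watchSingleNamespace: "demo"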

View File

@@ -18,17 +18,6 @@ imagePullSecrets: []
nameOverride: "" nameOverride: ""
fullnameOverride: "" fullnameOverride: ""
env:
## Define environment variables for the controller pod
# - name: "ENV_VAR_NAME_1"
# value: "ENV_VAR_VALUE_1"
# - name: "ENV_VAR_NAME_2"
# valueFrom:
# secretKeyRef:
# key: ENV_VAR_NAME_2
# name: secret-name
# optional: true
serviceAccount:
# Specifies whether a service account should be created for running the controller pod
create: true
@@ -42,27 +31,27 @@ serviceAccount:
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
@@ -79,7 +68,3 @@ flags:
# Log level can be set here with one of the following values: "debug", "info", "warn", "error".
# Defaults to "debug".
logLevel: "debug"
## Restricts the controller to only watch resources in the desired namespace.
## Defaults to watch all namespaces when unset.
# watchSingleNamespace: ""

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0
version: 0.3.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.4.0"
appVersion: "0.3.0"
home: https://github.com/actions/dev-arc

View File

@@ -11,9 +11,17 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
If release name contains chart name it will be used as a full name.
*/}}
{{- define "gha-runner-scale-set.fullname" -}}
{{- $name := default .Chart.Name }}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }} {{- end }}
{{- end }}
{{- end }}
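The longer form of this helper (the right-hand side of the hunk) is the conventional Helm fullname resolution. A sketch of how it resolves, assuming the chart name gha-runner-scale-set and illustrative inputs:

# fullnameOverride wins outright:
#   fullnameOverride: "my-arc"          -> my-arc
# otherwise nameOverride (or the chart name) is appended to the release:
#   release "test-arc"                  -> test-arc-gha-runner-scale-set
# unless the release name already contains the chart name:
#   release "gha-runner-scale-set-prod" -> gha-runner-scale-set-prod
# Every result is truncated to 63 characters (the Kubernetes name limit)
# and any trailing "-" is trimmed.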
{{/*
Create chart name and version as used by the chart label.
@@ -32,9 +40,6 @@ helm.sh/chart: {{ include "gha-runner-scale-set.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: gha-runner-scale-set
actions.github.com/scale-set-name: {{ .Release.Name }}
actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
{{- end }}
{{/*
@@ -65,24 +70,24 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role
{{- end }} {{- end }}
{{- define "gha-runner-scale-set.kubeModeRoleBindingName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role-binding
{{- end }}
{{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}} {{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-service-account {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-service-account
{{- end }} {{- end }}
{{- define "gha-runner-scale-set.dind-init-container" -}} {{- define "gha-runner-scale-set.dind-init-container" -}}
{{- range $i, $val := .Values.template.spec.containers }} {{- range $i, $val := .Values.template.spec.containers -}}
{{- if eq $val.name "runner" }} {{- if eq $val.name "runner" -}}
image: {{ $val.image }} image: {{ $val.image }}
{{- if $val.imagePullSecrets }}
imagePullSecrets:
{{ $val.imagePullSecrets | toYaml -}}
{{- end }}
command: ["cp"] command: ["cp"]
args: ["-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"] args: ["-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"]
volumeMounts: volumeMounts:
- name: dind-externals - name: dind-externals
mountPath: /home/runner/tmpDir mountPath: /home/runner/tmpDir
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
@@ -119,7 +124,7 @@ volumeMounts:
{{- $createWorkVolume := 1 }}
{{- range $i, $volume := .Values.template.spec.volumes }}
{{- if eq $volume.name "work" }}
{{- $createWorkVolume = 0 }}
{{- $createWorkVolume = 0 -}}
- {{ $volume | toYaml | nindent 2 }}
{{- end }}
{{- end }}
@@ -133,7 +138,7 @@ volumeMounts:
{{- $createWorkVolume := 1 }}
{{- range $i, $volume := .Values.template.spec.volumes }}
{{- if eq $volume.name "work" }}
{{- $createWorkVolume = 0 }}
{{- $createWorkVolume = 0 -}}
- {{ $volume | toYaml | nindent 2 }}
{{- end }}
{{- end }}
@@ -155,28 +160,25 @@ volumeMounts:
{{- end }}
{{- define "gha-runner-scale-set.non-runner-containers" -}}
{{- range $i, $container := .Values.template.spec.containers }}
{{- if ne $container.name "runner" }}
- {{ $container | toYaml | nindent 2 }}
{{- end }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.non-runner-non-dind-containers" -}}
{{- range $i, $container := .Values.template.spec.containers }}
{{- if and (ne $container.name "runner") (ne $container.name "dind") }}
- {{ $container | toYaml | nindent 2 }}
{{- end }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.non-runner-containers" -}}
{{- range $i, $container := .Values.template.spec.containers -}}
{{- if ne $container.name "runner" -}}
- name: {{ $container.name }}
{{- range $key, $val := $container }}
{{- if ne $key "name" }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.dind-runner-container" -}} {{- define "gha-runner-scale-set.dind-runner-container" -}}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- range $i, $container := .Values.template.spec.containers }} {{- range $i, $container := .Values.template.spec.containers -}}
{{- if eq $container.name "runner" }} {{- if eq $container.name "runner" -}}
{{- range $key, $val := $container }} {{- range $key, $val := $container }}
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }} {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
{{ $key }}: {{ $val | toYaml | nindent 2 }} {{ $key }}: {{ $val }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- $setDockerHost := 1 }} {{- $setDockerHost := 1 }}
@@ -193,24 +195,29 @@ env:
{{- with $container.env }}
{{- range $i, $env := . }}
{{- if eq $env.name "DOCKER_HOST" }}
{{- $setDockerHost = 0 }}
{{- $setDockerHost = 0 -}}
{{- end }}
{{- if eq $env.name "DOCKER_TLS_VERIFY" }}
{{- $setDockerTlsVerify = 0 }}
{{- $setDockerTlsVerify = 0 -}}
{{- end }}
{{- if eq $env.name "DOCKER_CERT_PATH" }}
{{- $setDockerCertPath = 0 }}
{{- $setDockerCertPath = 0 -}}
{{- end }}
{{- if eq $env.name "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS" }}
{{- $setRunnerWaitDocker = 0 }}
{{- $setRunnerWaitDocker = 0 -}}
{{- end }}
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
{{- $setNodeExtraCaCerts = 0 }}
{{- $setNodeExtraCaCerts = 0 -}}
{{- end }}
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
{{- $setRunnerUpdateCaCerts = 0 }}
{{- $setRunnerUpdateCaCerts = 0 -}}
{{- end }}
- name: {{ $env.name }}
{{- range $envKey, $envVal := $env }}
{{- if ne $envKey "name" }}
{{ $envKey }}: {{ $envVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
- {{ $env | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $setDockerHost }}
@@ -247,15 +254,20 @@ volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
{{- if eq $volMount.name "work" }}
{{- $mountWork = 0 }}
{{- $mountWork = 0 -}}
{{- end }}
{{- if eq $volMount.name "dind-cert" }}
{{- $mountDindCert = 0 }}
{{- $mountDindCert = 0 -}}
{{- end }}
{{- if eq $volMount.name "github-server-tls-cert" }}
{{- $mountGitHubServerTLS = 0 }}
{{- $mountGitHubServerTLS = 0 -}}
{{- end }}
- name: {{ $volMount.name }}
{{- range $mountKey, $mountVal := $volMount }}
{{- if ne $mountKey "name" }}
{{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
- {{ $volMount | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $mountWork }}
@@ -278,11 +290,11 @@ volumeMounts:
{{- define "gha-runner-scale-set.kubernetes-mode-runner-container" -}} {{- define "gha-runner-scale-set.kubernetes-mode-runner-container" -}}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- range $i, $container := .Values.template.spec.containers }} {{- range $i, $container := .Values.template.spec.containers -}}
{{- if eq $container.name "runner" }} {{- if eq $container.name "runner" -}}
{{- range $key, $val := $container }} {{- range $key, $val := $container }}
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }} {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
{{ $key }}: {{ $val | toYaml | nindent 2 }} {{ $key }}: {{ $val }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- $setContainerHooks := 1 }} {{- $setContainerHooks := 1 }}
@@ -298,21 +310,26 @@ env:
{{- with $container.env }}
{{- range $i, $env := . }}
{{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }}
{{- $setContainerHooks = 0 }}
{{- $setContainerHooks = 0 -}}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }}
{{- $setPodName = 0 }}
{{- $setPodName = 0 -}}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }}
{{- $setRequireJobContainer = 0 }}
{{- $setRequireJobContainer = 0 -}}
{{- end }}
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
{{- $setNodeExtraCaCerts = 0 }}
{{- $setNodeExtraCaCerts = 0 -}}
{{- end }}
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
{{- $setRunnerUpdateCaCerts = 0 }}
{{- $setRunnerUpdateCaCerts = 0 -}}
{{- end }}
- name: {{ $env.name }}
{{- range $envKey, $envVal := $env }}
{{- if ne $envKey "name" }}
{{ $envKey }}: {{ $envVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
- {{ $env | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $setContainerHooks }}
@@ -346,12 +363,17 @@ volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
{{- if eq $volMount.name "work" }}
{{- $mountWork = 0 }}
{{- $mountWork = 0 -}}
{{- end }}
{{- if eq $volMount.name "github-server-tls-cert" }}
{{- $mountGitHubServerTLS = 0 }}
{{- $mountGitHubServerTLS = 0 -}}
{{- end }}
- name: {{ $volMount.name }}
{{- range $mountKey, $mountVal := $volMount }}
{{- if ne $mountKey "name" }}
{{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
- {{ $volMount | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $mountWork }}
@@ -369,14 +391,14 @@ volumeMounts:
{{- define "gha-runner-scale-set.default-mode-runner-containers" -}} {{- define "gha-runner-scale-set.default-mode-runner-containers" -}}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- range $i, $container := .Values.template.spec.containers }} {{- range $i, $container := .Values.template.spec.containers -}}
{{- if ne $container.name "runner" }} {{- if ne $container.name "runner" -}}
- {{ $container | toYaml | nindent 2 }} - {{ $container | toYaml | nindent 2 }}
{{- else }} {{- else }}
- name: {{ $container.name }} - name: {{ $container.name }}
{{- range $key, $val := $container }} {{- range $key, $val := $container }}
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }} {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
{{ $key }}: {{ $val | toYaml | nindent 4 }} {{ $key }}: {{ $val }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- $setNodeExtraCaCerts := 0 }} {{- $setNodeExtraCaCerts := 0 }}
@@ -389,12 +411,17 @@ volumeMounts:
{{- with $container.env }}
{{- range $i, $env := . }}
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
{{- $setNodeExtraCaCerts = 0 }}
{{- $setNodeExtraCaCerts = 0 -}}
{{- end }}
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
{{- $setRunnerUpdateCaCerts = 0 }}
{{- $setRunnerUpdateCaCerts = 0 -}}
{{- end }}
- name: {{ $env.name }}
{{- range $envKey, $envVal := $env }}
{{- if ne $envKey "name" }}
{{ $envKey }}: {{ $envVal | toYaml | nindent 10 }}
{{- end }}
{{- end }}
{{- end }}
- {{ $env | toYaml | nindent 6 }}
{{- end }}
{{- end }}
{{- if $setNodeExtraCaCerts }}
@@ -413,9 +440,14 @@ volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
{{- if eq $volMount.name "github-server-tls-cert" }}
{{- $mountGitHubServerTLS = 0 }}
{{- $mountGitHubServerTLS = 0 -}}
{{- end }}
- name: {{ $volMount.name }}
{{- range $mountKey, $mountVal := $volMount }}
{{- if ne $mountKey "name" }}
{{ $mountKey }}: {{ $mountVal | toYaml | nindent 10 }}
{{- end }}
{{- end }}
{{- end }}
- {{ $volMount | toYaml | nindent 6 }}
{{- end }}
{{- end }}
{{- if $mountGitHubServerTLS }}
@@ -426,125 +458,3 @@ volumeMounts:
{{- end }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.managerRoleName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-manager-role
{{- end }}
{{- define "gha-runner-scale-set.managerRoleBindingName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-manager-role-binding
{{- end }}
{{- define "gha-runner-scale-set.managerServiceAccountName" -}}
{{- $searchControllerDeployment := 1 }}
{{- if .Values.controllerServiceAccount }}
{{- if .Values.controllerServiceAccount.name }}
{{- $searchControllerDeployment = 0 }}
{{- .Values.controllerServiceAccount.name }}
{{- end }}
{{- end }}
{{- if eq $searchControllerDeployment 1 }}
{{- $multiNamespacesCounter := 0 }}
{{- $singleNamespaceCounter := 0 }}
{{- $controllerDeployment := dict }}
{{- $singleNamespaceControllerDeployments := dict }}
{{- $managerServiceAccountName := "" }}
{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
{{- if kindIs "map" $deployment.metadata.labels }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }}
{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}
{{- else }}
{{- $multiNamespacesCounter = add $multiNamespacesCounter 1 }}
{{- $controllerDeployment = $deployment }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
{{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
{{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if gt $multiNamespacesCounter 1 }}
{{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if eq $multiNamespacesCounter 1 }}
{{- with $controllerDeployment.metadata }}
{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
{{- end }}
{{- else if gt $singleNamespaceCounter 0 }}
{{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }}
{{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }}
{{- with $controllerDeployment.metadata }}
{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
{{- end }}
{{- else }}
{{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- end }}
{{- if eq $managerServiceAccountName "" }}
{{- fail "No service account name found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- $managerServiceAccountName }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.managerServiceAccountNamespace" -}}
{{- $searchControllerDeployment := 1 }}
{{- if .Values.controllerServiceAccount }}
{{- if .Values.controllerServiceAccount.namespace }}
{{- $searchControllerDeployment = 0 }}
{{- .Values.controllerServiceAccount.namespace }}
{{- end }}
{{- end }}
{{- if eq $searchControllerDeployment 1 }}
{{- $multiNamespacesCounter := 0 }}
{{- $singleNamespaceCounter := 0 }}
{{- $controllerDeployment := dict }}
{{- $singleNamespaceControllerDeployments := dict }}
{{- $managerServiceAccountNamespace := "" }}
{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
{{- if kindIs "map" $deployment.metadata.labels }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }}
{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}
{{- else }}
{{- $multiNamespacesCounter = add $multiNamespacesCounter 1 }}
{{- $controllerDeployment = $deployment }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
{{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
{{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if gt $multiNamespacesCounter 1 }}
{{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if eq $multiNamespacesCounter 1 }}
{{- with $controllerDeployment.metadata }}
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
{{- end }}
{{- else if gt $singleNamespaceCounter 0 }}
{{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }}
{{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }}
{{- with $controllerDeployment.metadata }}
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
{{- end }}
{{- else }}
{{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- end }}
{{- if eq $managerServiceAccountNamespace "" }}
{{- fail "No service account namespace found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- $managerServiceAccountNamespace }}
{{- end }}
{{- end }}
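These removed helpers discovered the controller's service account at install time by looking up Deployments labeled app.kubernetes.io/part-of=gha-runner-scale-set-controller and reading the service-account labels off the match. As their fail messages note, the discovery can be bypassed by naming the account explicitly; a sketch with placeholder values:

# Explicit opt-out of the deployment discovery (names are examples only)
controllerServiceAccount:
  name: arc-gha-runner-scale-set-controller
  namespace: arc-systems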

View File

@@ -10,23 +10,7 @@ metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/component: "autoscaling-runner-set"
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations:
{{- $containerMode := .Values.containerMode }}
{{- if not (kindIs "string" .Values.githubConfigSecret) }}
actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }}
{{- end }}
actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }}
{{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
{{- end }}
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
{{- end }}
spec:
githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }}
githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }}
@@ -52,21 +36,17 @@ spec:
{{- if .Values.proxy.http }}
http:
url: {{ .Values.proxy.http.url }}
{{- if .Values.proxy.http.credentialSecretRef }}
credentialSecretRef: {{ .Values.proxy.http.credentialSecretRef }}
{{- end }}
{{ end }}
{{- end }}
{{- if .Values.proxy.https }}
https:
url: {{ .Values.proxy.https.url }}
{{- if .Values.proxy.https.credentialSecretRef }}
credentialSecretRef: {{ .Values.proxy.https.credentialSecretRef }}
{{- end }}
{{ end }}
{{- end }}
{{- if and .Values.proxy.noProxy (kindIs "slice" .Values.proxy.noProxy) }}
noProxy: {{ .Values.proxy.noProxy | toYaml | nindent 6}}
{{- end }}
{{ end }}
{{- end }}
{{ end }}
{{- if and (or (kindIs "int64" .Values.minRunners) (kindIs "float64" .Values.minRunners)) (or (kindIs "int64" .Values.maxRunners) (kindIs "float64" .Values.maxRunners)) }}
{{- if gt .Values.minRunners .Values.maxRunners }}
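The proxy stanza above renders from .Values.proxy, and the guard that follows rejects a minRunners greater than maxRunners. A sketch of matching values (hosts and the secret name are placeholders, not defaults):

# Hypothetical values excerpt for the fields rendered above
proxy:
  http:
    url: http://proxy.example.com:8080
    credentialSecretRef: proxy-credentials
  https:
    url: https://proxy.example.com:8443
  noProxy:
    - example.com
minRunners: 1     # must not exceed maxRunners, or rendering fails
maxRunners: 5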
@@ -106,15 +86,14 @@ spec:
{{ $key }}: {{ $val | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- $containerMode := .Values.containerMode }}
{{- if eq .Values.containerMode.type "kubernetes" }}
{{- if eq $containerMode.type "kubernetes" }}
serviceAccountName: {{ default (include "gha-runner-scale-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }}
{{- else }}
serviceAccountName: {{ default (include "gha-runner-scale-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }}
{{- end }}
{{- if or .Values.template.spec.initContainers (eq $containerMode.type "dind") }}
{{- if or .Values.template.spec.initContainers (eq .Values.containerMode.type "dind") }}
initContainers:
{{- if eq $containerMode.type "dind" }}
{{- if eq .Values.containerMode.type "dind" }}
- name: init-dind-externals
{{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
{{- end }}
@@ -123,13 +102,13 @@ spec:
{{- end }}
{{- end }}
containers:
{{- if eq $containerMode.type "dind" }}
{{- if eq .Values.containerMode.type "dind" }}
- name: runner
{{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }}
- name: dind
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }}
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
{{- else if eq $containerMode.type "kubernetes" }}
{{- else if eq .Values.containerMode.type "kubernetes" }}
- name: runner
{{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
@@ -137,16 +116,16 @@ spec:
{{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }} {{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }}
{{- end }} {{- end }}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") $tlsConfig.runnerMountPath }} {{- if or .Values.template.spec.volumes (eq .Values.containerMode.type "dind") (eq .Values.containerMode.type "kubernetes") $tlsConfig.runnerMountPath }}
volumes: volumes:
{{- if $tlsConfig.runnerMountPath }} {{- if $tlsConfig.runnerMountPath }}
{{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }} {{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }}
{{- end }} {{- end }}
{{- if eq $containerMode.type "dind" }} {{- if eq .Values.containerMode.type "dind" }}
{{- include "gha-runner-scale-set.dind-volume" . | nindent 6 }} {{- include "gha-runner-scale-set.dind-volume" . | nindent 6 }}
{{- include "gha-runner-scale-set.dind-work-volume" . | nindent 6 }} {{- include "gha-runner-scale-set.dind-work-volume" . | nindent 6 }}
{{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }} {{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }}
{{- else if eq $containerMode.type "kubernetes" }} {{- else if eq .Values.containerMode.type "kubernetes" }}
{{- include "gha-runner-scale-set.kubernetes-mode-work-volume" . | nindent 6 }} {{- include "gha-runner-scale-set.kubernetes-mode-work-volume" . | nindent 6 }}
{{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }} {{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }}
{{- else }} {{- else }}

View File

@@ -7,7 +7,7 @@ metadata:
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
finalizers:
- actions.github.com/cleanup-protection
- actions.github.com/secret-protection
data:
{{- $hasToken := false }}
{{- $hasAppId := false }}
@@ -36,4 +36,4 @@ data:
{{- if and $hasAppId (or (not $hasInstallationId) (not $hasPrivateKey)) }}
{{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key." }}
{{- end }}
{{- end}}
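The validation above accepts two complete auth shapes for githubConfigSecret: a personal access token, or a GitHub App where the id, installation id, and private key are all present. A sketch (all values are placeholders from the tests):

# Option 1: token auth
githubConfigSecret:
  github_token: "gh_token12345"
# Option 2: GitHub App auth; installation id and private key are both required
#githubConfigSecret:
#  github_app_id: "10"
#  github_app_installation_id: "100"
#  github_app_private_key: "private_key"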

View File

@@ -1,13 +1,10 @@
{{- $containerMode := .Values.containerMode }}
{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
# default permission for runner pod service account in kubernetes mode (container hook)
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
namespace: {{ .Release.Namespace }}
finalizers:
- actions.github.com/cleanup-protection
rules:
- apiGroups: [""]
resources: ["pods"]
@@ -24,4 +21,4 @@ rules:
- apiGroups: [""] - apiGroups: [""]
resources: ["secrets"] resources: ["secrets"]
verbs: ["get", "list", "create", "delete"] verbs: ["get", "list", "create", "delete"]
{{- end }} {{- end }}
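This Role (with the RoleBinding and ServiceAccount in the files that follow) is only rendered when kubernetes container mode is enabled and no explicit runner service account is supplied. A sketch of the triggering values:

# Enables the kubernetes (container hook) mode guarded above; leaving
# template.spec.serviceAccountName unset makes the chart create the
# kube-mode service account, Role, and RoleBinding for the runner pods.
containerMode:
  type: "kubernetes"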

View File

@@ -1,12 +1,9 @@
{{- $containerMode := .Values.containerMode }}
{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
namespace: {{ .Release.Namespace }}
finalizers:
- actions.github.com/cleanup-protection
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -15,4 +12,4 @@ subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,12 +1,9 @@
{{- $containerMode := .Values.containerMode }}
{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
finalizers:
- actions.github.com/cleanup-protection
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
{{- end }}

View File

@@ -1,75 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set.managerRoleName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
app.kubernetes.io/component: manager-role
finalizers:
- actions.github.com/cleanup-protection
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- create
- delete
- get
- patch
- update
{{- if .Values.githubServerTLS }}
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
{{- end }}

View File

@@ -1,18 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
app.kubernetes.io/component: manager-role-binding
finalizers:
- actions.github.com/cleanup-protection
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "gha-runner-scale-set.managerRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set.managerServiceAccountName" . | nindent 4 }}
namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }}

View File

@@ -1,5 +1,4 @@
{{- $containerMode := .Values.containerMode }}
{{- if and (ne .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: v1
kind: ServiceAccount
metadata:
@@ -7,6 +6,4 @@ metadata:
namespace: {{ .Release.Namespace }}
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
finalizers:
- actions.github.com/cleanup-protection
{{- end }}
{{- end }}

View File

@@ -1,13 +1,11 @@
package tests
import (
"fmt"
"path/filepath" "path/filepath"
"strings" "strings"
"testing" "testing"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com"
"github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random" "github.com/gruntwork-io/terratest/modules/random"
@@ -29,10 +27,8 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -45,7 +41,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
assert.Equal(t, namespaceName, githubSecret.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", githubSecret.Name)
assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"]))
assert.Equal(t, "actions.github.com/cleanup-protection", githubSecret.Finalizers[0])
assert.Equal(t, "actions.github.com/secret-protection", githubSecret.Finalizers[0])
}
func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
@@ -64,8 +60,6 @@ func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
"githubConfigSecret.github_app_id": "10", "githubConfigSecret.github_app_id": "10",
"githubConfigSecret.github_app_installation_id": "100", "githubConfigSecret.github_app_installation_id": "100",
"githubConfigSecret.github_app_private_key": "private_key", "githubConfigSecret.github_app_private_key": "private_key",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -93,11 +87,9 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAuthInput(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "", "githubConfigSecret.github_app_id": "",
"githubConfigSecret.github_token": "", "githubConfigSecret.github_token": "",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -120,10 +112,8 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "10", "githubConfigSecret.github_app_id": "10",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -146,10 +136,8 @@ func TestTemplateNotRenderedGitHubSecretWithPredefinedSecret(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secret", "githubConfigSecret": "pre-defined-secret",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -170,10 +158,8 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -190,7 +176,6 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &ars) helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName) assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName)
assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) // no finalizer protections in place
} }
func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
@@ -205,11 +190,9 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes", "containerMode.type": "kubernetes",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -220,7 +203,6 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
assert.Equal(t, namespaceName, serviceAccount.Namespace) assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"}) output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
var role rbacv1.Role var role rbacv1.Role
@@ -228,9 +210,6 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
assert.Equal(t, namespaceName, role.Namespace) assert.Equal(t, namespaceName, role.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])
assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules") assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules")
assert.Equal(t, "pods", role.Rules[0].Resources[0]) assert.Equal(t, "pods", role.Rules[0].Resources[0])
assert.Equal(t, "pods/exec", role.Rules[1].Resources[0]) assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
@@ -243,21 +222,18 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &roleBinding) helm.UnmarshalK8SYaml(t, output, &roleBinding)
assert.Equal(t, namespaceName, roleBinding.Namespace) assert.Equal(t, namespaceName, roleBinding.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role-binding", roleBinding.Name) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.Name)
assert.Len(t, roleBinding.Subjects, 1) assert.Len(t, roleBinding.Subjects, 1)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace) assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name)
assert.Equal(t, "Role", roleBinding.RoleRef.Kind) assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars) helm.UnmarshalK8SYaml(t, output, &ars)
expectedServiceAccountName := "test-runners-gha-runner-scale-set-kube-mode-service-account" assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
} }
func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) { func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
@@ -272,11 +248,9 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"template.spec.serviceAccountName": "test-service-account", "template.spec.serviceAccountName": "test-service-account",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -289,7 +263,6 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &ars) helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, "test-service-account", ars.Spec.Template.Spec.ServiceAccountName) assert.Equal(t, "test-service-account", ars.Spec.Template.Spec.ServiceAccountName)
assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
} }
func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) { func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
@@ -304,10 +277,8 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -322,10 +293,6 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "autoscaling-runner-set", ars.Labels["app.kubernetes.io/component"])
assert.NotEmpty(t, ars.Labels["app.kubernetes.io/version"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret) assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)
@@ -355,11 +322,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"runnerScaleSetName": "test-runner-scale-set-name", "runnerScaleSetName": "test-runner-scale-set-name",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -410,8 +375,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) {
"template.metadata.labels.test2": "test2", "template.metadata.labels.test2": "test2",
"template.metadata.annotations.test3": "test3", "template.metadata.annotations.test3": "test3",
"template.metadata.annotations.test4": "test4", "template.metadata.annotations.test4": "test4",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -451,11 +414,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_MaxRunnersValidationError(t *testi
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "-1", "maxRunners": "-1",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -478,12 +439,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinRunnersValidationError(t *testi
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "1", "maxRunners": "1",
"minRunners": "-1", "minRunners": "-1",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -506,12 +465,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationError(t *te
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "0", "maxRunners": "0",
"minRunners": "1", "minRunners": "1",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -534,12 +491,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationSameValue(t
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "0", "maxRunners": "0",
"minRunners": "0", "minRunners": "0",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -565,11 +520,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMin(t
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"minRunners": "5", "minRunners": "5",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -595,11 +548,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "5", "maxRunners": "5",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -654,10 +605,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId()) namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath}, ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -688,10 +635,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId()) namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath}, ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -724,10 +667,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_K8S_ExtraVolumes(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId()) namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath}, ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -756,11 +695,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "dind", "containerMode.type": "dind",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -847,11 +784,9 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes", "containerMode.type": "kubernetes",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -904,10 +839,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T)
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets", "githubConfigSecret": "pre-defined-secrets",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -938,10 +871,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_ErrorOnEmptyPredefinedSecret(t *te
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "", "githubConfigSecret": "",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -964,15 +895,13 @@ func TestTemplateRenderedWithProxy(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets", "githubConfigSecret": "pre-defined-secrets",
"controllerServiceAccount.name": "arc", "proxy.http.url": "http://proxy.example.com",
"controllerServiceAccount.namespace": "arc-system", "proxy.http.credentialSecretRef": "http-secret",
"proxy.http.url": "http://proxy.example.com", "proxy.https.url": "https://proxy.example.com",
"proxy.http.credentialSecretRef": "http-secret", "proxy.https.credentialSecretRef": "https-secret",
"proxy.https.url": "https://proxy.example.com", "proxy.noProxy": "{example.com,example.org}",
"proxy.https.credentialSecretRef": "https-secret",
"proxy.noProxy": "{example.com,example.org}",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -1032,8 +961,6 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"githubServerTLS.runnerMountPath": "/runner/mount/path", "githubServerTLS.runnerMountPath": "/runner/mount/path",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -1091,8 +1018,6 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"githubServerTLS.runnerMountPath": "/runner/mount/path/", "githubServerTLS.runnerMountPath": "/runner/mount/path/",
"containerMode.type": "dind", "containerMode.type": "dind",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -1150,8 +1075,6 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"githubServerTLS.runnerMountPath": "/runner/mount/path", "githubServerTLS.runnerMountPath": "/runner/mount/path",
"containerMode.type": "kubernetes", "containerMode.type": "kubernetes",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -1209,8 +1132,6 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubConfigSecret": "pre-defined-secrets", "githubConfigSecret": "pre-defined-secrets",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -1263,9 +1184,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubConfigSecret": "pre-defined-secrets", "githubConfigSecret": "pre-defined-secrets",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"containerMode.type": "dind", "containerMode.type": "dind",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -1318,9 +1237,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
"githubConfigSecret": "pre-defined-secrets", "githubConfigSecret": "pre-defined-secrets",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"containerMode.type": "kubernetes", "containerMode.type": "kubernetes",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -1376,10 +1293,8 @@ func TestTemplateNamingConstraints(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
setValues := map[string]string{ setValues := map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "", "githubConfigSecret": "",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
} }
tt := map[string]struct { tt := map[string]struct {
@@ -1424,10 +1339,8 @@ func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions/", "githubConfigUrl": "https://github.com/actions/",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -1441,371 +1354,3 @@ func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) {
assert.Equal(t, "test-runners", ars.Name) assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
} }
func TestTemplate_CreateManagerRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
var managerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerRole)
assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
assert.Equal(t, 6, len(managerRole.Rules))
}
func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "test",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
var managerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerRole)
assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
assert.Equal(t, 7, len(managerRole.Rules))
assert.Equal(t, "configmaps", managerRole.Rules[6].Resources[0])
}
func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
var managerRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name)
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", managerRoleBinding.Finalizers[0])
assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name)
assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace)
}
func TestTemplateRenderedAutoScalingRunnerSet_ExtraContainers(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values_extra_containers.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug")
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
assert.Equal(t, "other", ars.Spec.Template.Spec.Containers[1].Name, "Container name should be other")
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[1].Resources.Limits.Cpu().String(), "CPU Limit should be set")
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[1].Resources.Limits.Memory().String(), "Memory Limit should be set")
assert.Equal(t, "SOME_ENV", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "SOME_ENV should be set")
assert.Equal(t, "SOME_VALUE", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "SOME_ENV should be set to `SOME_VALUE`")
assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set")
assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`")
assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work")
assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work")
assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be work")
assert.Equal(t, corev1.DNSNone, ars.Spec.Template.Spec.DNSPolicy, "DNS Policy should be None")
assert.Equal(t, "192.0.2.1", ars.Spec.Template.Spec.DNSConfig.Nameservers[0], "DNS Nameserver should be set")
}
func TestTemplateRenderedAutoScalingRunnerSet_ExtraPodSpec(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values_extra_pod_spec.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
assert.Equal(t, corev1.DNSNone, ars.Spec.Template.Spec.DNSPolicy, "DNS Policy should be None")
assert.Equal(t, "192.0.2.1", ars.Spec.Template.Spec.DNSConfig.Nameservers[0], "DNS Nameserver should be set")
}
func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values_dind_merge_spec.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug")
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "DOCKER_HOST should be set")
assert.Equal(t, "tcp://localhost:9999", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "DOCKER_HOST should be set to `tcp://localhost:9999`")
assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set")
assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`")
assert.Equal(t, "DOCKER_TLS_VERIFY", ars.Spec.Template.Spec.Containers[0].Env[2].Name, "DOCKER_TLS_VERIFY should be set")
assert.Equal(t, "1", ars.Spec.Template.Spec.Containers[0].Env[2].Value, "DOCKER_TLS_VERIFY should be set to `1`")
assert.Equal(t, "DOCKER_CERT_PATH", ars.Spec.Template.Spec.Containers[0].Env[3].Name, "DOCKER_CERT_PATH should be set")
assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[0].Env[3].Value, "DOCKER_CERT_PATH should be set to `/certs/client`")
assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work")
assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work")
assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
}
func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values_k8s_merge_spec.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug")
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "ACTIONS_RUNNER_CONTAINER_HOOKS should be set")
assert.Equal(t, "/k8s/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "ACTIONS_RUNNER_CONTAINER_HOOKS should be set to `/k8s/index.js`")
assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set")
assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`")
assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[2].Name, "ACTIONS_RUNNER_POD_NAME should be set")
assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[3].Name, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER should be set")
assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work")
assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work")
assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
}
func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
annotationExpectedTests := map[string]*helm.Options{
"GitHub token": {
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
},
"GitHub app": {
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "10",
"githubConfigSecret.github_app_installation_id": "100",
"githubConfigSecret.github_app_private_key": "private_key",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
},
}
for name, options := range annotationExpectedTests {
t.Run("Annotation set: "+name, func(t *testing.T) {
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
assert.NotEmpty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
})
}
t.Run("Annotation should not be set", func(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secret",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
assert.Empty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
})
}
func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"containerMode.type": "kubernetes",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
annotationValues := map[string]string{
actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-runner-scale-set-github-secret",
actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-runner-scale-set-manager-role",
actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-runner-scale-set-manager-role-binding",
actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-runner-scale-set-kube-mode-service-account",
actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-runner-scale-set-kube-mode-role",
actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-runner-scale-set-kube-mode-role-binding",
}
for annotation, value := range annotationValues {
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
}
}

View File

@@ -2,7 +2,4 @@ githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret: githubConfigSecret:
github_token: test github_token: test
maxRunners: 10 maxRunners: 10
minRunners: 5 minRunners: 5
controllerServiceAccount:
name: "arc"
namespace: "arc-system"

View File

@@ -1,31 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
template:
spec:
containers:
- name: runner
image: runner-image:latest
env:
- name: DOCKER_HOST
value: tcp://localhost:9999
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: work
mountPath: /work
- name: others
mountPath: /others
resources:
limits:
memory: "64Mi"
cpu: "250m"
volumes:
- name: work
hostPath:
path: /data
type: Directory
containerMode:
type: dind

View File

@@ -1,46 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
template:
spec:
containers:
- name: runner
image: runner-image:latest
env:
- name: SOME_ENV
value: SOME_VALUE
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: work
mountPath: /work
- name: others
mountPath: /others
resources:
limits:
memory: "64Mi"
cpu: "250m"
- name: other
image: other-image:latest
volumeMounts:
- name: work
mountPath: /work
- name: others
mountPath: /others
resources:
limits:
memory: "64Mi"
cpu: "250m"
volumes:
- name: work
hostPath:
path: /data
type: Directory
dnsPolicy: "None"
dnsConfig:
nameservers:
- 192.0.2.1
containerMode:
type: none

View File

@@ -1,12 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
template:
spec:
containers:
- name: runner
image: runner-image:latest
dnsPolicy: "None"
dnsConfig:
nameservers:
- 192.0.2.1

View File

@@ -1,31 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
template:
spec:
containers:
- name: runner
image: runner-image:latest
env:
- name: ACTIONS_RUNNER_CONTAINER_HOOKS
value: /k8s/index.js
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: work
mountPath: /work
- name: others
mountPath: /others
resources:
limits:
memory: "64Mi"
cpu: "250m"
volumes:
- name: work
hostPath:
path: /data
type: Directory
containerMode:
type: kubernetes

View File

@@ -65,32 +65,28 @@ githubConfigSecret:
# certificateFrom: # certificateFrom:
# configMapKeyRef: # configMapKeyRef:
# name: config-map-name # name: config-map-name
# key: ca.crt # key: ca.pem
# runnerMountPath: /usr/local/share/ca-certificates/ # runnerMountPath: /usr/local/share/ca-certificates/
# containerMode:
# type: "dind" ## type can be set to dind or kubernetes
# ## the following is required when containerMode.type=kubernetes
# kubernetesModeWorkVolumeClaim:
# accessModes: ["ReadWriteOnce"]
# # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to dynamically provision volumes with storageClassName: openebs-hostpath
# storageClassName: "dynamic-blob-storage"
# resources:
# requests:
# storage: 1Gi
## template is the PodSpec for each runner Pod ## template is the PodSpec for each runner Pod
template: template:
## template.spec will be modified if you change the container mode spec:
containers:
- name: runner
image: ghcr.io/actions/actions-runner:latest
command: ["/home/runner/run.sh"]
containerMode:
type: "" ## type can be set to dind or kubernetes
## with containerMode.type=dind, we will populate the template.spec with following pod spec ## with containerMode.type=dind, we will populate the template.spec with following pod spec
## template: ## template:
## spec: ## spec:
## initContainers: ## initContainers:
## - name: init-dind-externals ## - name: initExternalsInternalVolume
## image: ghcr.io/actions/actions-runner:latest ## image: ghcr.io/actions/actions-runner:latest
## command: ["cp", "-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"] ## command: ["cp", "-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"]
## volumeMounts: ## volumeMounts:
## - name: dind-externals ## - name: externalsInternal
## mountPath: /home/runner/tmpDir ## mountPath: /home/runner/tmpDir
## containers: ## containers:
## - name: runner ## - name: runner
@@ -103,9 +99,9 @@ template:
## - name: DOCKER_CERT_PATH ## - name: DOCKER_CERT_PATH
## value: /certs/client ## value: /certs/client
## volumeMounts: ## volumeMounts:
## - name: work ## - name: workingDirectoryInternal
## mountPath: /home/runner/_work ## mountPath: /home/runner/_work
## - name: dind-cert ## - name: dinDInternal
## mountPath: /certs/client ## mountPath: /certs/client
## readOnly: true ## readOnly: true
## - name: dind ## - name: dind
@@ -113,18 +109,18 @@ template:
## securityContext: ## securityContext:
## privileged: true ## privileged: true
## volumeMounts: ## volumeMounts:
## - name: work ## - mountPath: /certs/client
## mountPath: /home/runner/_work ## name: dinDInternal
## - name: dind-cert ## - mountPath: /home/runner/_work
## mountPath: /certs/client ## name: workingDirectoryInternal
## - name: dind-externals ## - mountPath: /home/runner/externals
## mountPath: /home/runner/externals ## name: externalsInternal
## volumes: ## volumes:
## - name: work ## - name: dinDInternal
## emptyDir: {} ## emptyDir: {}
## - name: dind-cert ## - name: workingDirectoryInternal
## emptyDir: {} ## emptyDir: {}
## - name: dind-externals ## - name: externalsInternal
## emptyDir: {} ## emptyDir: {}
###################################################################################################### ######################################################################################################
## with containerMode.type=kubernetes, we will populate the template.spec with following pod spec ## with containerMode.type=kubernetes, we will populate the template.spec with following pod spec
@@ -155,18 +151,13 @@ template:
## resources: ## resources:
## requests: ## requests:
## storage: 1Gi ## storage: 1Gi
spec:
containers:
- name: runner
image: ghcr.io/actions/actions-runner:latest
command: ["/home/runner/run.sh"]
## Optional controller service account that needs to have required Role and RoleBinding ## the following is required when containerMode.type=kubernetes
## to operate this gha-runner-scale-set installation. kubernetesModeWorkVolumeClaim:
## The helm chart will try to find the controller deployment and its service account at installation time. accessModes: ["ReadWriteOnce"]
## In case the helm chart can't find the right service account, you can explicitly pass in the following value # For testing, use https://github.com/rancher/local-path-provisioner to provide dynamic provision volume
## to help it finish RoleBinding with the right service account. # TODO: remove before release
## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly. storageClassName: "dynamic-blob-storage"
# controllerServiceAccount: resources:
# namespace: arc-system requests:
# name: test-arc-gha-runner-scale-set-controller storage: 1Gi

View File

@@ -144,7 +144,7 @@ func TestCustomerServerRootCA(t *testing.T) {
client, err := newActionsClientFromConfig(config, creds) client, err := newActionsClientFromConfig(config, creds)
require.NoError(t, err) require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, "test") _, err = client.GetRunnerScaleSet(ctx, "test")
require.NoError(t, err) require.NoError(t, err)
assert.True(t, serverCalledSuccessfully) assert.True(t, serverCalledSuccessfully)
} }

View File

@@ -80,9 +80,6 @@ spec:
image: image:
description: Required description: Required
type: string type: string
imagePullPolicy:
description: Required
type: string
imagePullSecrets: imagePullSecrets:
description: Required description: Required
items: items:

View File

@@ -17,21 +17,6 @@ spec:
scope: Namespaced scope: Namespaced
versions: versions:
- additionalPrinterColumns: - additionalPrinterColumns:
- jsonPath: .spec.template.spec.enterprise
name: Enterprise
type: string
- jsonPath: .spec.template.spec.organization
name: Organization
type: string
- jsonPath: .spec.template.spec.repository
name: Repository
type: string
- jsonPath: .spec.template.spec.group
name: Group
type: string
- jsonPath: .spec.template.spec.labels
name: Labels
type: string
- jsonPath: .spec.replicas - jsonPath: .spec.replicas
name: Desired name: Desired
type: number type: number

View File

@@ -5,7 +5,7 @@ kind: Kustomization
images: images:
- name: controller - name: controller
newName: summerwind/actions-runner-controller newName: summerwind/actions-runner-controller
newTag: latest newTag: dev
replacements: replacements:
- path: env-replacement.yaml - path: env-replacement.yaml

View File

@@ -56,8 +56,6 @@ spec:
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
- name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
value: IfNotPresent
volumeMounts: volumeMounts:
- name: controller-manager - name: controller-manager
mountPath: "/etc/actions-runner-controller" mountPath: "/etc/actions-runner-controller"

View File

@@ -102,13 +102,6 @@ rules:
- patch - patch
- update - update
- watch - watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/finalizers
verbs:
- patch
- update
- apiGroups: - apiGroups:
- actions.github.com - actions.github.com
resources: resources:

View File

@@ -31,6 +31,6 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `autoscaler.enabled` | Enable the HorizontalRunnerAutoscaler; if it's enabled then replica count will not be used | true | | `autoscaler.enabled` | Enable the HorizontalRunnerAutoscaler; if it's enabled then replica count will not be used | true |
| `autoscaler.minReplicas` | Minimum number of replicas | 1 | | `autoscaler.minReplicas` | Minimum number of replicas | 1 |
| `autoscaler.maxReplicas` | Maximum number of replicas | 5 | | `autoscaler.maxReplicas` | Maximum number of replicas | 5 |
| `autoscaler.scaleDownDelaySecondsAfterScaleOut` | [Anti-Flapping Configuration](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#anti-flapping-configuration) | 120 | | `autoscaler.scaleDownDelaySecondsAfterScaleOut` | [Anti-Flapping Configuration](https://github.com/actions/actions-runner-controller#anti-flapping-configuration) | 120 |
| `autoscaler.metrics` | [Pull driven scaling](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#pull-driven-scaling) | default | | `autoscaler.metrics` | [Pull driven scaling](https://github.com/actions/actions-runner-controller#pull-driven-scaling) | default |
| `autoscaler.scaleUpTriggers` | [Webhook driven scaling](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#webhook-driven-scaling) | | | `autoscaler.scaleUpTriggers` | [Webhook driven scaling](https://github.com/actions/actions-runner-controller#webhook-driven-scaling) | |

View File

@@ -17,7 +17,7 @@ runnerLabels:
replicaCount: 1 replicaCount: 1
# The Runner Group that the runner(s) should be associated with. # The Runner Group that the runner(s) should be associated with.
# See https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/managing-access-to-self-hosted-runners-using-groups. # See https://docs.github.com/en/github-ae@latest/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups.
group: Default group: Default
autoscaler: autoscaler:

View File

@@ -41,6 +41,7 @@ import (
const ( const (
autoscalingListenerContainerName = "autoscaler" autoscalingListenerContainerName = "autoscaler"
autoscalingListenerOwnerKey = ".metadata.controller"
autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer" autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
) )
@@ -85,7 +86,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
} }
if !done { if !done {
log.Info("Waiting for resources to be deleted before removing finalizer") log.Info("Waiting for resources to be deleted before removing finalizer")
return ctrl.Result{Requeue: true}, nil return ctrl.Result{}, nil
} }
log.Info("Removing finalizer") log.Info("Removing finalizer")
@@ -203,7 +204,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return r.createRoleBindingForListener(ctx, autoscalingListener, listenerRole, serviceAccount, log) return r.createRoleBindingForListener(ctx, autoscalingListener, listenerRole, serviceAccount, log)
} }
// Create a secret containing proxy config if specified // Create a secret containing proxy config if specifiec
if autoscalingListener.Spec.Proxy != nil { if autoscalingListener.Spec.Proxy != nil {
proxySecret := new(corev1.Secret) proxySecret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: proxyListenerSecretName(autoscalingListener)}, proxySecret); err != nil { if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: proxyListenerSecretName(autoscalingListener)}, proxySecret); err != nil {
@@ -245,6 +246,66 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
groupVersionIndexer := func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
owner := metav1.GetControllerOf(rawObj)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
return err
}
labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
var requests []reconcile.Request
labels := obj.GetLabels()
namespace, ok := labels["auto-scaling-listener-namespace"]
if !ok {
return nil
}
name, ok := labels["auto-scaling-listener-name"]
if !ok {
return nil
}
requests = append(requests,
reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
},
)
return requests
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.AutoscalingListener{}).
Owns(&corev1.Pod{}).
Owns(&corev1.ServiceAccount{}).
Owns(&corev1.Secret{}).
Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Complete(r)
}
func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) { func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
logger.Info("Cleaning up the listener pod") logger.Info("Cleaning up the listener pod")
listenerPod := new(corev1.Pod) listenerPod := new(corev1.Pod)
@@ -442,7 +503,7 @@ func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Con
} }
logger.Info("Created listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name) logger.Info("Created listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
return ctrl.Result{Requeue: true}, nil return ctrl.Result{}, nil
} }
func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
@@ -463,8 +524,8 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
Name: proxyListenerSecretName(autoscalingListener), Name: proxyListenerSecretName(autoscalingListener),
Namespace: autoscalingListener.Namespace, Namespace: autoscalingListener.Namespace,
Labels: map[string]string{ Labels: map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, "auto-scaling-runner-set-name": autoscalingListener.Spec.AutoscalingRunnerSetName,
}, },
}, },
Data: data, Data: data,
@@ -481,7 +542,7 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
logger.Info("Created listener proxy secret", "namespace", newProxySecret.Namespace, "name", newProxySecret.Name) logger.Info("Created listener proxy secret", "namespace", newProxySecret.Namespace, "name", newProxySecret.Name)
return ctrl.Result{Requeue: true}, nil return ctrl.Result{}, nil
} }
func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
@@ -497,7 +558,7 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
} }
logger.Info("Updated listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash) logger.Info("Updated listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash)
return ctrl.Result{Requeue: true}, nil return ctrl.Result{}, nil
} }
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
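
updateSecretsForListener keeps a mirror secret in sync with its source and logs the data hash it compared. The hashing helper itself is not part of this diff; the sketch below shows one plausible shape, assuming a deterministic digest over the secret's keys and values (`dataHash` and `syncMirror` are illustrative names, not the project's API):

```go
package sketch

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// dataHash is an assumed stand-in for the project's hashing helper: a
// deterministic digest over the secret's keys and values.
func dataHash(data map[string][]byte) string {
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	h := sha256.New()
	for _, k := range keys {
		fmt.Fprintf(h, "%s=%s;", k, data[k])
	}
	return hex.EncodeToString(h.Sum(nil))
}

// syncMirror copies the source data onto the mirror only when the digests
// diverge, so an unchanged secret never triggers a write or a requeue.
func syncMirror(ctx context.Context, c client.Client, src, mirror *corev1.Secret) error {
	if dataHash(src.Data) == dataHash(mirror.Data) {
		return nil
	}
	mirror.Data = src.Data
	return c.Update(ctx, mirror)
}
```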
@@ -555,62 +616,3 @@ func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context
"serviceAccount", serviceAccount.Name) "serviceAccount", serviceAccount.Name)
return ctrl.Result{Requeue: true}, nil return ctrl.Result{Requeue: true}, nil
} }
// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
groupVersionIndexer := func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
owner := metav1.GetControllerOf(rawObj)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil {
return err
}
labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
var requests []reconcile.Request
labels := obj.GetLabels()
namespace, ok := labels["auto-scaling-listener-namespace"]
if !ok {
return nil
}
name, ok := labels["auto-scaling-listener-name"]
if !ok {
return nil
}
requests = append(requests,
reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
},
)
return requests
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.AutoscalingListener{}).
Owns(&corev1.Pod{}).
Owns(&corev1.ServiceAccount{}).
Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Complete(r)
}
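
Both versions of SetupWithManager register a field index that maps a child object to the name of its controlling AutoscalingListener; the payoff is that listing children becomes an indexed cache query instead of a full scan. A condensed sketch of the register-then-query pair (`ownerKey` mirrors the ".metadata.controller" key used in the diff; the function names are placeholders):

```go
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const ownerKey = ".metadata.controller" // same key shape as in the diff

// registerIndex teaches the cache to answer "all Pods controlled by X"
// via an index on the controlling owner's name.
func registerIndex(mgr ctrl.Manager) error {
	return mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, ownerKey,
		func(obj client.Object) []string {
			owner := metav1.GetControllerOf(obj)
			if owner == nil {
				return nil
			}
			return []string{owner.Name}
		})
}

// podsOwnedBy is the indexed query used by both the reconciler and the tests.
func podsOwnedBy(ctx context.Context, c client.Client, ns, owner string) (*corev1.PodList, error) {
	list := new(corev1.PodList)
	err := c.List(ctx, list, client.InNamespace(ns), client.MatchingFields{ownerKey: owner})
	return list, err
}
```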

View File

@@ -213,7 +213,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Eventually( Eventually(
func() error { func() error {
podList := new(corev1.PodList) podList := new(corev1.PodList)
err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name}) err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name})
if err != nil { if err != nil {
return err return err
} }
@@ -231,7 +231,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Eventually( Eventually(
func() error { func() error {
serviceAccountList := new(corev1.ServiceAccountList) serviceAccountList := new(corev1.ServiceAccountList)
err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name}) err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name})
if err != nil { if err != nil {
return err return err
} }

View File

@@ -24,11 +24,9 @@ import (
"strings" "strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr" "github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors" kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
@@ -43,10 +41,13 @@ import (
) )
const ( const (
labelKeyRunnerSpecHash = "runner-spec-hash" // TODO: Replace with shared image.
autoscalingRunnerSetOwnerKey = ".metadata.controller"
LabelKeyRunnerSpecHash = "runner-spec-hash"
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
runnerScaleSetIdAnnotationKey = "runner-scale-set-id" runnerScaleSetIdKey = "runner-scale-set-id"
runnerScaleSetNameAnnotationKey = "runner-scale-set-name" runnerScaleSetNameKey = "runner-scale-set-name"
runnerScaleSetRunnerGroupNameKey = "runner-scale-set-runner-group-name"
) )
// AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object // AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object
@@ -113,17 +114,6 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, err return ctrl.Result{}, err
} }
requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log)
if err != nil {
log.Error(err, "Failed to remove finalizers on dependent resources")
return ctrl.Result{}, err
}
if requeue {
log.Info("Waiting for dependent resources to be deleted")
return ctrl.Result{Requeue: true}, nil
}
log.Info("Removing finalizer") log.Info("Removing finalizer")
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName) controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
@@ -137,22 +127,6 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
if autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion] != build.Version {
if err := r.Delete(ctx, autoscalingRunnerSet); err != nil {
log.Error(err, "Failed to delete autoscaling runner set on version mismatch",
"targetVersion", build.Version,
"actualVersion", autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
)
return ctrl.Result{}, nil
}
log.Info("Autoscaling runner set version doesn't match the build version. Deleting the resource.",
"targetVersion", build.Version,
"actualVersion", autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
)
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) { if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
log.Info("Adding finalizer") log.Info("Adding finalizer")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
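
The left column above also gates reconciliation on a build-version label, deleting any resource stamped by a different controller build instead of adopting it. A reduced sketch of that guard, using stand-in names (`labelKeyVersion` and `buildVersion` are placeholders for LabelKeyKubernetesVersion and build.Version; the label value is invented for the sketch):

```go
package sketch

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// labelKeyVersion and buildVersion are stand-ins for the project's
// LabelKeyKubernetesVersion constant and build.Version variable.
const labelKeyVersion = "example.com/build-version"

var buildVersion = "0.1.0"

// gateOnVersion deletes a resource stamped by a different controller build
// instead of reconciling it. The bool result tells the caller to stop.
func gateOnVersion(ctx context.Context, c client.Client, obj client.Object) (ctrl.Result, bool, error) {
	if obj.GetLabels()[labelKeyVersion] == buildVersion {
		return ctrl.Result{}, false, nil // versions match: keep reconciling
	}
	if err := c.Delete(ctx, obj); err != nil {
		return ctrl.Result{}, true, err
	}
	return ctrl.Result{}, true, nil
}
```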
@@ -166,7 +140,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey] scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]
if !ok { if !ok {
// Need to create a new runner scale set on Actions service // Need to create a new runner scale set on Actions service
log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.") log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.")
@@ -180,14 +154,14 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
} }
// Make sure the runner group of the scale set is up to date // Make sure the runner group of the scale set is up to date
currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName] currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetRunnerGroupNameKey]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 && !strings.EqualFold(currentRunnerGroupName, autoscalingRunnerSet.Spec.RunnerGroup)) { if !ok || (len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 && !strings.EqualFold(currentRunnerGroupName, autoscalingRunnerSet.Spec.RunnerGroup)) {
log.Info("AutoScalingRunnerSet runner group changed. Updating the runner scale set.") log.Info("AutoScalingRunnerSet runner group changed. Updating the runner scale set.")
return r.updateRunnerScaleSetRunnerGroup(ctx, autoscalingRunnerSet, log) return r.updateRunnerScaleSetRunnerGroup(ctx, autoscalingRunnerSet, log)
} }
// Make sure the runner scale set name is up to date // Make sure the runner scale set name is up to date
currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameAnnotationKey] currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameKey]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) { if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) {
log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.") log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.")
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log) return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
@@ -215,10 +189,10 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
for _, runnerSet := range existingRunnerSets.all() { for _, runnerSet := range existingRunnerSets.all() {
log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash]) log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[LabelKeyRunnerSpecHash])
} }
if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] { if desiredSpecHash != latestRunnerSet.Labels[LabelKeyRunnerSpecHash] {
log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set") log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set")
return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log) return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
} }
@@ -246,7 +220,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
} }
// Our listener pod is out of date, so we need to delete it so that it gets recreated. // Our listener pod is out of date, so we need to delete it so that it gets recreated.
if listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() { if listener.Labels[LabelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name) log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
if err := r.Delete(ctx, listener); err != nil { if err := r.Delete(ctx, listener); err != nil {
if kerrors.IsNotFound(err) { if kerrors.IsNotFound(err) {
@@ -332,29 +306,6 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
return nil return nil
} }
func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
c := autoscalingRunnerSetFinalizerDependencyCleaner{
client: r.Client,
autoscalingRunnerSet: autoscalingRunnerSet,
logger: logger,
}
c.removeKubernetesModeRoleBindingFinalizer(ctx)
c.removeKubernetesModeRoleFinalizer(ctx)
c.removeKubernetesModeServiceAccountFinalizer(ctx)
c.removeNoPermissionServiceAccountFinalizer(ctx)
c.removeGitHubSecretFinalizer(ctx)
c.removeManagerRoleBindingFinalizer(ctx)
c.removeManagerRoleFinalizer(ctx)
requeue, err = c.result()
if err != nil {
logger.Error(err, "Failed to cleanup finalizer from dependent resource")
return true, err
}
return requeue, nil
}
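
The removed removeFinalizersFromDependentResources helper drove its chain of cleanup steps through a small accumulator: each remove* method becomes a no-op once requeue or err is set, so the first step that mutates anything ends the pass. The control flow, reduced to its essentials (`cleaner` and `step` are illustrative names):

```go
package sketch

import "context"

// cleaner mirrors the accumulator in the removed helper: once one step
// requests a requeue or records an error, later steps become no-ops for
// the current reconcile pass.
type cleaner struct {
	requeue bool
	err     error
}

// step runs one finalizer-removal function unless an earlier step has
// already decided this pass's outcome.
func (c *cleaner) step(ctx context.Context, remove func(context.Context) (requeue bool, err error)) {
	if c.requeue || c.err != nil {
		return
	}
	c.requeue, c.err = remove(ctx)
}

func (c *cleaner) result() (bool, error) { return c.requeue, c.err }
```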
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
logger.Info("Creating a new runner scale set") logger.Info("Creating a new runner scale set")
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
@@ -365,29 +316,24 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set") logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set")
return ctrl.Result{}, err return ctrl.Result{}, err
} }
runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, autoscalingRunnerSet.Spec.RunnerScaleSetName)
runnerGroupId := 1
if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 {
runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup)
if err != nil {
logger.Error(err, "Failed to get runner group by name", "runnerGroup", autoscalingRunnerSet.Spec.RunnerGroup)
return ctrl.Result{}, err
}
runnerGroupId = int(runnerGroup.ID)
}
runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, runnerGroupId, autoscalingRunnerSet.Spec.RunnerScaleSetName)
if err != nil { if err != nil {
logger.Error(err, "Failed to get runner scale set from Actions service", logger.Error(err, "Failed to get runner scale set from Actions service")
"runnerGroupId",
strconv.Itoa(runnerGroupId),
"runnerScaleSetName",
autoscalingRunnerSet.Spec.RunnerScaleSetName)
return ctrl.Result{}, err return ctrl.Result{}, err
} }
runnerGroupId := 1
if runnerScaleSet == nil { if runnerScaleSet == nil {
if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 {
runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup)
if err != nil {
logger.Error(err, "Failed to get runner group by name", "runnerGroup", autoscalingRunnerSet.Spec.RunnerGroup)
return ctrl.Result{}, err
}
runnerGroupId = int(runnerGroup.ID)
}
runnerScaleSet, err = actionsClient.CreateRunnerScaleSet( runnerScaleSet, err = actionsClient.CreateRunnerScaleSet(
ctx, ctx,
&actions.RunnerScaleSet{ &actions.RunnerScaleSet{
@@ -414,18 +360,12 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
if autoscalingRunnerSet.Annotations == nil { if autoscalingRunnerSet.Annotations == nil {
autoscalingRunnerSet.Annotations = map[string]string{} autoscalingRunnerSet.Annotations = map[string]string{}
} }
if autoscalingRunnerSet.Labels == nil {
autoscalingRunnerSet.Labels = map[string]string{}
}
logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels") logger.Info("Adding runner scale set ID, name and runner group name as an annotation")
if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[runnerScaleSetNameAnnotationKey] = runnerScaleSet.Name obj.Annotations[runnerScaleSetNameKey] = runnerScaleSet.Name
obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id) obj.Annotations[runnerScaleSetIdKey] = strconv.Itoa(runnerScaleSet.Id)
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName obj.Annotations[runnerScaleSetRunnerGroupNameKey] = runnerScaleSet.RunnerGroupName
if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen
logger.Error(err, "Failed to apply GitHub URL labels")
}
}); err != nil { }); err != nil {
logger.Error(err, "Failed to add runner scale set ID, name and runner group name as an annotation") logger.Error(err, "Failed to add runner scale set ID, name and runner group name as an annotation")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -439,7 +379,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
} }
func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
if err != nil { if err != nil {
logger.Error(err, "Failed to parse runner scale set ID") logger.Error(err, "Failed to parse runner scale set ID")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -470,7 +410,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
logger.Info("Updating runner scale set runner group name as an annotation") logger.Info("Updating runner scale set runner group name as an annotation")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = updatedRunnerScaleSet.RunnerGroupName obj.Annotations[runnerScaleSetRunnerGroupNameKey] = updatedRunnerScaleSet.RunnerGroupName
}); err != nil { }); err != nil {
logger.Error(err, "Failed to update runner group name annotation") logger.Error(err, "Failed to update runner group name annotation")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -481,7 +421,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
} }
func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
if err != nil { if err != nil {
logger.Error(err, "Failed to parse runner scale set ID") logger.Error(err, "Failed to parse runner scale set ID")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -506,7 +446,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
logger.Info("Updating runner scale set name as an annotation") logger.Info("Updating runner scale set name as an annotation")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[runnerScaleSetNameAnnotationKey] = updatedRunnerScaleSet.Name obj.Annotations[runnerScaleSetNameKey] = updatedRunnerScaleSet.Name
}); err != nil { }); err != nil {
logger.Error(err, "Failed to update runner scale set name annotation") logger.Error(err, "Failed to update runner scale set name annotation")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -517,28 +457,12 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
} }
func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error { func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
if !ok {
// A missing annotation can occur in 3 scenarios:
// 1. The scale set was never created.
// In this case, we don't need to fetch the actions client to delete a scale set that does not exist.
//
// 2. The scale set has already been deleted by the controller.
// In that case, the controller cleans up the annotation because the scale set no longer exists.
// Removing the scale set id is also useful because permission cleanup will eventually revoke the permission
// assigned on the GitHub secret, causing an Actions client built from that secret to fail with permission denied.
//
// 3. The annotation was removed manually.
// In this case, the controller treats the scale set as already removed from the Actions service,
// and manual deletion of the scale set is required.
return nil
}
logger.Info("Deleting the runner scale set from Actions service") logger.Info("Deleting the runner scale set from Actions service")
runnerScaleSetId, err := strconv.Atoi(scaleSetId) runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
if err != nil { if err != nil {
// If the annotation is not set correctly, we are going to get stuck in a loop trying to parse the scale set id. // If the annotation is not set correctly, or if it does not exist, we are going to get stuck in a loop trying to parse the scale set id.
// If the configuration is invalid (secret does not exist, for example), we never got to the point of creating the runner set. // If the configuration is invalid (secret does not exist, for example), we never get to the point of creating the runner set. But then, manual cleanup
// But then, manual cleanup would get stuck finalizing the resource trying to parse annotation indefinitely // would get stuck finalizing the resource trying to parse annotation indefinitely
logger.Info("autoscaling runner set does not have annotation describing scale set id. Skip deletion", "err", err.Error()) logger.Info("autoscaling runner set does not have annotation describing scale set id. Skip deletion", "err", err.Error())
return nil return nil
} }
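
The right-hand version folds the missing-annotation case into the strconv.Atoi failure path: either way the id is unusable, so deletion is skipped with a log line instead of erroring forever. A compact sketch of that defensive parse (the function name is illustrative):

```go
package sketch

import "strconv"

// scaleSetIDFromAnnotations mirrors the defensive parse in the right-hand
// column: a missing or malformed annotation yields ok=false, so the caller
// skips remote deletion instead of requeueing forever on an unparsable value.
func scaleSetIDFromAnnotations(annotations map[string]string, key string) (id int, ok bool) {
	raw, present := annotations[key]
	if !present {
		return 0, false
	}
	id, err := strconv.Atoi(raw)
	if err != nil {
		return 0, false
	}
	return id, true
}
```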
@@ -555,14 +479,6 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
return err return err
} }
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
delete(obj.Annotations, runnerScaleSetIdAnnotationKey)
})
if err != nil {
logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIdAnnotationKey)
return err
}
logger.Info("Deleted the runner scale set from Actions service") logger.Info("Deleted the runner scale set from Actions service")
return nil return nil
} }
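
The left column additionally clears the scale-set id annotation through the package's patch helper once the remote deletion succeeds. The helper's definition is not shown in this diff; judging only from its call sites, it plausibly wraps client.MergeFrom like this (an inferred sketch, not the project's actual code):

```go
package sketch

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// patch is inferred from the call sites above, which pass a typed mutator
// such as func(obj *v1alpha1.AutoscalingRunnerSet). The real helper may
// differ; this sketch applies the mutation and issues a merge patch.
func patch[T client.Object](ctx context.Context, c client.Client, obj T, mutate func(T)) error {
	base := obj.DeepCopyObject().(T) // snapshot for MergeFrom before mutating
	mutate(obj)
	return c.Patch(ctx, obj, client.MergeFrom(base))
}
```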
@@ -615,7 +531,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) { func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
list := new(v1alpha1.EphemeralRunnerSetList) list := new(v1alpha1.EphemeralRunnerSetList)
if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil { if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingRunnerSet.Name}); err != nil {
return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err) return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
} }
@@ -708,7 +624,7 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
return []string{owner.Name} return []string{owner.Name}
} }
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil { if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, autoscalingRunnerSetOwnerKey, groupVersionIndexer); err != nil {
return err return err
} }
@@ -732,328 +648,6 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
Complete(r) Complete(r)
} }
type autoscalingRunnerSetFinalizerDependencyCleaner struct {
// configuration fields
client client.Client
autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
logger logr.Logger
// fields to operate on
requeue bool
err error
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) {
return c.requeue, c.err
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
roleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName]
if !ok {
c.logger.Info(
"Skipping cleaning up kubernetes mode service account",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleBindingName),
)
return
}
c.logger.Info("Removing finalizer from container mode kubernetes role binding", "name", roleBindingName)
roleBinding := new(rbacv1.RoleBinding)
err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode role binding finalizer has already been removed", "name", roleBindingName)
return
}
err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch kubernetes mode role binding: %w", err)
return
default:
c.logger.Info("Container mode kubernetes role binding has already been deleted", "name", roleBindingName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
roleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName]
if !ok {
c.logger.Info(
"Skipping cleaning up kubernetes mode role",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleName),
)
return
}
c.logger.Info("Removing finalizer from container mode kubernetes role", "name", roleName)
role := new(rbacv1.Role)
err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName)
return
}
err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role")
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch kubernetes mode role: %w", err)
return
default:
c.logger.Info("Container mode kubernetes role has already been deleted", "name", roleName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName]
if !ok {
c.logger.Info(
"Skipping cleaning up kubernetes mode role binding",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeServiceAccountName),
)
return
}
c.logger.Info("Removing finalizer from container mode kubernetes service account", "name", serviceAccountName)
serviceAccount := new(corev1.ServiceAccount)
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName)
return
}
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes service account")
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch kubernetes mode service account: %w", err)
return
default:
c.logger.Info("Container mode kubernetes service account has already been deleted", "name", serviceAccountName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName]
if !ok {
c.logger.Info(
"Skipping cleaning up no permission service account",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyNoPermissionServiceAccountName),
)
return
}
c.logger.Info("Removing finalizer from no permission service account", "name", serviceAccountName)
serviceAccount := new(corev1.ServiceAccount)
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName)
return
}
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch service account: %w", err)
return
default:
c.logger.Info("No permission service account has already been deleted", "name", serviceAccountName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
githubSecretName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName]
if !ok {
c.logger.Info(
"Skipping cleaning up no permission service account",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyGitHubSecretName),
)
return
}
c.logger.Info("Removing finalizer from GitHub secret", "name", githubSecretName)
githubSecret := new(corev1.Secret)
err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(githubSecret, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName)
return
}
err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) {
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
return
case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
c.err = fmt.Errorf("failed to fetch GitHub secret: %w", err)
return
default:
c.logger.Info("GitHub secret has already been deleted", "name", githubSecretName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
managerRoleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName]
if !ok {
c.logger.Info(
"Skipping cleaning up manager role binding",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleBindingName),
)
return
}
c.logger.Info("Removing finalizer from manager role binding", "name", managerRoleBindingName)
roleBinding := new(rbacv1.RoleBinding)
err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName)
return
}
err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch manager role binding: %w", err)
return
default:
c.logger.Info("Manager role binding has already been deleted", "name", managerRoleBindingName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
managerRoleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName]
if !ok {
c.logger.Info(
"Skipping cleaning up manager role",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleName),
)
return
}
c.logger.Info("Removing finalizer from manager role", "name", managerRoleName)
role := new(rbacv1.Role)
err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName)
return
}
err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch manager role: %w", err)
return
default:
c.logger.Info("Manager role has already been deleted", "name", managerRoleName)
return
}
}
// NOTE: if this logic should be used for other resources, // NOTE: if this logic should be used for other resources,
// consider using generics // consider using generics
type EphemeralRunnerSets struct { type EphemeralRunnerSets struct {

View File

@@ -13,7 +13,6 @@ import (
"time" "time"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
ctrl "sigs.k8s.io/controller-runtime" ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -24,10 +23,8 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/fake" "github.com/actions/actions-runner-controller/github/actions/fake"
"github.com/actions/actions-runner-controller/github/actions/testserver" "github.com/actions/actions-runner-controller/github/actions/testserver"
@@ -39,25 +36,13 @@ const (
autoscalingRunnerSetTestGitHubToken = "gh_token" autoscalingRunnerSetTestGitHubToken = "gh_token"
) )
var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() { var _ = Describe("Test AutoScalingRunnerSet controller", func() {
var ctx context.Context var ctx context.Context
var mgr ctrl.Manager var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret var configSecret *corev1.Secret
var originalBuildVersion string
buildVersion := "0.1.0"
BeforeAll(func() {
originalBuildVersion = build.Version
build.Version = buildVersion
})
AfterAll(func() {
build.Version = originalBuildVersion
})
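
Several suites in the left column pin build.Version inside an Ordered container and restore it afterwards, so the version-gated reconciler sees a known value for the whole suite. The save-and-restore idiom, reduced to a self-contained example (the `version` variable stands in for build.Version):

```go
package sketch

import (
	. "github.com/onsi/ginkgo/v2"
)

// version is a placeholder for a package-level global such as build.Version.
var version = "dev"

var _ = Describe("pinning a global for an Ordered suite", Ordered, func() {
	var saved string

	BeforeAll(func() {
		saved = version
		version = "0.1.0" // value under test
	})

	AfterAll(func() {
		version = saved // restore so later suites see the real value
	})

	It("observes the pinned value", func() {
		if version != "0.1.0" {
			Fail("expected pinned version")
		}
	})
})
```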
BeforeEach(func() { BeforeEach(func() {
ctx = context.Background() ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
@@ -80,9 +65,6 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
}, },
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
@@ -135,39 +117,19 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", err return "", err
} }
if _, ok := created.Annotations[runnerScaleSetIdAnnotationKey]; !ok { if _, ok := created.Annotations[runnerScaleSetIdKey]; !ok {
return "", nil return "", nil
} }
if _, ok := created.Annotations[AnnotationKeyGitHubRunnerGroupName]; !ok { if _, ok := created.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok {
return "", nil return "", nil
} }
return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIdAnnotationKey], created.Annotations[AnnotationKeyGitHubRunnerGroupName]), nil return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIdKey], created.Annotations[runnerScaleSetRunnerGroupNameKey]), nil
}, },
autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("1_testgroup"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation") autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("1_testgroup"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation")
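
The assertions above poll with Gomega's Eventually until the reconciler has written the expected annotations. The same polling idiom, extracted into a self-contained helper (`waitForAnnotation` and the intervals are illustrative, not taken from the suite):

```go
package sketch

import (
	"context"
	"time"

	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

// waitForAnnotation polls until the reconciler writes the expected
// annotation value, mirroring the Eventually blocks in the diff.
func waitForAnnotation(ctx context.Context, g Gomega, c client.Client, key client.ObjectKey, annotation, want string) {
	g.Eventually(func() (string, error) {
		obj := new(v1alpha1.AutoscalingRunnerSet)
		if err := c.Get(ctx, key, obj); err != nil {
			return "", err
		}
		return obj.Annotations[annotation], nil
	}, 30*time.Second, 250*time.Millisecond).Should(Equal(want))
}
```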
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
if err != nil {
return "", err
}
if _, ok := created.Labels[LabelKeyGitHubOrganization]; !ok {
return "", nil
}
if _, ok := created.Labels[LabelKeyGitHubRepository]; !ok {
return "", nil
}
return fmt.Sprintf("%s/%s", created.Labels[LabelKeyGitHubOrganization], created.Labels[LabelKeyGitHubRepository]), nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("owner/repo"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's label")
// Check if ephemeral runner set is created // Check if ephemeral runner set is created
Eventually( Eventually(
func() (int, error) { func() (int, error) {
@@ -296,10 +258,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items)) return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
} }
return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil return runnerSetList.Items[0].Labels[LabelKeyRunnerSpecHash], nil
}, },
autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created") autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
// We should create a new listener // We should create a new listener
Eventually( Eventually(
@@ -389,18 +351,18 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", err return "", err
} }
if _, ok := updated.Annotations[AnnotationKeyGitHubRunnerGroupName]; !ok { if _, ok := updated.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok {
return "", nil return "", nil
} }
return updated.Annotations[AnnotationKeyGitHubRunnerGroupName], nil return updated.Annotations[runnerScaleSetRunnerGroupNameKey], nil
}, },
autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the new runner group in its annotation") autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the new runner group in its annotation")
// delete the annotation and it should be re-added // delete the annotation and it should be re-added
patched = autoscalingRunnerSet.DeepCopy() patched = autoscalingRunnerSet.DeepCopy()
delete(patched.Annotations, AnnotationKeyGitHubRunnerGroupName) delete(patched.Annotations, runnerScaleSetRunnerGroupNameKey)
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
@@ -412,11 +374,11 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", err return "", err
} }
if _, ok := updated.Annotations[AnnotationKeyGitHubRunnerGroupName]; !ok { if _, ok := updated.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok {
return "", nil return "", nil
} }
return updated.Annotations[AnnotationKeyGitHubRunnerGroupName], nil return updated.Annotations[runnerScaleSetRunnerGroupNameKey], nil
}, },
autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval, autoscalingRunnerSetTestInterval,
@@ -490,19 +452,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
}) })
}) })
var _ = Describe("Test AutoScalingController updates", Ordered, func() { var _ = Describe("Test AutoScalingController updates", func() {
var originalBuildVersion string
buildVersion := "0.1.0"
BeforeAll(func() {
originalBuildVersion = build.Version
build.Version = buildVersion
})
AfterAll(func() {
build.Version = originalBuildVersion
})
Context("Creating autoscaling runner set with RunnerScaleSetName set", func() { Context("Creating autoscaling runner set with RunnerScaleSetName set", func() {
var ctx context.Context var ctx context.Context
var mgr ctrl.Manager var mgr ctrl.Manager
@@ -511,7 +461,6 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
var configSecret *corev1.Secret var configSecret *corev1.Secret
BeforeEach(func() { BeforeEach(func() {
originalBuildVersion = build.Version
ctx = context.Background() ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
@@ -557,9 +506,6 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
}, },
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
@@ -593,7 +539,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
return "", err return "", err
} }
if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok { if val, ok := ars.Annotations[runnerScaleSetNameKey]; ok {
return val, nil return val, nil
} }
@@ -605,7 +551,6 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
update := autoscalingRunnerSet.DeepCopy() update := autoscalingRunnerSet.DeepCopy()
update.Spec.RunnerScaleSetName = "testset_update" update.Spec.RunnerScaleSetName = "testset_update"
err = k8sClient.Patch(ctx, update, client.MergeFrom(autoscalingRunnerSet)) err = k8sClient.Patch(ctx, update, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to update AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to update AutoScalingRunnerSet")
@@ -617,7 +562,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
return "", err return "", err
} }
if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok { if val, ok := ars.Annotations[runnerScaleSetNameKey]; ok {
return val, nil return val, nil
} }
@@ -630,18 +575,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
}) })
}) })
var _ = Describe("Test AutoscalingController creation failures", Ordered, func() { var _ = Describe("Test AutoscalingController creation failures", func() {
var originalBuildVersion string
buildVersion := "0.1.0"
BeforeAll(func() {
originalBuildVersion = build.Version
build.Version = buildVersion
})
AfterAll(func() {
build.Version = originalBuildVersion
})
Context("When autoscaling runner set creation fails on the client", func() { Context("When autoscaling runner set creation fails on the client", func() {
var ctx context.Context var ctx context.Context
var mgr ctrl.Manager var mgr ctrl.Manager
@@ -672,9 +606,6 @@ var _ = Describe("Test AutoscalingController creation failures", Ordered, func()
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
}, },
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
@@ -753,18 +684,7 @@ var _ = Describe("Test AutoscalingController creation failures", Ordered, func()
}) })
}) })
var _ = Describe("Test client optional configuration", Ordered, func() { var _ = Describe("Test Client optional configuration", func() {
var originalBuildVersion string
buildVersion := "0.1.0"
BeforeAll(func() {
originalBuildVersion = build.Version
build.Version = buildVersion
})
AfterAll(func() {
build.Version = originalBuildVersion
})
Context("When specifying a proxy", func() { Context("When specifying a proxy", func() {
var ctx context.Context var ctx context.Context
var mgr ctrl.Manager var mgr ctrl.Manager
@@ -804,9 +724,6 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
}, },
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "http://example.com/org/repo", GitHubConfigUrl: "http://example.com/org/repo",
@@ -883,9 +800,6 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
}, },
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "http://example.com/org/repo", GitHubConfigUrl: "http://example.com/org/repo",
@@ -1002,9 +916,6 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
}, },
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: server.ConfigURLForOrg("my-org"), GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
@@ -1055,9 +966,6 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
}, },
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
@@ -1108,7 +1016,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
g.Expect(listener.Spec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "listener does not have TLS config") g.Expect(listener.Spec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "listener does not have TLS config")
}, },
autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval, autoscalingListenerTestInterval,
).Should(Succeed(), "tls config is incorrect") ).Should(Succeed(), "tls config is incorrect")
}) })
@@ -1119,9 +1027,6 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
}, },
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
@@ -1168,459 +1073,8 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
g.Expect(runnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "EphemeralRunnerSpec does not have TLS config") g.Expect(runnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "EphemeralRunnerSpec does not have TLS config")
}, },
autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval, autoscalingListenerTestInterval,
).Should(Succeed()) ).Should(Succeed())
}) })
}) })
}) })
var _ = Describe("Test external permissions cleanup", Ordered, func() {
var originalBuildVersion string
buildVersion := "0.1.0"
BeforeAll(func() {
originalBuildVersion = build.Version
build.Version = buildVersion
})
AfterAll(func() {
build.Version = originalBuildVersion
})
It("Should clean up kubernetes mode permissions", func() {
ctx := context.Background()
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
controller := &AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
startManagers(GinkgoT(), mgr)
min := 1
max := 10
autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
"app.kubernetes.io/name": "gha-runner-scale-set",
LabelKeyKubernetesVersion: buildVersion,
},
Annotations: map[string]string{
AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding",
AnnotationKeyKubernetesModeRoleName: "kube-mode-role",
AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account",
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
MaxRunners: &max,
MinRunners: &min,
RunnerGroup: "testgroup",
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "runner",
Image: "ghcr.io/actions/runner",
},
},
},
},
},
}
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName],
Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
},
}
err = k8sClient.Create(ctx, role)
Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role")
serviceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName],
Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
},
}
err = k8sClient.Create(ctx, serviceAccount)
Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode service account")
roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName],
Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: serviceAccount.Name,
Namespace: serviceAccount.Namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
// Kind is the type of resource being referenced
Kind: "Role",
Name: role.Name,
},
}
err = k8sClient.Create(ctx, roleBinding)
Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role binding")
err = k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
Eventually(
func() (string, error) {
created := new(v1alpha1.AutoscalingRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
if err != nil {
return "", err
}
if len(created.Finalizers) == 0 {
return "", nil
}
return created.Finalizers[0], nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")
err = k8sClient.Delete(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set")
err = k8sClient.Delete(ctx, roleBinding)
Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role binding")
err = k8sClient.Delete(ctx, role)
Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role")
err = k8sClient.Delete(ctx, serviceAccount)
Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode service account")
Eventually(
func() bool {
r := new(rbacv1.RoleBinding)
err := k8sClient.Get(ctx, types.NamespacedName{
Name: roleBinding.Name,
Namespace: roleBinding.Namespace,
}, r)
return errors.IsNotFound(err)
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeTrue(), "Expected role binding to be cleaned up")
Eventually(
func() bool {
r := new(rbacv1.Role)
err := k8sClient.Get(ctx, types.NamespacedName{
Name: role.Name,
Namespace: role.Namespace,
}, r)
return errors.IsNotFound(err)
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeTrue(), "Expected role to be cleaned up")
})
It("Should clean up manager permissions and no-permission service account", func() {
ctx := context.Background()
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
controller := &AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
startManagers(GinkgoT(), mgr)
min := 1
max := 10
autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
"app.kubernetes.io/name": "gha-runner-scale-set",
LabelKeyKubernetesVersion: buildVersion,
},
Annotations: map[string]string{
AnnotationKeyManagerRoleName: "manager-role",
AnnotationKeyManagerRoleBindingName: "manager-role-binding",
AnnotationKeyGitHubSecretName: "gh-secret-name",
AnnotationKeyNoPermissionServiceAccountName: "no-permission-sa",
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
MaxRunners: &max,
MinRunners: &min,
RunnerGroup: "testgroup",
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "runner",
Image: "ghcr.io/actions/runner",
},
},
},
},
},
}
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName],
Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
},
Data: map[string][]byte{
"github_token": []byte(defaultGitHubToken),
},
}
err = k8sClient.Create(context.Background(), secret)
Expect(err).NotTo(HaveOccurred(), "failed to create github secret")
autoscalingRunnerSet.Spec.GitHubConfigSecret = secret.Name
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName],
Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
},
}
err = k8sClient.Create(ctx, role)
Expect(err).NotTo(HaveOccurred(), "failed to create manager role")
roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName],
Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "Role",
Name: role.Name,
},
}
err = k8sClient.Create(ctx, roleBinding)
Expect(err).NotTo(HaveOccurred(), "failed to create manager role binding")
noPermissionServiceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName],
Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
},
}
err = k8sClient.Create(ctx, noPermissionServiceAccount)
Expect(err).NotTo(HaveOccurred(), "failed to create no permission service account")
err = k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
Eventually(
func() (string, error) {
created := new(v1alpha1.AutoscalingRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
if err != nil {
return "", err
}
if len(created.Finalizers) == 0 {
return "", nil
}
return created.Finalizers[0], nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")
err = k8sClient.Delete(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set")
err = k8sClient.Delete(ctx, noPermissionServiceAccount)
Expect(err).NotTo(HaveOccurred(), "failed to delete no permission service account")
err = k8sClient.Delete(ctx, secret)
Expect(err).NotTo(HaveOccurred(), "failed to delete GitHub secret")
err = k8sClient.Delete(ctx, roleBinding)
Expect(err).NotTo(HaveOccurred(), "failed to delete manager role binding")
err = k8sClient.Delete(ctx, role)
Expect(err).NotTo(HaveOccurred(), "failed to delete manager role")
Eventually(
func() bool {
r := new(corev1.ServiceAccount)
err := k8sClient.Get(
ctx,
types.NamespacedName{
Name: noPermissionServiceAccount.Name,
Namespace: noPermissionServiceAccount.Namespace,
},
r,
)
return errors.IsNotFound(err)
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeTrue(), "Expected no permission service account to be cleaned up")
Eventually(
func() bool {
r := new(corev1.Secret)
err := k8sClient.Get(ctx, types.NamespacedName{
Name: secret.Name,
Namespace: secret.Namespace,
}, r)
return errors.IsNotFound(err)
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeTrue(), "Expected role binding to be cleaned up")
Eventually(
func() bool {
r := new(rbacv1.RoleBinding)
err := k8sClient.Get(ctx, types.NamespacedName{
Name: roleBinding.Name,
Namespace: roleBinding.Namespace,
}, r)
return errors.IsNotFound(err)
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeTrue(), "Expected role binding to be cleaned up")
Eventually(
func() bool {
r := new(rbacv1.Role)
err := k8sClient.Get(
ctx,
types.NamespacedName{
Name: role.Name,
Namespace: role.Namespace,
},
r,
)
return errors.IsNotFound(err)
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeTrue(), "Expected role to be cleaned up")
})
})
var _ = Describe("Test resource version and build version mismatch", func() {
It("Should delete and recreate the autoscaling runner set to match the build version", func() {
ctx := context.Background()
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
controller := &AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
originalVersion := build.Version
defer func() {
build.Version = originalVersion
}()
build.Version = "0.2.0"
min := 1
max := 10
autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
"app.kubernetes.io/name": "gha-runner-scale-set",
"app.kubernetes.io/version": "0.1.0",
},
Annotations: map[string]string{
AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding",
AnnotationKeyKubernetesModeRoleName: "kube-mode-role",
AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account",
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
MaxRunners: &max,
MinRunners: &min,
RunnerGroup: "testgroup",
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "runner",
Image: "ghcr.io/actions/runner",
},
},
},
},
},
}
// create autoscaling runner set before starting a manager
err = k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred())
startManagers(GinkgoT(), mgr)
Eventually(func() bool {
ars := new(v1alpha1.AutoscalingRunnerSet)
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Name}, ars)
return errors.IsNotFound(err)
}).Should(BeTrue())
})
})
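The assertions above all follow one pattern: delete the object, then poll the API server with Eventually until Get reports NotFound, which proves the finalizers were released and garbage collection finished. Below is a minimal stand-alone sketch of that polling loop, assuming controller-runtime's client; the waitForDeletion helper is a hypothetical name, not part of this codebase.

package actionsgithubcom

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForDeletion mirrors the Eventually(...IsNotFound...) assertions above:
// it polls the API server until the object named by key is gone, or times out.
func waitForDeletion(ctx context.Context, c client.Client, key types.NamespacedName, obj client.Object, timeout, interval time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if err := c.Get(ctx, key, obj); errors.IsNotFound(err) {
			return true
		}
		time.Sleep(interval)
	}
	return false
}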

View File

@@ -1,7 +1,5 @@
package actionsgithubcom
-import corev1 "k8s.io/api/core/v1"
const (
LabelKeyRunnerTemplateHash = "runner-template-hash"
LabelKeyPodTemplateHash = "pod-template-hash"
@@ -18,47 +16,3 @@ const (
EnvVarHTTPSProxy = "https_proxy"
EnvVarNoProxy = "no_proxy"
)
-// Labels applied to resources
-const (
-// Kubernetes labels
-LabelKeyKubernetesPartOf = "app.kubernetes.io/part-of"
-LabelKeyKubernetesComponent = "app.kubernetes.io/component"
-LabelKeyKubernetesVersion = "app.kubernetes.io/version"
-// Github labels
-LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name"
-LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
-LabelKeyGitHubEnterprise = "actions.github.com/enterprise"
-LabelKeyGitHubOrganization = "actions.github.com/organization"
-LabelKeyGitHubRepository = "actions.github.com/repository"
-)
-// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
-const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
-const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
-// Labels applied to listener roles
-const (
-labelKeyListenerName = "auto-scaling-listener-name"
-labelKeyListenerNamespace = "auto-scaling-listener-namespace"
-)
-// Annotations applied for later cleanup of resources
-const (
-AnnotationKeyManagerRoleBindingName = "actions.github.com/cleanup-manager-role-binding"
-AnnotationKeyManagerRoleName = "actions.github.com/cleanup-manager-role-name"
-AnnotationKeyKubernetesModeRoleName = "actions.github.com/cleanup-kubernetes-mode-role-name"
-AnnotationKeyKubernetesModeRoleBindingName = "actions.github.com/cleanup-kubernetes-mode-role-binding-name"
-AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name"
-AnnotationKeyGitHubSecretName = "actions.github.com/cleanup-github-secret-name"
-AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name"
-)
-// DefaultScaleSetListenerImagePullPolicy is the default pull policy applied
-// to the listener when ImagePullPolicy is not specified
-const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent
-// ownerKey is field selector matching the owner name of a particular resource
-const resourceOwnerKey = ".metadata.controller"

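AutoscalingRunnerSetCleanupFinalizerName, removed above, is the finalizer the cleanup tests earlier in this diff attach to dependent roles, role bindings, service accounts and secrets. A minimal sketch of the release step a controller performs before Kubernetes can finish deleting such a resource, assuming controller-runtime's controllerutil helpers; releaseCleanupFinalizer is a hypothetical name.

package actionsgithubcom

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// releaseCleanupFinalizer strips the cleanup-protection finalizer from a
// dependent service account so a pending delete can complete.
func releaseCleanupFinalizer(ctx context.Context, c client.Client, sa *corev1.ServiceAccount) error {
	if !controllerutil.ContainsFinalizer(sa, AutoscalingRunnerSetCleanupFinalizerName) {
		return nil
	}
	patch := client.MergeFrom(sa.DeepCopy())
	controllerutil.RemoveFinalizer(sa, AutoscalingRunnerSetCleanupFinalizerName)
	return c.Patch(ctx, sa, patch)
}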
View File

@@ -107,7 +107,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
if !done {
log.Info("Waiting for ephemeral runner owned resources to be deleted")
-return ctrl.Result{Requeue: true}, nil
+return ctrl.Result{}, nil
}
done, err = r.cleanupContainerHooksResources(ctx, ephemeralRunner, log)
@@ -643,7 +643,7 @@ func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1
}
log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
-return ctrl.Result{Requeue: true}, nil
+return ctrl.Result{}, nil
}
// updateRunStatusFromPod is responsible for updating non-exiting statuses.
@@ -792,6 +792,7 @@ func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.EphemeralRunner{}).
Owns(&corev1.Pod{}).
+Owns(&corev1.Secret{}).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Named("ephemeral-runner-controller").
Complete(r)

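Owns(&corev1.Secret{}) only delivers reconcile events for secrets whose controller owner reference points at an EphemeralRunner, which is what allows the two Requeue: true results above to be dropped in favor of watch-driven requeues. A minimal sketch of creating such an owned secret, assuming controllerutil.SetControllerReference; newOwnedJITSecret and its arguments are illustrative, not the controller's actual code.

package actionsgithubcom

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

// newOwnedJITSecret builds a JIT-config secret owned by its runner. Without
// the controller reference, the Owns(&corev1.Secret{}) watch never fires.
func newOwnedJITSecret(runner *v1alpha1.EphemeralRunner, scheme *runtime.Scheme, jitToken string) (*corev1.Secret, error) {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      runner.Name,
			Namespace: runner.Namespace,
		},
		Data: map[string][]byte{"jitToken": []byte(jitToken)},
	}
	if err := controllerutil.SetControllerReference(runner, secret, scheme); err != nil {
		return nil, err
	}
	return secret, nil
}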
View File

@@ -40,7 +40,8 @@ import (
)
const (
-ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer"
+ephemeralRunnerSetReconcilerOwnerKey = ".metadata.controller"
+ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer"
)
// EphemeralRunnerSetReconciler reconciles a EphemeralRunnerSet object
@@ -55,7 +56,6 @@ type EphemeralRunnerSetReconciler struct {
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/finalizers,verbs=update;patch
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get
@@ -146,7 +146,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
ctx,
ephemeralRunnerList,
client.InNamespace(req.Namespace),
-client.MatchingFields{resourceOwnerKey: req.Name},
+client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: req.Name},
)
if err != nil {
log.Error(err, "Unable to list child ephemeral runners")
@@ -242,7 +242,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e
func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
-err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
+err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name})
if err != nil {
return false, fmt.Errorf("failed to list child ephemeral runners: %v", err)
}
@@ -356,9 +356,10 @@ func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ep
ObjectMeta: metav1.ObjectMeta{
Name: proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet),
Namespace: ephemeralRunnerSet.Namespace,
Labels: map[string]string{
-LabelKeyGitHubScaleSetName: ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName],
-LabelKeyGitHubScaleSetNamespace: ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace],
+// TODO: figure out autoScalingRunnerSet name and set it as a label for this secret
+// "auto-scaling-runner-set-namespace": ephemeralRunnerSet.Namespace,
+// "auto-scaling-runner-set-name": ephemeralRunnerSet.Name,
},
},
Data: proxySecretData,
@@ -521,7 +522,7 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte
// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
// Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups.
-if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string {
+if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, ephemeralRunnerSetReconcilerOwnerKey, func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
// grab the job object, extract the owner...

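The extractor passed to IndexField is cut off in this view; the standard controller-runtime pattern it follows is sketched below under that assumption, with a hypothetical ephemeralRunnerOwnerName name. The strings it returns become the values the MatchingFields lookups in Reconcile and cleanUpEphemeralRunners match against.

package actionsgithubcom

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

// ephemeralRunnerOwnerName indexes an EphemeralRunner under the name of the
// EphemeralRunnerSet that controls it, or under nothing at all.
func ephemeralRunnerOwnerName(rawObj client.Object) []string {
	owner := metav1.GetControllerOf(rawObj)
	if owner == nil {
		return nil
	}
	// Only index runners controlled by an EphemeralRunnerSet of our API group.
	if owner.APIVersion != v1alpha1.GroupVersion.String() || owner.Kind != "EphemeralRunnerSet" {
		return nil
	}
	return []string{owner.Name}
}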
View File

@@ -1141,13 +1141,12 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status")
-currentRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
-err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, currentRunnerSet)
+updatedRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
+err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, updatedRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
-updatedRunnerSet := currentRunnerSet.DeepCopy()
updatedRunnerSet.Spec.Replicas = 0
-err = k8sClient.Patch(ctx, updatedRunnerSet, client.MergeFrom(currentRunnerSet))
+err = k8sClient.Update(ctx, updatedRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// wait for server to be called

View File

@@ -8,7 +8,6 @@ import (
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
-"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/hash"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@@ -20,90 +19,14 @@ const (
jitTokenKey = "jitToken"
)
-var commonLabelKeys = [...]string{
-LabelKeyKubernetesPartOf,
-LabelKeyKubernetesComponent,
-LabelKeyKubernetesVersion,
-LabelKeyGitHubScaleSetName,
-LabelKeyGitHubScaleSetNamespace,
-LabelKeyGitHubEnterprise,
-LabelKeyGitHubOrganization,
-LabelKeyGitHubRepository,
-}
-const labelValueKubernetesPartOf = "gha-runner-scale-set"
-// scaleSetListenerImagePullPolicy is applied to all listeners
-var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy
-func SetListenerImagePullPolicy(pullPolicy string) bool {
-switch p := corev1.PullPolicy(pullPolicy); p {
-case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent:
-scaleSetListenerImagePullPolicy = p
-return true
-default:
-return false
-}
-}
+// labels applied to resources
+const (
+LabelKeyAutoScaleRunnerSetName = "auto-scaling-runner-set-name"
+LabelKeyAutoScaleRunnerSetNamespace = "auto-scaling-runner-set-namespace"
+)
type resourceBuilder struct{}
-func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
-runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
-if err != nil {
-return nil, err
-}
-effectiveMinRunners := 0
-effectiveMaxRunners := math.MaxInt32
-if autoscalingRunnerSet.Spec.MaxRunners != nil {
-effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
-}
-if autoscalingRunnerSet.Spec.MinRunners != nil {
-effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
-}
-githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl)
-if err != nil {
-return nil, fmt.Errorf("failed to parse github config from url: %v", err)
-}
-autoscalingListener := &v1alpha1.AutoscalingListener{
-ObjectMeta: metav1.ObjectMeta{
-Name: scaleSetListenerName(autoscalingRunnerSet),
-Namespace: namespace,
-Labels: map[string]string{
-LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
-LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
-LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
-LabelKeyKubernetesComponent: "runner-scale-set-listener",
-LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
-LabelKeyGitHubEnterprise: githubConfig.Enterprise,
-LabelKeyGitHubOrganization: githubConfig.Organization,
-LabelKeyGitHubRepository: githubConfig.Repository,
-labelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
-},
-},
-Spec: v1alpha1.AutoscalingListenerSpec{
-GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
-GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
-RunnerScaleSetId: runnerScaleSetId,
-AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
-AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
-EphemeralRunnerSetName: ephemeralRunnerSet.Name,
-MinRunners: effectiveMinRunners,
-MaxRunners: effectiveMaxRunners,
-Image: image,
-ImagePullPolicy: scaleSetListenerImagePullPolicy,
-ImagePullSecrets: imagePullSecrets,
-Proxy: autoscalingRunnerSet.Spec.Proxy,
-GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
-},
-}
-return autoscalingListener, nil
-}
func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
listenerEnv := []corev1.EnvVar{
{
@@ -196,7 +119,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
Name: autoscalingListenerContainerName,
Image: autoscalingListener.Spec.Image,
Env: listenerEnv,
-ImagePullPolicy: autoscalingListener.Spec.ImagePullPolicy,
+ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{
"/github-runnerscaleset-listener",
},
@@ -206,11 +129,6 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
RestartPolicy: corev1.RestartPolicyNever,
}
-labels := make(map[string]string, len(autoscalingListener.Labels))
-for key, val := range autoscalingListener.Labels {
-labels[key] = val
-}
newRunnerScaleSetListenerPod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -219,7 +137,10 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
ObjectMeta: metav1.ObjectMeta{
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Namespace,
-Labels: labels,
+Labels: map[string]string{
+LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
+},
},
Spec: podSpec,
}
@@ -227,14 +148,47 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
return newRunnerScaleSetListenerPod
}
+func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
+runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
+if err != nil {
+return nil, err
+}
+runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
+newLabels := map[string]string{}
+newLabels[LabelKeyRunnerSpecHash] = runnerSpecHash
+newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
+TypeMeta: metav1.TypeMeta{},
+ObjectMeta: metav1.ObjectMeta{
+GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
+Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
+Labels: newLabels,
+},
+Spec: v1alpha1.EphemeralRunnerSetSpec{
+Replicas: 0,
+EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
+RunnerScaleSetId: runnerScaleSetId,
+GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
+GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
+Proxy: autoscalingRunnerSet.Spec.Proxy,
+GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
+PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
+},
+},
+}
+return newEphemeralRunnerSet, nil
+}
func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerServiceAccountName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: map[string]string{
-LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
+LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
},
},
}
@@ -248,11 +202,11 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: map[string]string{
-LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
-labelKeyListenerNamespace: autoscalingListener.Namespace,
-labelKeyListenerName: autoscalingListener.Name,
+LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
+"auto-scaling-listener-namespace": autoscalingListener.Namespace,
+"auto-scaling-listener-name": autoscalingListener.Name,
"role-policy-rules-hash": rulesHash,
},
},
Rules: rules,
@@ -282,12 +236,12 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: map[string]string{
-LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
-labelKeyListenerNamespace: autoscalingListener.Namespace,
-labelKeyListenerName: autoscalingListener.Name,
+LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
+"auto-scaling-listener-namespace": autoscalingListener.Namespace,
+"auto-scaling-listener-name": autoscalingListener.Name,
"role-binding-role-ref-hash": roleRefHash,
"role-binding-subject-hash": subjectHash,
},
},
RoleRef: roleRef,
@@ -305,9 +259,9 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: map[string]string{
-LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
+LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"secret-data-hash": dataHash,
},
},
Data: secret.DeepCopy().Data,
@@ -316,79 +270,56 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
return newListenerSecret
}
-func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
-runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
+func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
+runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
if err != nil {
return nil, err
}
-runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
-newLabels := map[string]string{
-labelKeyRunnerSpecHash: runnerSpecHash,
-LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
-LabelKeyKubernetesComponent: "runner-set",
-LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
-LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
-LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
-}
-if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
-return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
-}
-newAnnotations := map[string]string{
-AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
-}
-newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
-TypeMeta: metav1.TypeMeta{},
-ObjectMeta: metav1.ObjectMeta{
-GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
-Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
-Labels: newLabels,
-Annotations: newAnnotations,
-},
-Spec: v1alpha1.EphemeralRunnerSetSpec{
-Replicas: 0,
-EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
-RunnerScaleSetId: runnerScaleSetId,
-GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
-GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
-Proxy: autoscalingRunnerSet.Spec.Proxy,
-GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
-PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
-},
-},
-}
-return newEphemeralRunnerSet, nil
+effectiveMinRunners := 0
+effectiveMaxRunners := math.MaxInt32
+if autoscalingRunnerSet.Spec.MaxRunners != nil {
+effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
+}
+if autoscalingRunnerSet.Spec.MinRunners != nil {
+effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
+}
+autoscalingListener := &v1alpha1.AutoscalingListener{
+ObjectMeta: metav1.ObjectMeta{
+Name: scaleSetListenerName(autoscalingRunnerSet),
+Namespace: namespace,
+Labels: map[string]string{
+LabelKeyAutoScaleRunnerSetNamespace: autoscalingRunnerSet.Namespace,
+LabelKeyAutoScaleRunnerSetName: autoscalingRunnerSet.Name,
+LabelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
+},
+},
+Spec: v1alpha1.AutoscalingListenerSpec{
+GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
+GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
+RunnerScaleSetId: runnerScaleSetId,
+AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
+AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
+EphemeralRunnerSetName: ephemeralRunnerSet.Name,
+MinRunners: effectiveMinRunners,
+MaxRunners: effectiveMaxRunners,
+Image: image,
+ImagePullSecrets: imagePullSecrets,
+Proxy: autoscalingRunnerSet.Spec.Proxy,
+GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
+},
+}
+return autoscalingListener, nil
}
func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
-labels := make(map[string]string)
-for _, key := range commonLabelKeys {
-switch key {
-case LabelKeyKubernetesComponent:
-labels[key] = "runner"
-default:
-v, ok := ephemeralRunnerSet.Labels[key]
-if !ok {
-continue
-}
-labels[key] = v
-}
-}
-annotations := make(map[string]string)
-for key, val := range ephemeralRunnerSet.Annotations {
-annotations[key] = val
-}
return &v1alpha1.EphemeralRunner{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: ephemeralRunnerSet.Name + "-runner-",
Namespace: ephemeralRunnerSet.Namespace,
-Labels: labels,
-Annotations: annotations,
},
Spec: ephemeralRunnerSet.Spec.EphemeralRunnerSpec,
}
@@ -406,7 +337,6 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
for k, v := range runner.Spec.PodTemplateSpec.Labels {
labels[k] = v
}
-labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
for k, v := range runner.ObjectMeta.Annotations {
annotations[k] = v
@@ -422,6 +352,8 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
runner.Status.RunnerJITConfig,
)
+labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
objectMeta := metav1.ObjectMeta{
Name: runner.ObjectMeta.Name,
Namespace: runner.ObjectMeta.Namespace,
@@ -537,22 +469,3 @@ func rulesForListenerRole(resourceNames []string) []rbacv1.PolicyRule {
},
}
}
-func applyGitHubURLLabels(url string, labels map[string]string) error {
-githubConfig, err := actions.ParseGitHubConfigFromURL(url)
-if err != nil {
-return fmt.Errorf("failed to parse github config from url: %v", err)
-}
-if len(githubConfig.Enterprise) > 0 {
-labels[LabelKeyGitHubEnterprise] = githubConfig.Enterprise
-}
-if len(githubConfig.Organization) > 0 {
-labels[LabelKeyGitHubOrganization] = githubConfig.Organization
-}
-if len(githubConfig.Repository) > 0 {
-labels[LabelKeyGitHubRepository] = githubConfig.Repository
-}
-return nil
-}

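Both generations of the builder above key listener metadata off the scale set's GitHub config URL; the removed applyGitHubURLLabels delegates the parsing to actions.ParseGitHubConfigFromURL. A small stand-alone sketch of that call, assuming only the field names visible in this diff:

package main

import (
	"fmt"

	"github.com/actions/actions-runner-controller/github/actions"
)

func main() {
	cfg, err := actions.ParseGitHubConfigFromURL("https://github.com/org/repo")
	if err != nil {
		panic(err)
	}
	// For a repository URL, Enterprise stays empty and the org/repo parts are
	// split out, matching the expectations in the removed TestLabelPropagation
	// shown in the next file.
	fmt.Printf("enterprise=%q organization=%q repository=%q\n",
		cfg.Enterprise, cfg.Organization, cfg.Repository)
}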
View File

@@ -1,93 +0,0 @@
package actionsgithubcom
import (
"context"
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestLabelPropagation(t *testing.T) {
autoscalingRunnerSet := v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-scale-set",
Namespace: "test-ns",
Labels: map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesVersion: "0.2.0",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/org/repo",
},
}
var b resourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(&autoscalingRunnerSet)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion])
assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise])
assert.Equal(t, "org", ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization])
assert.Equal(t, "repo", ephemeralRunnerSet.Labels[LabelKeyGitHubRepository])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName])
listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion])
assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise])
assert.Equal(t, "org", listener.Labels[LabelKeyGitHubOrganization])
assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository])
listenerServiceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
listenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
listenerPod := b.newScaleSetListenerPod(listener, listenerServiceAccount, listenerSecret)
assert.Equal(t, listenerPod.Labels, listener.Labels)
ephemeralRunner := b.newEphemeralRunner(ephemeralRunnerSet)
require.NoError(t, err)
for _, key := range commonLabelKeys {
if key == LabelKeyKubernetesComponent {
continue
}
assert.Equal(t, ephemeralRunnerSet.Labels[key], ephemeralRunner.Labels[key])
}
assert.Equal(t, "runner", ephemeralRunner.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunner.Annotations[AnnotationKeyGitHubRunnerGroupName])
runnerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
pod := b.newEphemeralRunnerPod(context.TODO(), ephemeralRunner, runnerSecret)
for key := range ephemeralRunner.Labels {
assert.Equal(t, ephemeralRunner.Labels[key], pod.Labels[key])
}
}

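TestLabelPropagation above pins down the operational contract of those labels: every resource belonging to a scale set is selectable by one label pair. A minimal sketch of such a query from any Go client, assuming the actions.github.com label keys this diff removes and an illustrative scale set name:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	c, err := client.New(config.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}
	pods := new(corev1.PodList)
	// "my-scale-set" and "arc-runners" are placeholder values.
	err = c.List(context.Background(), pods, client.MatchingLabels{
		"actions.github.com/scale-set-name":      "my-scale-set",
		"actions.github.com/scale-set-namespace": "arc-runners",
	})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		fmt.Println(p.Name)
	}
}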
View File

@@ -115,7 +115,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
}()
// respond ok to GET / e.g. for health check
-if strings.ToUpper(r.Method) == http.MethodGet {
+if r.Method == http.MethodGet {
ok = true
fmt.Fprintln(w, "webhook server is running")
return
@@ -210,23 +210,13 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
if e.GetAction() == "queued" {
target.Amount = 1
break
-} else if e.GetAction() == "completed" && e.GetWorkflowJob().GetConclusion() != "skipped" {
-// We want to filter out "completed" events sent by check runs.
-// See https://github.com/actions/actions-runner-controller/issues/2118
-// and https://github.com/actions/actions-runner-controller/pull/2119
-// But canceled events have runner_id == 0 and GetRunnerID() returns 0 when RunnerID == nil,
-// so we need to be more specific in filtering out the check runs.
-// See example check run completion at https://gist.github.com/nathanklick/268fea6496a4d7b14cecb2999747ef84
-if e.GetWorkflowJob().GetConclusion() == "success" && e.GetWorkflowJob().RunnerID == nil {
-log.V(1).Info("Ignoring workflow_job event because it does not relate to a self-hosted runner")
-} else {
-// A negative amount is processed in the tryScale func as a scale-down request,
-// that erases the oldest CapacityReservation with the same amount.
-// If the first CapacityReservation was with Replicas=1, this negative scale target erases that,
-// so that the resulting desired replicas decreases by 1.
-target.Amount = -1
-break
-}
+} else if e.GetAction() == "completed" && e.GetWorkflowJob().GetConclusion() != "skipped" && e.GetWorkflowJob().GetRunnerID() > 0 {
+// A negative amount is processed in the tryScale func as a scale-down request,
+// that erases the oldest CapacityReservation with the same amount.
+// If the first CapacityReservation was with Replicas=1, this negative scale target erases that,
+// so that the resulting desired replicas decreases by 1.
+target.Amount = -1
+break
}
// If the conclusion is "skipped", we will ignore it and fallthrough to the default case.
fallthrough

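The condition on the right compresses the check-run filter into one guard: only completed jobs that were not skipped and actually held a runner (RunnerID > 0) may produce a scale-down. A minimal sketch of that decision as a pure function, assuming go-github's WorkflowJobEvent accessors as used in this file; the /v47 import path is an assumption about the pinned major version.

package main

import (
	"fmt"

	"github.com/google/go-github/v47/github"
)

// scaleDelta returns the capacity-reservation delta for one workflow_job
// event: +1 for queued, -1 for a completed job that really held a runner,
// 0 for everything else.
func scaleDelta(e *github.WorkflowJobEvent) int {
	switch e.GetAction() {
	case "queued":
		return 1
	case "completed":
		// Skipped jobs never consumed a runner, and RunnerID == 0 marks
		// completions (e.g. check runs) that ran nowhere we manage.
		if e.GetWorkflowJob().GetConclusion() != "skipped" && e.GetWorkflowJob().GetRunnerID() > 0 {
			return -1
		}
	}
	return 0
}

func main() {
	e := &github.WorkflowJobEvent{
		Action: github.String("completed"),
		WorkflowJob: &github.WorkflowJob{
			Conclusion: github.String("success"),
			RunnerID:   github.Int64(42),
		},
	}
	fmt.Println(scaleDelta(e)) // -1
}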
View File

@@ -105,14 +105,12 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
Log: logf.Log,
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
GitHubClient: multiClient,
+RunnerImage: "example/runner:test",
+DockerImage: "example/docker:test",
Name: controllerName("runner"),
RegistrationRecheckInterval: time.Millisecond * 100,
RegistrationRecheckJitter: time.Millisecond * 10,
UnregistrationRetryDelay: 1 * time.Second,
-RunnerPodDefaults: RunnerPodDefaults{
-RunnerImage: "example/runner:test",
-DockerImage: "example/docker:test",
-},
}
err = runnerController.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")

View File

@@ -285,20 +285,16 @@ func secretDataToGitHubClientConfig(data map[string][]byte) (*github.Config, err
appID := string(data["github_app_id"])
-if appID != "" {
-conf.AppID, err = strconv.ParseInt(appID, 10, 64)
-if err != nil {
-return nil, err
-}
-}
+conf.AppID, err = strconv.ParseInt(appID, 10, 64)
+if err != nil {
+return nil, err
+}
instID := string(data["github_app_installation_id"])
-if instID != "" {
-conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
-if err != nil {
-return nil, err
-}
-}
+conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
+if err != nil {
+return nil, err
+}
conf.AppPrivateKey = string(data["github_app_private_key"])

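The two versions differ only in the `if appID != ""` / `if instID != ""` guards. Those guards are what keep token-only secrets working: parsing an absent key yields an empty string, and ParseInt on an empty string fails, so any secret lacking github_app_id would be rejected. A stand-alone sketch of the failure mode, with made-up data:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	data := map[string][]byte{"github_token": []byte("ghp_xxx")}
	appID := string(data["github_app_id"]) // missing key -> ""
	if appID != "" {
		if _, err := strconv.ParseInt(appID, 10, 64); err != nil {
			fmt.Println("invalid app id:", err)
			return
		}
	}
	fmt.Println("token auth, no app id to parse")
}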
Some files were not shown because too many files have changed in this diff.