Compare commits


4 Commits

Author           SHA1        Message                              Date
Francesco Renzi  a5acb44ae0  Convert two more tests               2023-05-15 12:54:02 +01:00
Francesco Renzi  8243fa121c  use TestMain                         2023-03-23 15:58:50 +00:00
Francesco Renzi  93b6e7fa31  One more test, in parallel           2023-03-22 12:57:57 +00:00
Francesco Renzi  065655be7e  Experimenting with no ginkgo/gomega  2023-03-22 11:54:15 +00:00
83 changed files with 4807 additions and 6456 deletions


@@ -1,160 +0,0 @@
name: 'Execute and Assert ARC E2E Test Action'
description: 'Queue the E2E test workflow and assert that the workflow run succeeds'
inputs:
auth-token:
description: 'GitHub access token to queue workflow run'
required: true
repo-owner:
description: "The repository owner name that has the test workflow file, ex: actions"
required: true
repo-name:
description: "The repository name that has the test workflow file, ex: test"
required: true
workflow-file:
description: 'The file name of the workflow yaml, ex: test.yml'
required: true
arc-name:
description: 'The name of the configured gha-runner-scale-set'
required: true
arc-namespace:
description: 'The namespace of the configured gha-runner-scale-set'
required: true
arc-controller-namespace:
description: 'The namespace of the configured gha-runner-scale-set-controller'
required: true
runs:
using: "composite"
steps:
- name: Queue test workflow
shell: bash
id: queue_workflow
run: |
queue_time=`date +%FT%TZ`
echo "queue_time=$queue_time" >> $GITHUB_OUTPUT
curl -X POST https://api.github.com/repos/${{inputs.repo-owner}}/${{inputs.repo-name}}/actions/workflows/${{inputs.workflow-file}}/dispatches \
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token ${{inputs.auth-token}}" \
-d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'
- name: Fetch workflow run & job ids
uses: actions/github-script@v6
id: query_workflow
with:
script: |
// Try to find the workflow run triggered by the previous step using the workflow_dispatch event.
// - Find recently created workflow runs in the test repository
// - For each workflow run, list its workflow job and see if the job's labels contain `inputs.arc-name`
// - Since the inputs.arc-name should be unique per e2e workflow run, once we find the job with the label, we find the workflow that we just triggered.
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms))
}
const owner = '${{inputs.repo-owner}}'
const repo = '${{inputs.repo-name}}'
const workflow_id = '${{inputs.workflow-file}}'
let workflow_run_id = 0
let workflow_job_id = 0
let workflow_run_html_url = ""
let count = 0
while (count++<12) {
await sleep(10 * 1000);
let listRunResponse = await github.rest.actions.listWorkflowRuns({
owner: owner,
repo: repo,
workflow_id: workflow_id,
created: '>${{steps.queue_workflow.outputs.queue_time}}'
})
if (listRunResponse.data.total_count > 0) {
console.log(`Found some new workflow runs for ${workflow_id}`)
for (let i = 0; i<listRunResponse.data.total_count; i++) {
let workflowRun = listRunResponse.data.workflow_runs[i]
console.log(`Check if workflow run ${workflowRun.id} is triggered by us.`)
let listJobResponse = await github.rest.actions.listJobsForWorkflowRun({
owner: owner,
repo: repo,
run_id: workflowRun.id
})
console.log(`Workflow run ${workflowRun.id} has ${listJobResponse.data.total_count} jobs.`)
if (listJobResponse.data.total_count > 0) {
for (let j = 0; j<listJobResponse.data.total_count; j++) {
let workflowJob = listJobResponse.data.jobs[j]
console.log(`Check if workflow job ${workflowJob.id} is triggered by us.`)
console.log(JSON.stringify(workflowJob.labels));
if (workflowJob.labels.includes('${{inputs.arc-name}}')) {
console.log(`Workflow job ${workflowJob.id} (Run id: ${workflowJob.run_id}) is triggered by us.`)
workflow_run_id = workflowJob.run_id
workflow_job_id = workflowJob.id
workflow_run_html_url = workflowRun.html_url
break
}
}
}
if (workflow_job_id > 0) {
break;
}
}
}
if (workflow_job_id > 0) {
break;
}
}
if (workflow_job_id == 0) {
core.setFailed(`Can't find workflow run and workflow job triggered to 'runs-on ${{inputs.arc-name}}'`)
} else {
core.setOutput('workflow_run', workflow_run_id);
core.setOutput('workflow_job', workflow_job_id);
core.setOutput('workflow_run_url', workflow_run_html_url);
}
- name: Generate summary about the triggered workflow run
shell: bash
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Triggered workflow run** |
|:--------------------------:|
| ${{steps.query_workflow.outputs.workflow_run_url}} |
EOF
- name: Wait for workflow to finish successfully
uses: actions/github-script@v6
with:
script: |
// Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms))
}
const owner = '${{inputs.repo-owner}}'
const repo = '${{inputs.repo-name}}'
const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
let count = 0
while (count++<10) {
await sleep(30 * 1000);
let getRunResponse = await github.rest.actions.getWorkflowRun({
owner: owner,
repo: repo,
run_id: workflow_run_id
})
console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
if (getRunResponse.data.status == 'completed') {
if ( getRunResponse.data.conclusion == 'success') {
console.log(`Workflow run finished properly.`)
return
} else {
core.setFailed(`The triggered workflow run finished with result ${getRunResponse.data.conclusion}`)
return
}
}
}
core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)
- name: Gather logs and cleanup
shell: bash
if: always()
run: |
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
kubectl logs deployment/arc-gha-runner-scale-set-controller -n ${{inputs.arc-controller-namespace}}
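
At its core, the deleted action issues two kinds of REST calls: a POST to .../actions/workflows/{file}/dispatches to queue the run, then repeated GETs until the run it triggered completes. A minimal Go sketch of that dispatch-and-poll loop follows; the owner, repo, workflow file, and polling budget are illustrative placeholders, and the real action additionally matches each job's runner labels against inputs.arc-name to pick out the exact run.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"time"
)

// apiBase points at the repository hosting the test workflow (placeholder values).
const apiBase = "https://api.github.com/repos/OWNER/REPO/actions"

// call issues an authenticated request against the GitHub REST API and
// optionally decodes the JSON response body into out.
func call(method, url string, body []byte, out any) error {
	req, err := http.NewRequest(method, url, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Accept", "application/vnd.github.v3+json")
	req.Header.Set("Authorization", "token "+os.Getenv("GITHUB_TOKEN"))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if out == nil {
		return nil
	}
	return json.NewDecoder(resp.Body).Decode(out)
}

func main() {
	// Queue the test workflow on the default branch, passing the scale set name as input.
	payload := []byte(`{"ref": "main", "inputs": {"arc_name": "example-scale-set"}}`)
	if err := call("POST", apiBase+"/workflows/arc-test-workflow.yaml/dispatches", payload, nil); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Poll the workflow's runs until one reports a completed status.
	for attempt := 0; attempt < 10; attempt++ {
		time.Sleep(30 * time.Second)
		var runs struct {
			WorkflowRuns []struct {
				ID         int64  `json:"id"`
				Status     string `json:"status"`
				Conclusion string `json:"conclusion"`
			} `json:"workflow_runs"`
		}
		if err := call("GET", apiBase+"/workflows/arc-test-workflow.yaml/runs", nil, &runs); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		for _, run := range runs.WorkflowRuns {
			if run.Status == "completed" {
				fmt.Printf("run %d finished with conclusion %s\n", run.ID, run.Conclusion)
				return
			}
		}
	}
	fmt.Fprintln(os.Stderr, "the triggered workflow run did not complete in time")
	os.Exit(1)
}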


@@ -2,21 +2,21 @@ name: 'Setup ARC E2E Test Action'
description: 'Build controller image, create kind cluster, load the image, and exchange ARC configure token.'
inputs:
app-id:
github-app-id:
description: 'GitHub App Id for exchange access token'
required: true
app-pk:
github-app-pk:
description: "GitHub App private key for exchange access token"
required: true
image-name:
github-app-org:
description: 'The organization the GitHub App is installed in'
required: true
docker-image-name:
description: "Local docker image name for building"
required: true
image-tag:
docker-image-tag:
description: "Tag of ARC Docker image for building"
required: true
target-org:
description: "The test organization for ARC e2e test"
required: true
outputs:
token:
@@ -42,22 +42,23 @@ runs:
platforms: linux/amd64
load: true
build-args: |
DOCKER_IMAGE_NAME=${{inputs.image-name}}
VERSION=${{inputs.image-tag}}
DOCKER_IMAGE_NAME=${{inputs.docker-image-name}}
VERSION=${{inputs.docker-image-tag}}
tags: |
${{inputs.image-name}}:${{inputs.image-tag}}
no-cache: true
${{inputs.docker-image-name}}:${{inputs.docker-image-tag}}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Create minikube cluster and load image
shell: bash
run: |
minikube start
minikube image load ${{inputs.image-name}}:${{inputs.image-tag}}
minikube image load ${{inputs.docker-image-name}}:${{inputs.docker-image-tag}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
with:
application_id: ${{ inputs.app-id }}
application_private_key: ${{ inputs.app-pk }}
organization: ${{ inputs.target-org}}
application_id: ${{ inputs.github-app-id }}
application_private_key: ${{ inputs.github-app-pk }}
organization: ${{ inputs.github-app-org }}


@@ -0,0 +1,16 @@
name: ARC Reusable Workflow
on:
workflow_dispatch:
inputs:
date_time:
description: 'Datetime for runner name uniqueness, format: %Y-%m-%d-%H-%M-%S-%3N, example: 2023-02-14-13-00-16-791'
required: true
jobs:
arc-runner-job:
strategy:
fail-fast: false
matrix:
job: [1, 2, 3]
runs-on: arc-runner-${{ inputs.date_time }}
steps:
- run: echo "Hello World!" >> $GITHUB_STEP_SUMMARY


@@ -8,35 +8,52 @@ on:
branches:
- master
workflow_dispatch:
permissions:
contents: read
inputs:
target_org:
description: The org of the test repository.
required: true
default: actions-runner-controller
target_repo:
description: The repository to install the ARC.
required: true
default: arc_e2e_test_dummy
env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "dev"
jobs:
default-setup:
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
@@ -68,61 +85,88 @@ jobs:
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
single-namespace-setup:
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
@@ -156,61 +200,88 @@ jobs:
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
dind-mode-setup:
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: arc-test-dind-workflow.yaml
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
@@ -242,11 +313,11 @@ jobs:
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set containerMode.type="dind" \
./charts/gha-runner-scale-set \
@@ -254,59 +325,81 @@ jobs:
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
kubernetes-mode-setup:
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
echo "Install openebs/dynamic-localpv-provisioner"
helm repo add openebs https://openebs.github.io/charts
helm repo update
helm install openebs openebs/openebs -n openebs --create-namespace
helm install arc \
--namespace "arc-systems" \
--create-namespace \
@@ -330,16 +423,20 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
kubectl wait --timeout=30s --for=condition=ready pod -n openebs -l name=openebs-localpv-provisioner
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
echo "Install openebs/dynamic-localpv-provisioner"
helm repo add openebs https://openebs.github.io/charts
helm repo update
helm install openebs openebs/openebs -n openebs --create-namespace
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set containerMode.type="kubernetes" \
--set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \
@@ -350,50 +447,77 @@ jobs:
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
auth-proxy-setup:
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
@@ -434,11 +558,11 @@ jobs:
--namespace=arc-runners \
--from-literal=username=github \
--from-literal=password='actions'
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set proxy.https.url="http://host.minikube.internal:3128" \
--set proxy.https.credentialSecretRef="proxy-auth" \
@@ -448,50 +572,77 @@ jobs:
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
- name: Test ARC scales pods up and down
id: test
run: |
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}"
go test ./test_e2e_arc -v
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
anonymous-proxy-setup:
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- name: Resolve inputs
id: resolved_inputs
run: |
TARGET_ORG="${{env.TARGET_ORG}}"
TARGET_REPO="${{env.TARGET_REPO}}"
if [ ! -z "${{inputs.target_org}}" ]; then
TARGET_ORG="${{inputs.target_org}}"
fi
if [ ! -z "${{inputs.target_repo}}" ]; then
TARGET_REPO="${{inputs.target_repo}}"
fi
echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT
echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}}
github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}}
github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}}
docker-image-name: ${{env.IMAGE_NAME}}
docker-image-tag: ${{env.IMAGE_VERSION}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
@@ -527,11 +678,11 @@ jobs:
--name squid \
--publish 3128:3128 \
ubuntu/squid:latest
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set proxy.https.url="http://host.minikube.internal:3128" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
@@ -540,144 +691,44 @@ jobs:
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
self-signed-ca-setup:
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
- name: Test ARC scales pods up and down
id: test
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
export GITHUB_TOKEN="${{ steps.setup.outputs.token }}"
export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}"
export WORKFLOW_FILE="${{ env.WORKFLOW_FILE }}"
go test ./test_e2e_arc -v
- name: Install gha-runner-scale-set
id: install_arc
- name: Uninstall gha-runner-scale-set
if: always() && steps.install_arc.outcome == 'success'
run: |
docker run -d \
--rm \
--name mitmproxy \
--publish 8080:8080 \
-v ${{ github.workspace }}/mitmproxy:/home/mitmproxy/.mitmproxy \
mitmproxy/mitmproxy:latest \
mitmdump
count=0
while true; do
if [ -f "${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem" ]; then
echo "CA cert generated"
cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for mitmproxy generate its CA cert"
exit 1
fi
sleep 1
done
sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
kubectl create namespace arc-runners
kubectl -n arc-runners create configmap ca-cert --from-file="${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt"
kubectl -n arc-runners get configmap ca-cert -o yaml
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set proxy.https.url="http://host.minikube.internal:8080" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
--set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert" \
--set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.crt" \
--set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 10 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1
fi
sleep 1
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }}
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
with:
auth-token: ${{ steps.setup.outputs.token }}
repo-owner: ${{ env.TARGET_ORG }}
repo-name: ${{env.TARGET_REPO}}
workflow-file: ${{env.WORKFLOW_FILE}}
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
- name: Dump gha-runner-scale-set-controller logs
if: always() && steps.install_arc_controller.outcome == 'success'
run: |
kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems
- name: Job summary
if: always() && steps.install_arc.outcome == 'success'
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Outcome** | ${{ steps.test.outcome }} |
|----------------|--------------------------------------------- |
| **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) |
EOF
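
Several of the jobs above add a "Test ARC scales pods up and down" step that simply runs go test ./test_e2e_arc -v with GITHUB_TOKEN, ARC_NAME, and WORKFLOW_FILE exported. The test package itself is not part of this comparison; a minimal sketch of a TestMain entrypoint consistent with the "use TestMain" commit, with all identifiers hypothetical, could look like this:

package test_e2e_arc

import (
	"fmt"
	"os"
	"testing"
)

// Values the workflow exports before running the package (hypothetical variable names).
var (
	githubToken  = os.Getenv("GITHUB_TOKEN")
	arcName      = os.Getenv("ARC_NAME")
	workflowFile = os.Getenv("WORKFLOW_FILE")
)

// TestMain fails fast when the required environment is missing, so a
// misconfigured job errors out immediately instead of timing out later.
func TestMain(m *testing.M) {
	required := map[string]string{
		"GITHUB_TOKEN":  githubToken,
		"ARC_NAME":      arcName,
		"WORKFLOW_FILE": workflowFile,
	}
	for name, value := range required {
		if value == "" {
			fmt.Fprintf(os.Stderr, "missing required environment variable %s\n", name)
			os.Exit(1)
		}
	}
	os.Exit(m.Run())
}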


@@ -1,80 +0,0 @@
name: Go
on:
push:
branches:
- master
paths:
- '.github/workflows/go.yaml'
- '**.go'
- 'go.mod'
- 'go.sum'
pull_request:
paths:
- '.github/workflows/go.yaml'
- '**.go'
- 'go.mod'
- 'go.sum'
permissions:
contents: read
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
cache: false
- name: fmt
run: go fmt ./...
- name: Check diff
run: git diff --exit-code
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
cache: false
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
with:
only-new-issues: true
version: v1.51.1
generate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
cache: false
- name: Generate
run: make generate
- name: Check diff
run: git diff --exit-code
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
- run: make manifests
- name: Check diff
run: git diff --exit-code
- name: Install kubebuilder
run: |
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
- name: Run go tests
run: |
go test -short `go list ./... | grep -v ./test_e2e_arc`

.github/workflows/golangci-lint.yaml

@@ -0,0 +1,23 @@
name: golangci-lint
on:
push:
branches:
- master
pull_request:
permissions:
contents: read
pull-requests: read
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: 1.19
- uses: actions/checkout@v3
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
with:
only-new-issues: true
version: v1.51.1


@@ -20,7 +20,7 @@ env:
HELM_VERSION: v3.8.0
permissions:
contents: write
contents: read
jobs:
lint-chart:
@@ -173,28 +173,10 @@ jobs:
--pages-branch 'gh-pages' \
--pages-index-path 'index.yaml'
# This step is required to not throw away changes made to the index.yaml on every new chart release.
#
# We update the index.yaml in the actions-runner-controller.github.io repo
# by appending the new chart version to the index.yaml saved in actions-runner-controller repo
# and copying and committing the updated index.yaml to the github.io one.
# See below for more context:
# - https://github.com/actions-runner-controller/actions-runner-controller.github.io/pull/2
# - https://github.com/actions/actions-runner-controller/pull/2452
- name: Commit and push to actions/actions-runner-controller
run: |
git checkout gh-pages
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git add .
git commit -m "Update index.yaml"
git push
working-directory: ${{ github.workspace }}
# Chart Release was never intended to publish to a different repo
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout target repository
- name: Checkout pages repository
uses: actions/checkout@v3
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
@@ -206,7 +188,7 @@ jobs:
run: |
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
- name: Commit and push to target repository
- name: Commit and push
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

.github/workflows/validate-arc.yaml

@@ -0,0 +1,60 @@
name: Validate ARC
on:
pull_request:
branches:
- master
paths-ignore:
- '**.md'
- '.github/ISSUE_TEMPLATE/**'
- '.github/workflows/publish-canary.yaml'
- '.github/workflows/validate-chart.yaml'
- '.github/workflows/publish-chart.yaml'
- '.github/workflows/runners.yaml'
- '.github/workflows/publish-arc.yaml'
- '.github/workflows/validate-entrypoint.yaml'
- '.github/renovate.*'
- 'runner/**'
- '.gitignore'
- 'PROJECT'
- 'LICENSE'
- 'Makefile'
permissions:
contents: read
jobs:
test-controller:
name: Test ARC
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set-up Go
uses: actions/setup-go@v3
with:
go-version: '1.19'
check-latest: false
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Install kubebuilder
run: |
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
- name: Run tests
run: |
make test
- name: Verify manifests are up-to-date
run: |
make manifests
git diff --exit-code


@@ -71,7 +71,7 @@ jobs:
git clone https://github.com/helm/chart-testing
cd chart-testing
unset CT_CONFIG_DIR
goreleaser build --clean --skip-validate
goreleaser build --clean --skip-validate
./dist/chart-testing_linux_amd64_v1/ct version
echo 'Adding ct directory to PATH...'
echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH"
@@ -107,7 +107,7 @@ jobs:
load: true
build-args: |
DOCKER_IMAGE_NAME=test-arc
VERSION=dev
VERSION=dev
tags: |
test-arc:dev
cache-from: type=gha


@@ -61,9 +61,6 @@ if [ "${tool}" == "helm" ]; then
flags+=( --set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
flags+=( --set actionsMetricsServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
fi
if [ "${WATCH_NAMESPACE}" != "" ]; then
flags+=( --set watchNamespace=${WATCH_NAMESPACE} --set singleNamespace=true)
fi
if [ "${CHART_VERSION}" != "" ]; then
flags+=( --version ${CHART_VERSION})
fi
@@ -72,9 +69,6 @@ if [ "${tool}" == "helm" ]; then
flags+=( --set githubWebhookServer.logFormat=${LOG_FORMAT})
flags+=( --set actionsMetricsServer.logFormat=${LOG_FORMAT})
fi
if [ "${ADMISSION_WEBHOOKS_TIMEOUT}" != "" ]; then
flags+=( --set admissionWebHooks.timeoutSeconds=${ADMISSION_WEBHOOKS_TIMEOUT})
fi
if [ -n "${CREATE_SECRETS_USING_HELM}" ]; then
if [ -z "${WEBHOOK_GITHUB_TOKEN}" ]; then
echo 'Failed deploying secret "actions-metrics-server" using helm. Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2
@@ -83,10 +77,6 @@ if [ "${tool}" == "helm" ]; then
flags+=( --set actionsMetricsServer.secret.create=true)
flags+=( --set actionsMetricsServer.secret.github_token=${WEBHOOK_GITHUB_TOKEN})
fi
if [ -n "${GITHUB_WEBHOOK_SERVER_ENV_NAME}" ] && [ -n "${GITHUB_WEBHOOK_SERVER_ENV_VALUE}" ]; then
flags+=( --set githubWebhookServer.env[0].name=${GITHUB_WEBHOOK_SERVER_ENV_NAME})
flags+=( --set githubWebhookServer.env[0].value=${GITHUB_WEBHOOK_SERVER_ENV_VALUE})
fi
set -vx


@@ -2,7 +2,7 @@
**Date**: 2023-02-02
**Status**: Done
**Status**: Proposed
## Context


@@ -2,7 +2,7 @@
**Date**: 2023-02-10
**Status**: Done
**Status**: Pending
## Context


@@ -86,4 +86,4 @@ Or for example if they're having problems specifically with runners:
This way users don't have to understand ARC moving parts but we still have a
way to target them specifically if we need to.
[^1]: Supersedes [ADR 2022-12-05](2022-12-05-adding-labels-k8s-resources.md)
[^1]: [ADR 2022-12-05](2022-12-05-adding-labels-k8s-resources.md)


@@ -52,9 +52,6 @@ type AutoscalingListenerSpec struct {
// Required
Image string `json:"image,omitempty"`
// Required
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// Required
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`


@@ -77,11 +77,6 @@ type RunnerDeploymentStatus struct {
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=rdeploy
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.enterprise",name=Enterprise,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.organization",name=Organization,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.repository",name=Repository,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.group",name=Group,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.labels",name=Labels,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
// +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number


@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.23.1
version: 0.22.0
# Used as the default manager tag value when no tag property is provided in the values.yaml
appVersion: 0.27.2
appVersion: 0.27.0
home: https://github.com/actions/actions-runner-controller


@@ -17,21 +17,6 @@ spec:
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.template.spec.enterprise
name: Enterprise
type: string
- jsonPath: .spec.template.spec.organization
name: Organization
type: string
- jsonPath: .spec.template.spec.repository
name: Repository
type: string
- jsonPath: .spec.template.spec.group
name: Group
type: string
- jsonPath: .spec.template.spec.labels
name: Labels
type: string
- jsonPath: .spec.replicas
name: Desired
type: number


@@ -117,14 +117,10 @@ spec:
name: {{ include "actions-runner-controller.secretName" . }}
optional: true
{{- end }}
{{- if kindIs "slice" .Values.githubWebhookServer.env }}
{{- toYaml .Values.githubWebhookServer.env | nindent 8 }}
{{- else }}
{{- range $key, $val := .Values.githubWebhookServer.env }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}
{{- end }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
name: github-webhook-server
imagePullPolicy: {{ .Values.image.pullPolicy }}


@@ -250,6 +250,14 @@ rules:
- patch
- update
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
{{- if .Values.runner.statusUpdateHook.enabled }}
- apiGroups:
- ""
@@ -303,4 +311,11 @@ rules:
- list
- create
- delete
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
{{- end }}


@@ -1,21 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
{{- if .Values.scope.singleNamespace }}
kind: RoleBinding
{{- else }}
kind: ClusterRoleBinding
{{- end }}
metadata:
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
{{- if .Values.scope.singleNamespace }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}


@@ -1,24 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
{{- if .Values.scope.singleNamespace }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
metadata:
creationTimestamp: null
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
{{- if .Values.rbac.allowGrantingKubernetesContainerModePermissions }}
{{/* These permissions are required by ARC to create RBAC resources for the runner pod to use the kubernetes container mode. */}}
{{/* See https://github.com/actions/actions-runner-controller/pull/1268/files#r917331632 */}}
- create
- delete
{{- end }}


@@ -44,7 +44,6 @@ webhooks:
resources:
- runners
sideEffects: None
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
- admissionReviewVersions:
- v1beta1
{{- if .Values.scope.singleNamespace }}
@@ -75,7 +74,6 @@ webhooks:
resources:
- runnerdeployments
sideEffects: None
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
- admissionReviewVersions:
- v1beta1
{{- if .Values.scope.singleNamespace }}
@@ -106,7 +104,6 @@ webhooks:
resources:
- runnerreplicasets
sideEffects: None
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
- admissionReviewVersions:
- v1beta1
{{- if .Values.scope.singleNamespace }}
@@ -139,7 +136,6 @@ webhooks:
objectSelector:
matchLabels:
"actions-runner-controller/inject-registration-token": "true"
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
@@ -181,7 +177,6 @@ webhooks:
resources:
- runners
sideEffects: None
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
- admissionReviewVersions:
- v1beta1
{{- if .Values.scope.singleNamespace }}
@@ -212,7 +207,6 @@ webhooks:
resources:
- runnerdeployments
sideEffects: None
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
- admissionReviewVersions:
- v1beta1
{{- if .Values.scope.singleNamespace }}
@@ -244,7 +238,6 @@ webhooks:
- runnerreplicasets
sideEffects: None
{{ if not (or (hasKey .Values.admissionWebHooks "caBundle") .Values.certManagerEnabled) }}
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
---
apiVersion: v1
kind: Secret


@@ -279,19 +279,6 @@ githubWebhookServer:
# queueLimit: 100
terminationGracePeriodSeconds: 10
lifecycle: {}
# specify additional environment variables for the webhook server pod.
# It's possible to specify either key value pairs e.g.:
# my_env_var: "some value"
# my_other_env_var: "other value"
# or a list of complete environment variable definitions e.g.:
# - name: GITHUB_WEBHOOK_SECRET_TOKEN
# valueFrom:
# secretKeyRef:
# key: GITHUB_WEBHOOK_SECRET_TOKEN
# name: prod-gha-controller-webhook-token
# optional: true
env: {}
actionsMetrics:
serviceAnnotations: {}


@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0
version: 0.3.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.4.0"
appVersion: "0.3.0"
home: https://github.com/actions/actions-runner-controller


@@ -80,9 +80,6 @@ spec:
image:
description: Required
type: string
imagePullPolicy:
description: Required
type: string
imagePullSecrets:
description: Required
items:


@@ -68,11 +68,14 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
value: "{{ .Values.image.pullPolicy | default "IfNotPresent" }}"
{{- with .Values.env }}
{{- if kindIs "slice" . }}
{{- toYaml . | nindent 8 }}
{{- if kindIs "slice" .Values.env }}
{{- toYaml .Values.env | nindent 8 }}
{{- else }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.resources }}


@@ -133,5 +133,4 @@ rules:
verbs:
- list
- watch
- patch
{{- end }}


@@ -81,4 +81,4 @@ rules:
verbs:
- list
- watch
{{- end }}
{{- end }}


@@ -114,5 +114,4 @@ rules:
verbs:
- list
- watch
- patch
{{- end }}
{{- end }}


@@ -349,16 +349,13 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
@@ -393,8 +390,6 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
"imagePullSecrets[0].name": "dockerhub",
"nameOverride": "gha-runner-scale-set-controller-override",
"fullnameOverride": "gha-runner-scale-set-controller-fullname-override",
"env[0].name": "ENV_VAR_NAME_1",
"env[0].value": "ENV_VAR_VALUE_1",
"serviceAccount.name": "gha-runner-scale-set-controller-sa",
"podAnnotations.foo": "bar",
"podSecurityContext.fsGroup": "1000",
@@ -437,9 +432,6 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName)
@@ -475,16 +467,10 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "Always", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
@@ -704,16 +690,13 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
@@ -721,52 +704,6 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
}
func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"env[0].Name": "ENV_VAR_NAME_1",
"env[0].Value": "ENV_VAR_VALUE_1",
"env[1].Name": "ENV_VAR_NAME_2",
"env[1].ValueFrom.SecretKeyRef.Key": "ENV_VAR_NAME_2",
"env[1].ValueFrom.SecretKeyRef.Name": "secret-name",
"env[1].ValueFrom.SecretKeyRef.Optional": "true",
"env[2].Name": "ENV_VAR_NAME_3",
"env[2].Value": "",
"env[3].Name": "ENV_VAR_NAME_4",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7)
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].Name)
assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Name)
assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Key)
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Optional)
assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[5].Name)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].Value)
assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[6].Name)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[6].ValueFrom)
}
func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) {
t.Parallel()

View File

@@ -18,17 +18,6 @@ imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
env:
## Define environment variables for the controller pod
# - name: "ENV_VAR_NAME_1"
# value: "ENV_VAR_VALUE_1"
# - name: "ENV_VAR_NAME_2"
# valueFrom:
# secretKeyRef:
# key: ENV_VAR_NAME_2
# name: secret-name
# optional: true
serviceAccount:
# Specifies whether a service account should be created for running the controller pod
create: true
@@ -42,27 +31,27 @@ serviceAccount:
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
@@ -80,6 +69,6 @@ flags:
# Defaults to "debug".
logLevel: "debug"
## Restricts the controller to only watch resources in the desired namespace.
## Defaults to watch all namespaces when unset.
# watchSingleNamespace: ""
# Restricts the controller to only watch resources in the desired namespace.
# Defaults to watch all namespaces when unset.
# watchSingleNamespace: ""
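The flags block above is the knob that restricts the controller to a single namespace. Purely as a hypothetical sketch (not part of this diff), and assuming flags.watchSingleNamespace is forwarded into a --watch-single-namespace argument as the deployment tests in this diff expect, it could be exercised with the same terratest helpers:

package tests

import (
	"path/filepath"
	"strings"
	"testing"

	"github.com/gruntwork-io/terratest/modules/helm"
	"github.com/gruntwork-io/terratest/modules/k8s"
	"github.com/gruntwork-io/terratest/modules/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	appsv1 "k8s.io/api/apps/v1"
)

// Hypothetical sketch: render the controller deployment with flags.watchSingleNamespace set
// and check that the value is passed through as a container argument.
func TestSketch_WatchSingleNamespaceFlag(t *testing.T) {
	t.Parallel()

	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
	require.NoError(t, err)

	namespaceName := "test-" + strings.ToLower(random.UniqueId())
	options := &helm.Options{
		SetValues:      map[string]string{"flags.watchSingleNamespace": "demo"},
		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
	}

	output := helm.RenderTemplate(t, options, helmChartPath, "test-arc", []string{"templates/deployment.yaml"})

	var deployment appsv1.Deployment
	helm.UnmarshalK8SYaml(t, output, &deployment)

	// The WatchSingleNamespace test in this diff asserts exactly this argument.
	assert.Contains(t, deployment.Spec.Template.Spec.Containers[0].Args, "--watch-single-namespace=demo")
}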

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0
version: 0.3.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.4.0"
appVersion: "0.3.0"
home: https://github.com/actions/dev-arc

View File

@@ -11,9 +11,17 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
If release name contains chart name it will be used as a full name.
*/}}
{{- define "gha-runner-scale-set.fullname" -}}
{{- $name := default .Chart.Name }}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
@@ -32,9 +40,6 @@ helm.sh/chart: {{ include "gha-runner-scale-set.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: gha-runner-scale-set
actions.github.com/scale-set-name: {{ .Release.Name }}
actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
{{- end }}
{{/*
@@ -65,10 +70,6 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role
{{- end }}
{{- define "gha-runner-scale-set.kubeModeRoleBindingName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role-binding
{{- end }}
{{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-service-account
{{- end }}
@@ -431,7 +432,7 @@ volumeMounts:
{{- include "gha-runner-scale-set.fullname" . }}-manager-role
{{- end }}
{{- define "gha-runner-scale-set.managerRoleBindingName" -}}
{{- define "gha-runner-scale-set.managerRoleBinding" -}}
{{- include "gha-runner-scale-set.fullname" . }}-manager-role-binding
{{- end }}

View File

@@ -10,23 +10,7 @@ metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/component: "autoscaling-runner-set"
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations:
{{- $containerMode := .Values.containerMode }}
{{- if not (kindIs "string" .Values.githubConfigSecret) }}
actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }}
{{- end }}
actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }}
{{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
{{- end }}
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
{{- end }}
spec:
githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }}
githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }}
@@ -106,15 +90,14 @@ spec:
{{ $key }}: {{ $val | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- $containerMode := .Values.containerMode }}
{{- if eq $containerMode.type "kubernetes" }}
{{- if eq .Values.containerMode.type "kubernetes" }}
serviceAccountName: {{ default (include "gha-runner-scale-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }}
{{- else }}
serviceAccountName: {{ default (include "gha-runner-scale-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }}
{{- end }}
{{- if or .Values.template.spec.initContainers (eq $containerMode.type "dind") }}
{{- if or .Values.template.spec.initContainers (eq .Values.containerMode.type "dind") }}
initContainers:
{{- if eq $containerMode.type "dind" }}
{{- if eq .Values.containerMode.type "dind" }}
- name: init-dind-externals
{{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
{{- end }}
@@ -123,13 +106,13 @@ spec:
{{- end }}
{{- end }}
containers:
{{- if eq $containerMode.type "dind" }}
{{- if eq .Values.containerMode.type "dind" }}
- name: runner
{{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }}
- name: dind
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }}
{{- else if eq $containerMode.type "kubernetes" }}
{{- else if eq .Values.containerMode.type "kubernetes" }}
- name: runner
{{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
@@ -137,16 +120,16 @@ spec:
{{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }}
{{- end }}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") $tlsConfig.runnerMountPath }}
{{- if or .Values.template.spec.volumes (eq .Values.containerMode.type "dind") (eq .Values.containerMode.type "kubernetes") $tlsConfig.runnerMountPath }}
volumes:
{{- if $tlsConfig.runnerMountPath }}
{{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }}
{{- end }}
{{- if eq $containerMode.type "dind" }}
{{- if eq .Values.containerMode.type "dind" }}
{{- include "gha-runner-scale-set.dind-volume" . | nindent 6 }}
{{- include "gha-runner-scale-set.dind-work-volume" . | nindent 6 }}
{{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }}
{{- else if eq $containerMode.type "kubernetes" }}
{{- else if eq .Values.containerMode.type "kubernetes" }}
{{- include "gha-runner-scale-set.kubernetes-mode-work-volume" . | nindent 6 }}
{{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }}
{{- else }}
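The containerMode branches above decide which runner containers, init containers, and volumes are rendered into the AutoscalingRunnerSet pod template. As an illustrative sketch only (hypothetical test name; imports, chart path, release name, and required values borrowed from the chart tests elsewhere in this diff), the dind branch could be checked like this:

// Hypothetical sketch: render autoscalingrunnerset.yaml with containerMode.type=dind
// and check that the dind-specific containers are emitted.
// Assumes the same imports as the chart test files in this diff
// (filepath, strings, testing, helm, k8s, random, assert, require, v1alpha1).
func TestSketch_RenderDindContainers(t *testing.T) {
	t.Parallel()

	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
	require.NoError(t, err)

	releaseName := "test-runners"
	namespaceName := "test-" + strings.ToLower(random.UniqueId())
	options := &helm.Options{
		SetValues: map[string]string{
			"githubConfigUrl":                    "https://github.com/actions",
			"githubConfigSecret.github_token":    "gh_token12345",
			"controllerServiceAccount.name":      "arc",
			"controllerServiceAccount.namespace": "arc-system",
			"containerMode.type":                 "dind",
		},
		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
	}

	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})

	var ars v1alpha1.AutoscalingRunnerSet
	helm.UnmarshalK8SYaml(t, output, &ars)

	// The dind branch renders an init container plus "runner" and "dind" containers.
	assert.NotEmpty(t, ars.Spec.Template.Spec.InitContainers)
	var names []string
	for _, c := range ars.Spec.Template.Spec.Containers {
		names = append(names, c.Name)
	}
	assert.Contains(t, names, "runner")
	assert.Contains(t, names, "dind")
}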

View File

@@ -7,7 +7,7 @@ metadata:
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
finalizers:
- actions.github.com/cleanup-protection
- actions.github.com/secret-protection
data:
{{- $hasToken := false }}
{{- $hasAppId := false }}
@@ -36,4 +36,4 @@ data:
{{- if and $hasAppId (or (not $hasInstallationId) (not $hasPrivateKey)) }}
{{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key." }}
{{- end }}
{{- end}}
{{- end}}

View File

@@ -1,13 +1,10 @@
{{- $containerMode := .Values.containerMode }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
# default permission for runner pod service account in kubernetes mode (container hook)
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
namespace: {{ .Release.Namespace }}
finalizers:
- actions.github.com/cleanup-protection
rules:
- apiGroups: [""]
resources: ["pods"]
@@ -24,4 +21,4 @@ rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "create", "delete"]
{{- end }}
{{- end }}

View File

@@ -1,12 +1,9 @@
{{- $containerMode := .Values.containerMode }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
namespace: {{ .Release.Namespace }}
finalizers:
- actions.github.com/cleanup-protection
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -15,4 +12,4 @@ subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}

View File

@@ -1,12 +1,9 @@
{{- $containerMode := .Values.containerMode }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
finalizers:
- actions.github.com/cleanup-protection
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -3,11 +3,6 @@ kind: Role
metadata:
name: {{ include "gha-runner-scale-set.managerRoleName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
app.kubernetes.io/component: manager-role
finalizers:
- actions.github.com/cleanup-protection
rules:
- apiGroups:
- ""
@@ -34,17 +29,6 @@ rules:
- list
- patch
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
@@ -72,4 +56,4 @@ rules:
- configmaps
verbs:
- get
{{- end }}
{{- end }}

View File

@@ -1,13 +1,8 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
name: {{ include "gha-runner-scale-set.managerRoleBinding" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
app.kubernetes.io/component: manager-role-binding
finalizers:
- actions.github.com/cleanup-protection
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -15,4 +10,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set.managerServiceAccountName" . | nindent 4 }}
namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }}
namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }}

View File

@@ -1,5 +1,4 @@
{{- $containerMode := .Values.containerMode }}
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (ne .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: v1
kind: ServiceAccount
metadata:
@@ -7,6 +6,4 @@ metadata:
namespace: {{ .Release.Namespace }}
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
finalizers:
- actions.github.com/cleanup-protection
{{- end }}
{{- end }}

View File

@@ -1,13 +1,11 @@
package tests
import (
"fmt"
"path/filepath"
"strings"
"testing"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random"
@@ -45,7 +43,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
assert.Equal(t, namespaceName, githubSecret.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", githubSecret.Name)
assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"]))
assert.Equal(t, "actions.github.com/cleanup-protection", githubSecret.Finalizers[0])
assert.Equal(t, "actions.github.com/secret-protection", githubSecret.Finalizers[0])
}
func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
@@ -190,7 +188,6 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName)
assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) // no finalizer protections in place
}
func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
@@ -220,7 +217,6 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
var role rbacv1.Role
@@ -228,9 +224,6 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
assert.Equal(t, namespaceName, role.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])
assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules")
assert.Equal(t, "pods", role.Rules[0].Resources[0])
assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
@@ -243,21 +236,18 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &roleBinding)
assert.Equal(t, namespaceName, roleBinding.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role-binding", roleBinding.Name)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.Name)
assert.Len(t, roleBinding.Subjects, 1)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name)
assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
expectedServiceAccountName := "test-runners-gha-runner-scale-set-kube-mode-service-account"
assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName)
}
func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
@@ -289,7 +279,6 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, "test-service-account", ars.Spec.Template.Spec.ServiceAccountName)
assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
}
func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
@@ -322,10 +311,6 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "autoscaling-runner-set", ars.Labels["app.kubernetes.io/component"])
assert.NotEmpty(t, ars.Labels["app.kubernetes.io/version"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)
@@ -1469,11 +1454,7 @@ func TestTemplate_CreateManagerRole(t *testing.T) {
assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
assert.Equal(t, 6, len(managerRole.Rules))
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, 5, len(managerRole.Rules))
}
func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
@@ -1504,9 +1485,8 @@ func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
assert.Equal(t, 7, len(managerRole.Rules))
assert.Equal(t, "configmaps", managerRole.Rules[6].Resources[0])
assert.Equal(t, 6, len(managerRole.Rules))
assert.Equal(t, "configmaps", managerRole.Rules[5].Resources[0])
}
func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
@@ -1537,7 +1517,6 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name)
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", managerRoleBinding.Finalizers[0])
assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name)
assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace)
}
@@ -1709,103 +1688,3 @@ func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T)
assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
}
func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
annotationExpectedTests := map[string]*helm.Options{
"GitHub token": {
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
},
"GitHub app": {
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "10",
"githubConfigSecret.github_app_installation_id": "100",
"githubConfigSecret.github_app_private_key": "private_key",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
},
}
for name, options := range annotationExpectedTests {
t.Run("Annotation set: "+name, func(t *testing.T) {
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
assert.NotEmpty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
})
}
t.Run("Annotation should not be set", func(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secret",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
assert.Empty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
})
}
func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"containerMode.type": "kubernetes",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
annotationValues := map[string]string{
actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-runner-scale-set-github-secret",
actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-runner-scale-set-manager-role",
actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-runner-scale-set-manager-role-binding",
actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-runner-scale-set-kube-mode-service-account",
actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-runner-scale-set-kube-mode-role",
actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-runner-scale-set-kube-mode-role-binding",
}
for annotation, value := range annotationValues {
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
}
}

View File

@@ -65,19 +65,19 @@ githubConfigSecret:
# certificateFrom:
# configMapKeyRef:
# name: config-map-name
# key: ca.crt
# key: ca.pem
# runnerMountPath: /usr/local/share/ca-certificates/
# containerMode:
# type: "dind" ## type can be set to dind or kubernetes
# ## the following is required when containerMode.type=kubernetes
# kubernetesModeWorkVolumeClaim:
# accessModes: ["ReadWriteOnce"]
# # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to provide dynamic provision volume with storageClassName: openebs-hostpath
# storageClassName: "dynamic-blob-storage"
# resources:
# requests:
# storage: 1Gi
containerMode:
type: "" ## type can be set to dind or kubernetes
## the following is required when containerMode.type=kubernetes
# kubernetesModeWorkVolumeClaim:
# accessModes: ["ReadWriteOnce"]
# # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to provide dynamic provision volume with storageClassName: openebs-hostpath
# storageClassName: "dynamic-blob-storage"
# resources:
# requests:
# storage: 1Gi
## template is the PodSpec for each runner Pod
template:
@@ -161,7 +161,7 @@ template:
image: ghcr.io/actions/actions-runner:latest
command: ["/home/runner/run.sh"]
## Optional controller service account that needs to have required Role and RoleBinding
## Optional controller service account that needs to have required Role and RoleBinding
## to operate this gha-runner-scale-set installation.
## The helm chart will try to find the controller deployment and its service account at installation time.
## In case the helm chart can't find the right service account, you can explicitly pass in the following value
@@ -169,4 +169,4 @@ template:
## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly.
# controllerServiceAccount:
# namespace: arc-system
# name: test-arc-gha-runner-scale-set-controller
# name: test-arc-gha-runner-scale-set-controller
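The controllerServiceAccount note above is the escape hatch for installations where the chart cannot discover the controller's service account, for example when the controller only watches a single namespace. A hedged fragment (assumes namespaceName and the helm/k8s imports from the tests in this diff; the account name reuses the commented example value above) showing those two values passed through the same terratest options style:

// Hypothetical sketch: explicitly wire the runner scale set to a known controller
// service account instead of relying on chart-time discovery.
options := &helm.Options{
	SetValues: map[string]string{
		"githubConfigUrl":                    "https://github.com/actions",
		"githubConfigSecret.github_token":    "gh_token12345",
		"controllerServiceAccount.namespace": "arc-system",
		"controllerServiceAccount.name":      "test-arc-gha-runner-scale-set-controller",
	},
	KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
// rendered with helm.RenderTemplate as in the tests above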

View File

@@ -80,9 +80,6 @@ spec:
image:
description: Required
type: string
imagePullPolicy:
description: Required
type: string
imagePullSecrets:
description: Required
items:

View File

@@ -17,21 +17,6 @@ spec:
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.template.spec.enterprise
name: Enterprise
type: string
- jsonPath: .spec.template.spec.organization
name: Organization
type: string
- jsonPath: .spec.template.spec.repository
name: Repository
type: string
- jsonPath: .spec.template.spec.group
name: Group
type: string
- jsonPath: .spec.template.spec.labels
name: Labels
type: string
- jsonPath: .spec.replicas
name: Desired
type: number

View File

@@ -56,8 +56,6 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
value: IfNotPresent
volumeMounts:
- name: controller-manager
mountPath: "/etc/actions-runner-controller"

View File

@@ -523,8 +523,8 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
Name: proxyListenerSecretName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
"auto-scaling-runner-set-name": autoscalingListener.Spec.AutoscalingRunnerSetName,
},
},
Data: data,

View File

@@ -27,7 +27,6 @@ import (
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -43,12 +42,12 @@ import (
const (
// TODO: Replace with shared image.
autoscalingRunnerSetOwnerKey = ".metadata.controller"
LabelKeyRunnerSpecHash = "runner-spec-hash"
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
runnerScaleSetIdAnnotationKey = "runner-scale-set-id"
runnerScaleSetNameAnnotationKey = "runner-scale-set-name"
autoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
autoscalingRunnerSetOwnerKey = ".metadata.controller"
LabelKeyRunnerSpecHash = "runner-spec-hash"
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
runnerScaleSetIdKey = "runner-scale-set-id"
runnerScaleSetNameKey = "runner-scale-set-name"
runnerScaleSetRunnerGroupNameKey = "runner-scale-set-runner-group-name"
)
// AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object
@@ -115,17 +114,6 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, err
}
requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log)
if err != nil {
log.Error(err, "Failed to remove finalizers on dependent resources")
return ctrl.Result{}, err
}
if requeue {
log.Info("Waiting for dependent resources to be deleted")
return ctrl.Result{Requeue: true}, nil
}
log.Info("Removing finalizer")
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
@@ -152,7 +140,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil
}
scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]
if !ok {
// Need to create a new runner scale set on Actions service
log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.")
@@ -166,14 +154,14 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
}
// Make sure the runner group of the scale set is up to date
currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName]
currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetRunnerGroupNameKey]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 && !strings.EqualFold(currentRunnerGroupName, autoscalingRunnerSet.Spec.RunnerGroup)) {
log.Info("AutoScalingRunnerSet runner group changed. Updating the runner scale set.")
return r.updateRunnerScaleSetRunnerGroup(ctx, autoscalingRunnerSet, log)
}
// Make sure the runner scale set name is up to date
currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameAnnotationKey]
currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameKey]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) {
log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.")
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
@@ -318,29 +306,6 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
return nil
}
func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
c := autoscalingRunnerSetFinalizerDependencyCleaner{
client: r.Client,
autoscalingRunnerSet: autoscalingRunnerSet,
logger: logger,
}
c.removeKubernetesModeRoleBindingFinalizer(ctx)
c.removeKubernetesModeRoleFinalizer(ctx)
c.removeKubernetesModeServiceAccountFinalizer(ctx)
c.removeNoPermissionServiceAccountFinalizer(ctx)
c.removeGitHubSecretFinalizer(ctx)
c.removeManagerRoleBindingFinalizer(ctx)
c.removeManagerRoleFinalizer(ctx)
requeue, err = c.result()
if err != nil {
logger.Error(err, "Failed to cleanup finalizer from dependent resource")
return true, err
}
return requeue, nil
}
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
logger.Info("Creating a new runner scale set")
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
@@ -400,18 +365,12 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
if autoscalingRunnerSet.Annotations == nil {
autoscalingRunnerSet.Annotations = map[string]string{}
}
if autoscalingRunnerSet.Labels == nil {
autoscalingRunnerSet.Labels = map[string]string{}
}
logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels")
logger.Info("Adding runner scale set ID, name and runner group name as an annotation")
if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[runnerScaleSetNameAnnotationKey] = runnerScaleSet.Name
obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id)
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName
if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen
logger.Error(err, "Failed to apply GitHub URL labels")
}
obj.Annotations[runnerScaleSetNameKey] = runnerScaleSet.Name
obj.Annotations[runnerScaleSetIdKey] = strconv.Itoa(runnerScaleSet.Id)
obj.Annotations[runnerScaleSetRunnerGroupNameKey] = runnerScaleSet.RunnerGroupName
}); err != nil {
logger.Error(err, "Failed to add runner scale set ID, name and runner group name as an annotation")
return ctrl.Result{}, err
@@ -425,7 +384,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
}
func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
if err != nil {
logger.Error(err, "Failed to parse runner scale set ID")
return ctrl.Result{}, err
@@ -456,7 +415,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
logger.Info("Updating runner scale set runner group name as an annotation")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = updatedRunnerScaleSet.RunnerGroupName
obj.Annotations[runnerScaleSetRunnerGroupNameKey] = updatedRunnerScaleSet.RunnerGroupName
}); err != nil {
logger.Error(err, "Failed to update runner group name annotation")
return ctrl.Result{}, err
@@ -467,7 +426,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
}
func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
if err != nil {
logger.Error(err, "Failed to parse runner scale set ID")
return ctrl.Result{}, err
@@ -492,7 +451,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
logger.Info("Updating runner scale set name as an annotation")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[runnerScaleSetNameAnnotationKey] = updatedRunnerScaleSet.Name
obj.Annotations[runnerScaleSetNameKey] = updatedRunnerScaleSet.Name
}); err != nil {
logger.Error(err, "Failed to update runner scale set name annotation")
return ctrl.Result{}, err
@@ -503,28 +462,12 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
}
func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
if !ok {
// Annotation not being present can occur in 3 scenarios
// 1. The scale set was never created.
// In this case, we don't need to fetch the actions client to delete a scale set that does not exist.
//
// 2. The scale set has already been deleted by the controller.
// In that case, the controller cleans up the annotation because the scale set no longer exists.
// Removing the scale set id also matters because permission cleanup will eventually revoke the permissions
// granted through the GitHub secret, and an actions client built from that secret would then fail with permission denied.
//
// 3. The annotation was removed manually.
// In this case, the controller treats the scale set as already removed from the actions service,
// and the scale set then has to be deleted manually.
return nil
}
logger.Info("Deleting the runner scale set from Actions service")
runnerScaleSetId, err := strconv.Atoi(scaleSetId)
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
if err != nil {
// If the annotation is not set correctly, we are going to get stuck in a loop trying to parse the scale set id.
// If the configuration is invalid (secret does not exist for example), we never got to the point to create runner set.
// But then, manual cleanup would get stuck finalizing the resource trying to parse annotation indefinitely
// If the annotation is not set correctly, or if it does not exist, we are going to get stuck in a loop trying to parse the scale set id.
// If the configuration is invalid (secret does not exist for example), we never get to the point to create runner set. But then, manual cleanup
// would get stuck finalizing the resource trying to parse annotation indefinitely
logger.Info("autoscaling runner set does not have annotation describing scale set id. Skip deletion", "err", err.Error())
return nil
}
@@ -541,14 +484,6 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
return err
}
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
delete(obj.Annotations, runnerScaleSetIdAnnotationKey)
})
if err != nil {
logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIdAnnotationKey)
return err
}
logger.Info("Deleted the runner scale set from Actions service")
return nil
}
@@ -718,328 +653,6 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
Complete(r)
}
type autoscalingRunnerSetFinalizerDependencyCleaner struct {
// configuration fields
client client.Client
autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
logger logr.Logger
// fields to operate on
requeue bool
err error
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) {
return c.requeue, c.err
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
roleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName]
if !ok {
c.logger.Info(
"Skipping cleaning up kubernetes mode service account",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleBindingName),
)
return
}
c.logger.Info("Removing finalizer from container mode kubernetes role binding", "name", roleBindingName)
roleBinding := new(rbacv1.RoleBinding)
err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode role binding finalizer has already been removed", "name", roleBindingName)
return
}
err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch kubernetes mode role binding: %w", err)
return
default:
c.logger.Info("Container mode kubernetes role binding has already been deleted", "name", roleBindingName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
roleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName]
if !ok {
c.logger.Info(
"Skipping cleaning up kubernetes mode role",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleName),
)
return
}
c.logger.Info("Removing finalizer from container mode kubernetes role", "name", roleName)
role := new(rbacv1.Role)
err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName)
return
}
err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role")
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch kubernetes mode role: %w", err)
return
default:
c.logger.Info("Container mode kubernetes role has already been deleted", "name", roleName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName]
if !ok {
c.logger.Info(
"Skipping cleaning up kubernetes mode role binding",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeServiceAccountName),
)
return
}
c.logger.Info("Removing finalizer from container mode kubernetes service account", "name", serviceAccountName)
serviceAccount := new(corev1.ServiceAccount)
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName)
return
}
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes service account")
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch kubernetes mode service account: %w", err)
return
default:
c.logger.Info("Container mode kubernetes service account has already been deleted", "name", serviceAccountName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName]
if !ok {
c.logger.Info(
"Skipping cleaning up no permission service account",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyNoPermissionServiceAccountName),
)
return
}
c.logger.Info("Removing finalizer from no permission service account", "name", serviceAccountName)
serviceAccount := new(corev1.ServiceAccount)
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName)
return
}
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch service account: %w", err)
return
default:
c.logger.Info("No permission service account has already been deleted", "name", serviceAccountName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
githubSecretName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName]
if !ok {
c.logger.Info(
"Skipping cleaning up no permission service account",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyGitHubSecretName),
)
return
}
c.logger.Info("Removing finalizer from GitHub secret", "name", githubSecretName)
githubSecret := new(corev1.Secret)
err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(githubSecret, autoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName)
return
}
err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
return
case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
c.err = fmt.Errorf("failed to fetch GitHub secret: %w", err)
return
default:
c.logger.Info("GitHub secret has already been deleted", "name", githubSecretName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
managerRoleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName]
if !ok {
c.logger.Info(
"Skipping cleaning up manager role binding",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleBindingName),
)
return
}
c.logger.Info("Removing finalizer from manager role binding", "name", managerRoleBindingName)
roleBinding := new(rbacv1.RoleBinding)
err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName)
return
}
err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch manager role binding: %w", err)
return
default:
c.logger.Info("Manager role binding has already been deleted", "name", managerRoleBindingName)
return
}
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
return
}
managerRoleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName]
if !ok {
c.logger.Info(
"Skipping cleaning up manager role",
"reason",
fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleName),
)
return
}
c.logger.Info("Removing finalizer from manager role", "name", managerRoleName)
role := new(rbacv1.Role)
err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName)
return
}
err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
return
case err != nil && !kerrors.IsNotFound(err):
c.err = fmt.Errorf("failed to fetch manager role: %w", err)
return
default:
c.logger.Info("Manager role has already been deleted", "name", managerRoleName)
return
}
}
// NOTE: if this logic should be used for other resources,
// consider using generics
type EphemeralRunnerSets struct {

View File

@@ -0,0 +1,69 @@
package actionsgithubcom
import (
"io"
"log"
"os"
"path/filepath"
"testing"
actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
)
var (
cfg *rest.Config
k8sClient client.Client
)
func TestMain(m *testing.M) {
logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(io.Discard)))
testEnv := &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("../..", "config", "crd", "bases")},
}
// Avoids the following error:
// 2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller
// Reconciler error {"controller": "testns-tvjzjrunner", "request":
// "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod
// \"example-runnerdeploy-zps4z-j5562\" is invalid:
// [spec.containers[1].image: Required value,
// spec.containers[1].securityContext.privileged: Forbidden: disallowed by
// cluster policy]"}
testEnv.ControlPlane.GetAPIServer().Configure().
Append("allow-privileged", "true")
var err error
cfg, err = testEnv.Start()
if err != nil || cfg == nil {
log.Fatalln(err, "failed to start test environment")
}
err = actionsv1alpha1.AddToScheme(scheme.Scheme)
if err != nil {
log.Fatalln(err, "failed to add scheme")
}
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
if err != nil || k8sClient == nil {
log.Fatalln(err, "failed to create client")
}
// run the tests
code := m.Run()
err = testEnv.Stop()
if err != nil {
log.Fatalln(err, "failed to stop test environment")
}
os.Exit(code)
}

View File

@@ -356,9 +356,10 @@ func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ep
ObjectMeta: metav1.ObjectMeta{
Name: proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet),
Namespace: ephemeralRunnerSet.Namespace,
Labels: map[string]string{
LabelKeyGitHubScaleSetName: ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName],
LabelKeyGitHubScaleSetNamespace: ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace],
Labels: map[string]string{
// TODO: figure out autoScalingRunnerSet name and set it as a label for this secret
// "auto-scaling-runner-set-namespace": ephemeralRunnerSet.Namespace,
// "auto-scaling-runner-set-name": ephemeralRunnerSet.Name,
},
},
Data: proxySecretData,

View File

@@ -3,18 +3,46 @@ package actionsgithubcom
import (
"context"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/onsi/ginkgo/v2"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
const defaultGitHubToken = "gh_token"
func newAutoscalingRunnerSet(namespace, secretName string) *v1alpha1.AutoscalingRunnerSet {
min := 1
max := 10
return &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: namespace,
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: secretName,
MaxRunners: &max,
MinRunners: &min,
RunnerGroup: "testgroup",
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "runner",
Image: "ghcr.io/actions/runner",
},
},
},
},
},
}
}
func startManagers(t ginkgo.GinkgoTInterface, first manager.Manager, others ...manager.Manager) {
for _, mgr := range append([]manager.Manager{first}, others...) {
ctx, cancel := context.WithCancel(context.Background())
@@ -31,7 +59,7 @@ func startManagers(t ginkgo.GinkgoTInterface, first manager.Manager, others ...m
}
}
func createNamespace(t ginkgo.GinkgoTInterface, client client.Client) (*corev1.Namespace, manager.Manager) {
func createNamespace(t ginkgo.GinkgoTInterface) (*corev1.Namespace, manager.Manager) {
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)},
}
@@ -53,7 +81,7 @@ func createNamespace(t ginkgo.GinkgoTInterface, client client.Client) (*corev1.N
return ns, mgr
}
func createDefaultSecret(t ginkgo.GinkgoTInterface, client client.Client, namespace string) *corev1.Secret {
func createDefaultSecret(t ginkgo.GinkgoTInterface, namespace string) *corev1.Secret {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "github-config-secret",

View File

@@ -8,7 +8,6 @@ import (
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/hash"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@@ -20,68 +19,12 @@ const (
jitTokenKey = "jitToken"
)
// Labels applied to resources
// labels applied to resources
const (
// Kubernetes labels
LabelKeyKubernetesPartOf = "app.kubernetes.io/part-of"
LabelKeyKubernetesComponent = "app.kubernetes.io/component"
LabelKeyKubernetesVersion = "app.kubernetes.io/version"
// Github labels
LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name"
LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
LabelKeyGitHubEnterprise = "actions.github.com/enterprise"
LabelKeyGitHubOrganization = "actions.github.com/organization"
LabelKeyGitHubRepository = "actions.github.com/repository"
LabelKeyAutoScaleRunnerSetName = "auto-scaling-runner-set-name"
LabelKeyAutoScaleRunnerSetNamespace = "auto-scaling-runner-set-namespace"
)
const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
// Labels applied to listener roles
const (
labelKeyListenerName = "auto-scaling-listener-name"
labelKeyListenerNamespace = "auto-scaling-listener-namespace"
)
// Annotations applied for later cleanup of resources
const (
AnnotationKeyManagerRoleBindingName = "actions.github.com/cleanup-manager-role-binding"
AnnotationKeyManagerRoleName = "actions.github.com/cleanup-manager-role-name"
AnnotationKeyKubernetesModeRoleName = "actions.github.com/cleanup-kubernetes-mode-role-name"
AnnotationKeyKubernetesModeRoleBindingName = "actions.github.com/cleanup-kubernetes-mode-role-binding-name"
AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name"
AnnotationKeyGitHubSecretName = "actions.github.com/cleanup-github-secret-name"
AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name"
)
var commonLabelKeys = [...]string{
LabelKeyKubernetesPartOf,
LabelKeyKubernetesComponent,
LabelKeyKubernetesVersion,
LabelKeyGitHubScaleSetName,
LabelKeyGitHubScaleSetNamespace,
LabelKeyGitHubEnterprise,
LabelKeyGitHubOrganization,
LabelKeyGitHubRepository,
}
const labelValueKubernetesPartOf = "gha-runner-scale-set"
const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent
// scaleSetListenerImagePullPolicy is applied to all listeners
var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy
func SetListenerImagePullPolicy(pullPolicy string) bool {
switch p := corev1.PullPolicy(pullPolicy); p {
case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent:
scaleSetListenerImagePullPolicy = p
return true
default:
return false
}
}
type resourceBuilder struct{}
func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
@@ -176,7 +119,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
Name: autoscalingListenerContainerName,
Image: autoscalingListener.Spec.Image,
Env: listenerEnv,
ImagePullPolicy: autoscalingListener.Spec.ImagePullPolicy,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{
"/github-runnerscaleset-listener",
},
@@ -186,11 +129,6 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
RestartPolicy: corev1.RestartPolicyNever,
}
labels := make(map[string]string, len(autoscalingListener.Labels))
for key, val := range autoscalingListener.Labels {
labels[key] = val
}
newRunnerScaleSetListenerPod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -199,7 +137,10 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
ObjectMeta: metav1.ObjectMeta{
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Namespace,
Labels: labels,
Labels: map[string]string{
LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
},
},
Spec: podSpec,
}
@@ -208,28 +149,14 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
}
func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
if err != nil {
return nil, err
}
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
newLabels := map[string]string{
LabelKeyRunnerSpecHash: runnerSpecHash,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-set",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
}
if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
}
newAnnotations := map[string]string{
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
}
newLabels := map[string]string{}
newLabels[LabelKeyRunnerSpecHash] = runnerSpecHash
newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
TypeMeta: metav1.TypeMeta{},
@@ -237,7 +164,6 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
Labels: newLabels,
Annotations: newAnnotations,
},
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: 0,
@@ -261,8 +187,8 @@ func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener
Name: scaleSetListenerServiceAccountName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
},
},
}
@@ -276,11 +202,11 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
labelKeyListenerNamespace: autoscalingListener.Namespace,
labelKeyListenerName: autoscalingListener.Name,
"role-policy-rules-hash": rulesHash,
LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"auto-scaling-listener-namespace": autoscalingListener.Namespace,
"auto-scaling-listener-name": autoscalingListener.Name,
"role-policy-rules-hash": rulesHash,
},
},
Rules: rules,
@@ -310,12 +236,12 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
labelKeyListenerNamespace: autoscalingListener.Namespace,
labelKeyListenerName: autoscalingListener.Name,
"role-binding-role-ref-hash": roleRefHash,
"role-binding-subject-hash": subjectHash,
LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"auto-scaling-listener-namespace": autoscalingListener.Namespace,
"auto-scaling-listener-name": autoscalingListener.Name,
"role-binding-role-ref-hash": roleRefHash,
"role-binding-subject-hash": subjectHash,
},
},
RoleRef: roleRef,
@@ -333,9 +259,9 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"secret-data-hash": dataHash,
LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"secret-data-hash": dataHash,
},
},
Data: secret.DeepCopy().Data,
@@ -345,7 +271,7 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
}
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
if err != nil {
return nil, err
}
@@ -359,25 +285,14 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
}
githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl)
if err != nil {
return nil, fmt.Errorf("failed to parse github config from url: %v", err)
}
autoscalingListener := &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerName(autoscalingRunnerSet),
Namespace: namespace,
Labels: map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-scale-set-listener",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
LabelKeyGitHubEnterprise: githubConfig.Enterprise,
LabelKeyGitHubOrganization: githubConfig.Organization,
LabelKeyGitHubRepository: githubConfig.Repository,
LabelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
LabelKeyAutoScaleRunnerSetNamespace: autoscalingRunnerSet.Namespace,
LabelKeyAutoScaleRunnerSetName: autoscalingRunnerSet.Name,
LabelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
},
},
Spec: v1alpha1.AutoscalingListenerSpec{
@@ -390,7 +305,6 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
MinRunners: effectiveMinRunners,
MaxRunners: effectiveMaxRunners,
Image: image,
ImagePullPolicy: scaleSetListenerImagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
@@ -401,30 +315,11 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
}
func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
labels := make(map[string]string)
for _, key := range commonLabelKeys {
switch key {
case LabelKeyKubernetesComponent:
labels[key] = "runner"
default:
v, ok := ephemeralRunnerSet.Labels[key]
if !ok {
continue
}
labels[key] = v
}
}
annotations := make(map[string]string)
for key, val := range ephemeralRunnerSet.Annotations {
annotations[key] = val
}
return &v1alpha1.EphemeralRunner{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: ephemeralRunnerSet.Name + "-runner-",
Namespace: ephemeralRunnerSet.Namespace,
Labels: labels,
Annotations: annotations,
},
Spec: ephemeralRunnerSet.Spec.EphemeralRunnerSpec,
}
@@ -442,7 +337,6 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
for k, v := range runner.Spec.PodTemplateSpec.Labels {
labels[k] = v
}
labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
for k, v := range runner.ObjectMeta.Annotations {
annotations[k] = v
@@ -458,6 +352,8 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
runner.Status.RunnerJITConfig,
)
labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
objectMeta := metav1.ObjectMeta{
Name: runner.ObjectMeta.Name,
Namespace: runner.ObjectMeta.Namespace,
@@ -573,22 +469,3 @@ func rulesForListenerRole(resourceNames []string) []rbacv1.PolicyRule {
},
}
}
func applyGitHubURLLabels(url string, labels map[string]string) error {
githubConfig, err := actions.ParseGitHubConfigFromURL(url)
if err != nil {
return fmt.Errorf("failed to parse github config from url: %v", err)
}
if len(githubConfig.Enterprise) > 0 {
labels[LabelKeyGitHubEnterprise] = githubConfig.Enterprise
}
if len(githubConfig.Organization) > 0 {
labels[LabelKeyGitHubOrganization] = githubConfig.Organization
}
if len(githubConfig.Repository) > 0 {
labels[LabelKeyGitHubRepository] = githubConfig.Repository
}
return nil
}

View File

@@ -1,93 +0,0 @@
package actionsgithubcom
import (
"context"
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestLabelPropagation(t *testing.T) {
autoscalingRunnerSet := v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-scale-set",
Namespace: "test-ns",
Labels: map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesVersion: "0.2.0",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/org/repo",
},
}
var b resourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(&autoscalingRunnerSet)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion])
assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise])
assert.Equal(t, "org", ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization])
assert.Equal(t, "repo", ephemeralRunnerSet.Labels[LabelKeyGitHubRepository])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName])
listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion])
assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise])
assert.Equal(t, "org", listener.Labels[LabelKeyGitHubOrganization])
assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository])
listenerServiceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
listenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
listenerPod := b.newScaleSetListenerPod(listener, listenerServiceAccount, listenerSecret)
assert.Equal(t, listenerPod.Labels, listener.Labels)
ephemeralRunner := b.newEphemeralRunner(ephemeralRunnerSet)
require.NoError(t, err)
for _, key := range commonLabelKeys {
if key == LabelKeyKubernetesComponent {
continue
}
assert.Equal(t, ephemeralRunnerSet.Labels[key], ephemeralRunner.Labels[key])
}
assert.Equal(t, "runner", ephemeralRunner.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunner.Annotations[AnnotationKeyGitHubRunnerGroupName])
runnerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
pod := b.newEphemeralRunnerPod(context.TODO(), ephemeralRunner, runnerSecret)
for key := range ephemeralRunner.Labels {
assert.Equal(t, ephemeralRunner.Labels[key], pod.Labels[key])
}
}

View File

@@ -16,73 +16,73 @@ limitations under the License.
package actionsgithubcom
import (
"os"
"path/filepath"
"testing"
"github.com/onsi/ginkgo/config"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var (
cfg *rest.Config
k8sClient client.Client
testEnv *envtest.Environment
)
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
config.GinkgoConfig.FocusStrings = append(config.GinkgoConfig.FocusStrings, os.Getenv("GINKGO_FOCUS"))
RunSpecs(t, "Controller Suite")
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("../..", "config", "crd", "bases")},
}
// Avoids the following error:
// 2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller Reconciler error {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"}
testEnv.ControlPlane.GetAPIServer().Configure().
Append("allow-privileged", "true")
var err error
cfg, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = actionsv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
// import (
// "os"
// "path/filepath"
// "testing"
//
// "github.com/onsi/ginkgo/config"
//
// . "github.com/onsi/ginkgo/v2"
// . "github.com/onsi/gomega"
//
// actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
// "k8s.io/client-go/kubernetes/scheme"
// "k8s.io/client-go/rest"
// "sigs.k8s.io/controller-runtime/pkg/client"
// "sigs.k8s.io/controller-runtime/pkg/envtest"
// logf "sigs.k8s.io/controller-runtime/pkg/log"
// "sigs.k8s.io/controller-runtime/pkg/log/zap"
// // +kubebuilder:scaffold:imports
// )
//
// // These tests use Ginkgo (BDD-style Go testing framework). Refer to
// // http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
//
// var (
// cfg *rest.Config
// k8sClient client.Client
// testEnv *envtest.Environment
// )
//
// func TestAPIs(t *testing.T) {
// RegisterFailHandler(Fail)
//
// config.GinkgoConfig.FocusStrings = append(config.GinkgoConfig.FocusStrings, os.Getenv("GINKGO_FOCUS"))
//
// RunSpecs(t, "Controller Suite")
// }
//
// var _ = BeforeSuite(func() {
// logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
//
// By("bootstrapping test environment")
// testEnv = &envtest.Environment{
// CRDDirectoryPaths: []string{filepath.Join("../..", "config", "crd", "bases")},
// }
//
// // Avoids the following error:
// // 2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller Reconciler error {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"}
// testEnv.ControlPlane.GetAPIServer().Configure().
// Append("allow-privileged", "true")
//
// var err error
// cfg, err = testEnv.Start()
// Expect(err).ToNot(HaveOccurred())
// Expect(cfg).ToNot(BeNil())
//
// err = actionsv1alpha1.AddToScheme(scheme.Scheme)
// Expect(err).NotTo(HaveOccurred())
//
// // +kubebuilder:scaffold:scheme
//
// k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
// Expect(err).ToNot(HaveOccurred())
// Expect(k8sClient).ToNot(BeNil())
// })
//
// var _ = AfterSuite(func() {
// By("tearing down the test environment")
// err := testEnv.Stop()
// Expect(err).ToNot(HaveOccurred())
// })

View File

@@ -285,20 +285,16 @@ func secretDataToGitHubClientConfig(data map[string][]byte) (*github.Config, err
appID := string(data["github_app_id"])
if appID != "" {
conf.AppID, err = strconv.ParseInt(appID, 10, 64)
if err != nil {
return nil, err
}
conf.AppID, err = strconv.ParseInt(appID, 10, 64)
if err != nil {
return nil, err
}
instID := string(data["github_app_installation_id"])
if instID != "" {
conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
if err != nil {
return nil, err
}
conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
if err != nil {
return nil, err
}
conf.AppPrivateKey = string(data["github_app_private_key"])

View File

@@ -76,12 +76,9 @@ func TestNewRunnerPod(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "certs-client",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
SizeLimit: resource.NewScaledQuantity(1, resource.Mega),
},
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
},
@@ -140,7 +137,15 @@ func TestNewRunnerPod(t *testing.T) {
},
{
Name: "DOCKER_HOST",
Value: "unix:///run/docker/docker.sock",
Value: "tcp://localhost:2376",
},
{
Name: "DOCKER_TLS_VERIFY",
Value: "1",
},
{
Name: "DOCKER_CERT_PATH",
Value: "/certs/client",
},
},
VolumeMounts: []corev1.VolumeMount{
@@ -153,8 +158,9 @@ func TestNewRunnerPod(t *testing.T) {
MountPath: "/runner/_work",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "certs-client",
MountPath: "/certs/client",
ReadOnly: true,
},
},
ImagePullPolicy: corev1.PullAlways,
@@ -163,15 +169,10 @@ func TestNewRunnerPod(t *testing.T) {
{
Name: "docker",
Image: "default-docker-image",
Args: []string{
"dockerd",
"--host=unix:///run/docker/docker.sock",
"--group=$(DOCKER_GROUP_GID)",
},
Env: []corev1.EnvVar{
{
Name: "DOCKER_GROUP_GID",
Value: "121",
Name: "DOCKER_TLS_CERTDIR",
Value: "/certs",
},
},
VolumeMounts: []corev1.VolumeMount{
@@ -180,8 +181,8 @@ func TestNewRunnerPod(t *testing.T) {
MountPath: "/runner",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "certs-client",
MountPath: "/certs/client",
},
{
Name: "work",
@@ -484,12 +485,9 @@ func TestNewRunnerPod(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "certs-client",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
SizeLimit: resource.NewScaledQuantity(1, resource.Mega),
},
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
}
@@ -503,8 +501,9 @@ func TestNewRunnerPod(t *testing.T) {
MountPath: "/runner",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "certs-client",
MountPath: "/certs/client",
ReadOnly: true,
},
}
}),
@@ -528,12 +527,9 @@ func TestNewRunnerPod(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "certs-client",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
SizeLimit: resource.NewScaledQuantity(1, resource.Mega),
},
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
}
@@ -610,12 +606,9 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "certs-client",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
SizeLimit: resource.NewScaledQuantity(1, resource.Mega),
},
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
},
@@ -674,7 +667,15 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
},
{
Name: "DOCKER_HOST",
Value: "unix:///run/docker/docker.sock",
Value: "tcp://localhost:2376",
},
{
Name: "DOCKER_TLS_VERIFY",
Value: "1",
},
{
Name: "DOCKER_CERT_PATH",
Value: "/certs/client",
},
{
Name: "RUNNER_NAME",
@@ -695,8 +696,9 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
MountPath: "/runner/_work",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "certs-client",
MountPath: "/certs/client",
ReadOnly: true,
},
},
ImagePullPolicy: corev1.PullAlways,
@@ -705,15 +707,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
{
Name: "docker",
Image: "default-docker-image",
Args: []string{
"dockerd",
"--host=unix:///run/docker/docker.sock",
"--group=$(DOCKER_GROUP_GID)",
},
Env: []corev1.EnvVar{
{
Name: "DOCKER_GROUP_GID",
Value: "121",
Name: "DOCKER_TLS_CERTDIR",
Value: "/certs",
},
},
VolumeMounts: []corev1.VolumeMount{
@@ -722,8 +719,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
MountPath: "/runner",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "certs-client",
MountPath: "/certs/client",
},
{
Name: "work",
@@ -1082,10 +1079,6 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
Name: "work",
MountPath: "/runner/_work",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
},
},
},
},
@@ -1104,12 +1097,9 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "certs-client",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
SizeLimit: resource.NewScaledQuantity(1, resource.Mega),
},
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
workGenericEphemeralVolume,
@@ -1119,14 +1109,15 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
Name: "work",
MountPath: "/runner/_work",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
},
{
Name: "runner",
MountPath: "/runner",
},
{
Name: "certs-client",
MountPath: "/certs/client",
ReadOnly: true,
},
}
}),
},
@@ -1153,12 +1144,9 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "certs-client",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
SizeLimit: resource.NewScaledQuantity(1, resource.Mega),
},
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
workGenericEphemeralVolume,

View File

@@ -30,7 +30,6 @@ import (
"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
@@ -1002,35 +1001,6 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
)
}
// explicitly invoke `dockerd` to avoid automatic TLS / TCP binding
dockerdContainer.Args = append([]string{
"dockerd",
"--host=unix:///run/docker/docker.sock",
}, dockerdContainer.Args...)
// this must match a GID for the user in the runner image
// default matches GitHub Actions infra (and default runner images
// for actions-runner-controller) so typically should not need to be
// overridden
if ok, _ := envVarPresent("DOCKER_GROUP_GID", dockerdContainer.Env); !ok {
dockerdContainer.Env = append(dockerdContainer.Env,
corev1.EnvVar{
Name: "DOCKER_GROUP_GID",
Value: "121",
})
}
dockerdContainer.Args = append(dockerdContainer.Args, "--group=$(DOCKER_GROUP_GID)")
// ideally, we could mount the socket directly at `/var/run/docker.sock`
// to use the default, but that's not practical since it won't exist
// when the container starts, so can't use subPath on the volume mount
runnerContainer.Env = append(runnerContainer.Env,
corev1.EnvVar{
Name: "DOCKER_HOST",
Value: "unix:///run/docker/docker.sock",
},
)
if ok, _ := workVolumePresent(pod.Spec.Volumes); !ok {
pod.Spec.Volumes = append(pod.Spec.Volumes,
corev1.Volume{
@@ -1044,12 +1014,9 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
pod.Spec.Volumes = append(pod.Spec.Volumes,
corev1.Volume{
Name: "docker-sock",
Name: "certs-client",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
SizeLimit: resource.NewScaledQuantity(1, resource.Mega),
},
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
)
@@ -1063,14 +1030,28 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
)
}
if ok, _ := volumeMountPresent("docker-sock", runnerContainer.VolumeMounts); !ok {
runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
corev1.VolumeMount{
Name: "docker-sock",
MountPath: "/run/docker",
},
)
}
runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
corev1.VolumeMount{
Name: "certs-client",
MountPath: "/certs/client",
ReadOnly: true,
},
)
runnerContainer.Env = append(runnerContainer.Env, []corev1.EnvVar{
{
Name: "DOCKER_HOST",
Value: "tcp://localhost:2376",
},
{
Name: "DOCKER_TLS_VERIFY",
Value: "1",
},
{
Name: "DOCKER_CERT_PATH",
Value: "/certs/client",
},
}...)
// Determine the volume mounts assigned to the docker sidecar. In case extra mounts are included in the RunnerSpec, append them to the standard
// set of mounts. See https://github.com/actions/actions-runner-controller/issues/435 for context.
@@ -1079,16 +1060,14 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
Name: runnerVolumeName,
MountPath: runnerVolumeMountPath,
},
{
Name: "certs-client",
MountPath: "/certs/client",
},
}
if p, _ := volumeMountPresent("docker-sock", dockerdContainer.VolumeMounts); !p {
dockerVolumeMounts = append(dockerVolumeMounts, corev1.VolumeMount{
Name: "docker-sock",
MountPath: "/run/docker",
})
}
if p, _ := workVolumeMountPresent(dockerdContainer.VolumeMounts); !p {
mountPresent, _ := workVolumeMountPresent(dockerdContainer.VolumeMounts)
if !mountPresent {
dockerVolumeMounts = append(dockerVolumeMounts, corev1.VolumeMount{
Name: "work",
MountPath: workDir,
@@ -1099,6 +1078,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
dockerdContainer.Image = defaultDockerImage
}
dockerdContainer.Env = append(dockerdContainer.Env, corev1.EnvVar{
Name: "DOCKER_TLS_CERTDIR",
Value: "/certs",
})
if dockerdContainer.SecurityContext == nil {
dockerdContainer.SecurityContext = &corev1.SecurityContext{
Privileged: &privileged,
@@ -1289,15 +1273,6 @@ func removeFinalizer(finalizers []string, finalizerName string) ([]string, bool)
return result, removed
}
func envVarPresent(name string, items []corev1.EnvVar) (bool, int) {
for index, item := range items {
if item.Name == name {
return true, index
}
}
return false, -1
}
func workVolumePresent(items []corev1.Volume) (bool, int) {
for index, item := range items {
if item.Name == "work" {
@@ -1308,16 +1283,12 @@ func workVolumePresent(items []corev1.Volume) (bool, int) {
}
func workVolumeMountPresent(items []corev1.VolumeMount) (bool, int) {
return volumeMountPresent("work", items)
}
func volumeMountPresent(name string, items []corev1.VolumeMount) (bool, int) {
for index, item := range items {
if item.Name == name {
if item.Name == "work" {
return true, index
}
}
return false, -1
return false, 0
}
func applyWorkVolumeClaimTemplateToPod(pod *corev1.Pod, workVolumeClaimTemplate *v1alpha1.WorkVolumeClaimTemplate, workDir string) error {

View File

@@ -1,84 +0,0 @@
# Improve ARC workflows for autoscaling runner sets
**Date**: 2023-03-17
**Status**: Done
## Context
In the [actions-runner-controller](https://github.com/actions/actions-runner-controller)
repository we essentially have two projects living side by side: the "legacy"
actions-runner-controller and the new one GitHub is supporting
(gha-runner-scale-set). To hasten progress we relied on existing workflows and
added some of our own (e.g. end-to-end tests). We have now reached a point where it's
sort of confusing what does what and why, not to mention the increased running
times of some of those workflows and GHA-related flaky tests getting in the
way of legacy ARC and vice versa. The three main areas we want to cover are: Go
code, Kubernetes manifests / Helm charts, and E2E tests.
## Go code
At the moment we have three workflows that validate Go code:
- [golangci-lint](https://github.com/actions/actions-runner-controller/blob/34f3878/.github/workflows/golangci-lint.yaml):
this is a collection of linters that currently runs on all PRs and push to
master
- [Validate ARC](https://github.com/actions/actions-runner-controller/blob/01e9dd3/.github/workflows/validate-arc.yaml):
this is a bit of a catch-all workflow: other than Go tests, it also validates
Kubernetes manifests and runs `go generate`, `go fmt` and `go vet`
- [Run CodeQL](https://github.com/actions/actions-runner-controller/blob/a095f0b66aad5fbc8aa8d7032f3299233e4c84d2/.github/workflows/run-codeql.yaml)
### Proposal
I think having one `Go` workflow that collects everything-Go would help a ton with
reliability and understandability of what's going on. This shouldn't be limited
to the GHA-supported mode, as there are changes that, even if made outside the GHA
code base, could affect us (such as a dependency update).
This workflow should only run on changes to `*.go` files, `go.mod` and `go.sum`.
It should have these jobs, aiming to cover all existing functionality and
eliminate some duplication:
- `test`: run all Go tests in the project. We currently use the `-short` and
`-coverprofile` flags: while `-short` is used to skip [old ARC E2E
tests](https://github.com/actions/actions-runner-controller/blob/master/test/e2e/e2e_test.go#L85-L87),
`-coverprofile` is adding to the test time without really giving us any value
in return. We should also start using `actions/setup-go@v4` to take advantage
of caching (it would speed up our tests by a lot) or enable it on `v3` if we
have a strong reason not to upgrade. We should keep ignoring our E2E tests too,
as those will be run elsewhere (either use `Short` there too, as in the sketch
after this list, or ignore the package like we currently do). As a dependency
for tests, this job needs to run `make manifests` first: we should fail there
and then if there is a diff.
- `fmt`: we currently run `go fmt ./...` as part of `Validate ARC` but do
nothing with the results. We should fail in case of a diff. We don't need
caching for this job.
- `lint`: this corresponds to what's currently the `golangci-lint` workflow (this
also covers `go vet`, which currently happens as part of `Validate ARC` too)
- `generate`: the current behaviour for this is actually quite risky: we
generate our code in the `Validate ARC` workflow and use the results to run the
tests, but we don't validate that up-to-date generated code is checked in. This
job should run `go generate` and fail on a diff.
- `vulncheck`: **EDIT: this is covered by CodeQL** the Go team is maintaining [`govulncheck`](https://go.dev/blog/vuln), a tool that recursively
analyzes all function calls in Go code and spots vulnerabilities in the call
stack.
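To make the `-short` convention above concrete, here is a minimal sketch (test name and skip message are hypothetical) of how an E2E test can exclude itself from short runs, which is what the existing ARC E2E tests already do:

```go
package e2e_test

import "testing"

// TestE2EWorkflow stands in for a full end-to-end scenario; under
// `go test -short` it skips itself so unit-test jobs stay fast.
func TestE2EWorkflow(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping E2E test in short mode")
	}
	// ... drive the end-to-end workflow here ...
}
```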
## Kubernetes manifests / Helm charts
We have [recently separated](https://github.com/actions/actions-runner-controller/commit/bd9f32e3540663360cf47f04acad26e6010f772e)
Helm chart validation and we validate up-to-dateness of manifests as part of `Go
/ test`.
## End to end tests
These tests are giving us really good coverage and should be one of the main
actors when it comes to trusting our releases. Two improvements that could be
done here are:
- renaming the workflow to `GHA E2E`: since renaming our resources, the `gha`
prefix has been used to identify things related to the mode GitHub supports,
and these jobs strictly validate the GitHub mode _only_. Having a shorter name
allows for more readability of the various scenarios (e.g. `GHA E2E /
single-namespace-setup`).
- the test currently monitors and validates the number of pods spawning during
the workflow, but not the outcome of the workflow. While it is not necessary to
look at pod specifics, we should at least guarantee that the workflow can
conclude successfully.

View File

@@ -1,5 +1,7 @@
# Autoscaling Runner Scale Sets mode
**⚠️ This mode is currently only available for a limited number of organizations.**
This new autoscaling mode brings numerous enhancements (described in the following sections) that will make your experience more reliable and secure.
## How it works
@@ -36,7 +38,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
--namespace "${NAMESPACE}" \
--create-namespace \
oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \
--version 0.4.0
--version 0.3.0
```
1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app).
@@ -57,7 +59,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
--create-namespace \
--set githubConfigUrl="${GITHUB_CONFIG_URL}" \
--set githubConfigSecret.github_token="${GITHUB_PAT}" \
oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0
oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.3.0
```
```bash
@@ -75,7 +77,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
--set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \
--set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \
--set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \
oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0
oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.3.0
```
1. Check your installation. If everything went well, you should see the following:
@@ -84,8 +86,8 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
$ helm list -n "${NAMESPACE}"
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed gha-runner-scale-set-controller-0.4.0 preview
arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed gha-runner-scale-set-0.4.0 0.4.0
arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed gha-runner-scale-set-controller-0.3.0 preview
arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed gha-runner-scale-set-0.3.0 0.3.0
```
```bash
@@ -140,7 +142,7 @@ Upgrading actions-runner-controller requires a few extra steps because CRDs will
```bash
helm pull oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \
--version 0.4.0 \
--version 0.3.0 \
--untar && \
kubectl replace -f <PATH>/gha-runner-scale-set-controller/crds/
```
@@ -149,36 +151,16 @@ Upgrading actions-runner-controller requires a few extra steps because CRDs will
## Troubleshooting
### I'm using the charts from the `master` branch and the controller is not working
The `master` branch is highly unstable! We offer no guarantees that the charts in the `master` branch will work at any given time. If you're using the charts from the `master` branch, you should expect to encounter issues. Please use the latest release instead.
### Controller pod is running but the runner set listener pod is not
You need to inspect the logs of the controller first and see if there are any errors. If there are no errors, and the runner set listener pod is still not running, you need to make sure that the **controller pod has access to the Kubernetes API server in your cluster!**
You'll see something similar to the following in the logs of the controller pod:
```log
kubectl logs <controller_pod_name> -c manager
17:35:28.661069 1 request.go:690] Waited for 1.032376652s due to client-side throttling, not priority and fairness, request: GET:https://10.0.0.1:443/apis/monitoring.coreos.com/v1alpha1?timeout=32s
2023-03-15T17:35:29Z INFO starting manager
```
If you have a proxy configured or you're using a sidecar proxy that's automatically injected (think [Istio](https://istio.io/)), you need to make sure it's configured appropriately to allow traffic from the controller container (manager) to the Kubernetes API server.
### Check the logs
You can check the logs of the controller pod using the following command:
```bash
# Controller logs
kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-runner-scale-set-controller
```
$ kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-runner-scale-set-controller
```bash
# Runner set listener logs
kubectl logs -n "${NAMESPACE}" -l actions.github.com/scale-set-namespace=arc-systems -l actions.github.com/scale-set-name=arc-runner-set
kubectl logs -n "${NAMESPACE}" -l auto-scaling-runner-set-namespace=arc-systems -l auto-scaling-runner-set-name=arc-runner-set
```
### Naming error: `Name must have up to characters`
@@ -199,73 +181,8 @@ Error: INSTALLATION FAILED: execution error at (gha-runner-scale-set/templates/a
Verify that the secret you provided is correct and that the `githubConfigUrl` you provided is accurate.
### Access to the path `/home/runner/_work/_tool` is denied error
You might see this error if you're using kubernetes mode with persistent volumes. This is because the runner container is running with a non-root user and is causing a permissions mismatch with the mounted volume.
To fix this, you can either:
1. Use a volume type that supports `securityContext.fsGroup` (`hostPath` volumes don't support it; `local` volumes do, as do other types). Update the `fsGroup` of your runner pod to match the GID of the runner. You can do that by updating the `gha-runner-scale-set` helm chart values to include the following:
```yaml
spec:
securityContext:
fsGroup: 123
containers:
- name: runner
image: ghcr.io/actions/actions-runner:<VERSION> # Replace <VERSION> with the version you want to use
command: ["/home/runner/run.sh"]
```
1. If updating the `securityContext` of your runner pod is not a viable solution, you can work around the issue by using `initContainers` to change the mounted volume's ownership, as follows:
```yaml
template:
spec:
initContainers:
- name: kube-init
image: ghcr.io/actions/actions-runner:latest
command: ["sudo", "chown", "-R", "1001:123", "/home/runner/_work"]
volumeMounts:
- name: work
mountPath: /home/runner/_work
containers:
- name: runner
image: ghcr.io/actions/actions-runner:latest
command: ["/home/runner/run.sh"]
```
## Changelog
### v0.4.0
#### ⚠️ Warning
This release contains a major change related to the way permissions are
applied to the manager ([#2276](https://github.com/actions/actions-runner-controller/pull/2276) and [#2363](https://github.com/actions/actions-runner-controller/pull/2363)).
Please evaluate these changes carefully before upgrading.
#### Major changes
1. Surface EphemeralRunnerSet stats to AutoscalingRunnerSet [#2382](https://github.com/actions/actions-runner-controller/pull/2382)
1. Improved security posture by removing list/watch secrets permission from manager cluster role
[#2276](https://github.com/actions/actions-runner-controller/pull/2276)
1. Improved security posture by delaying role/rolebinding creation to gha-runner-scale-set during installation
[#2363](https://github.com/actions/actions-runner-controller/pull/2363)
1. Improved security posture by supporting watching a single namespace from the controller
[#2374](https://github.com/actions/actions-runner-controller/pull/2374)
1. Added labels to AutoscalingRunnerSet subresources to allow easier inspection [#2391](https://github.com/actions/actions-runner-controller/pull/2391)
1. Fixed bug preventing env variables from being specified
[#2450](https://github.com/actions/actions-runner-controller/pull/2450)
1. Enhance quickstart troubleshooting guides
[#2435](https://github.com/actions/actions-runner-controller/pull/2435)
1. Fixed handling of the extra dind container so it is ignored when container mode type is "dind"
[#2418](https://github.com/actions/actions-runner-controller/pull/2418)
1. Added additional cleanup finalizers [#2433](https://github.com/actions/actions-runner-controller/pull/2433)
1. gha-runner-scale-set listener pod inherits the ImagePullPolicy from the manager pod [#2477](https://github.com/actions/actions-runner-controller/pull/2477)
1. Treat `.ghe.com` domain as hosted environment [#2480](https://github.com/actions/actions-runner-controller/pull/2480)
### v0.3.0
#### Major changes
@@ -290,3 +207,56 @@ Please evaluate these changes carefully before upgrading.
1. Fixed a bug that was preventing runner scale sets from being removed from the backend when they were deleted from the cluster [#2255](https://github.com/actions/actions-runner-controller/pull/2255) [#2223](https://github.com/actions/actions-runner-controller/pull/2223)
1. Fixed bugs with the helm chart definitions preventing certain values from being set [#2222](https://github.com/actions/actions-runner-controller/pull/2222)
1. Fixed a bug that prevented the configuration of a runner group for a runner scale set [#2216](https://github.com/actions/actions-runner-controller/pull/2216)
#### Log
- [1c7b7f4](https://github.com/actions/actions-runner-controller/commit/1c7b7f4) Bump arc-2 chart version and prepare 0.2.0 release [#2313](https://github.com/actions/actions-runner-controller/pull/2313)
- [73e22a1](https://github.com/actions/actions-runner-controller/commit/73e22a1) Disable metrics serving in proxy tests [#2307](https://github.com/actions/actions-runner-controller/pull/2307)
- [9b44f00](https://github.com/actions/actions-runner-controller/commit/9b44f00) Documentation corrections [#2116](https://github.com/actions/actions-runner-controller/pull/2116)
- [6b4250c](https://github.com/actions/actions-runner-controller/commit/6b4250c) Add support for proxy [#2286](https://github.com/actions/actions-runner-controller/pull/2286)
- [ced8822](https://github.com/actions/actions-runner-controller/commit/ced8822) Resolves the erroneous webhook scale down due to check runs [#2119](https://github.com/actions/actions-runner-controller/pull/2119)
- [44c06c2](https://github.com/actions/actions-runner-controller/commit/44c06c2) fix: case-insensitive webhook label matching [#2302](https://github.com/actions/actions-runner-controller/pull/2302)
- [4103fe3](https://github.com/actions/actions-runner-controller/commit/4103fe3) Use DOCKER_IMAGE_NAME instead of NAME to avoid conflict. [#2303](https://github.com/actions/actions-runner-controller/pull/2303)
- [a44fe04](https://github.com/actions/actions-runner-controller/commit/a44fe04) Fix manager crashloopback for ARC deployments without scaleset-related controllers [#2293](https://github.com/actions/actions-runner-controller/pull/2293)
- [274d0c8](https://github.com/actions/actions-runner-controller/commit/274d0c8) Added ability to configure log level from chart values [#2252](https://github.com/actions/actions-runner-controller/pull/2252)
- [256e08e](https://github.com/actions/actions-runner-controller/commit/256e08e) Ask runner to wait for docker daemon from DinD. [#2292](https://github.com/actions/actions-runner-controller/pull/2292)
- [f677fd5](https://github.com/actions/actions-runner-controller/commit/f677fd5) doc: Fix chart name for helm commands in docs [#2287](https://github.com/actions/actions-runner-controller/pull/2287)
- [d962714](https://github.com/actions/actions-runner-controller/commit/d962714) Fix helm chart when containerMode.type=dind. [#2291](https://github.com/actions/actions-runner-controller/pull/2291)
- [3886f28](https://github.com/actions/actions-runner-controller/commit/3886f28) Add EKS test environment Terraform templates [#2290](https://github.com/actions/actions-runner-controller/pull/2290)
- [dab9004](https://github.com/actions/actions-runner-controller/commit/dab9004) Added workflow to be triggered via rest api dispatch in e2e test [#2283](https://github.com/actions/actions-runner-controller/pull/2283)
- [dd8ec1a](https://github.com/actions/actions-runner-controller/commit/dd8ec1a) Add testserver package [#2281](https://github.com/actions/actions-runner-controller/pull/2281)
- [8e52a6d](https://github.com/actions/actions-runner-controller/commit/8e52a6d) EphemeralRunner: On cleanup, if pod is pending, delete from service [#2255](https://github.com/actions/actions-runner-controller/pull/2255)
- [9990243](https://github.com/actions/actions-runner-controller/commit/9990243) Early return if finalizer does not exist to make it more readable [#2262](https://github.com/actions/actions-runner-controller/pull/2262)
- [0891981](https://github.com/actions/actions-runner-controller/commit/0891981) Port ADRs from internal repo [#2267](https://github.com/actions/actions-runner-controller/pull/2267)
- [facae69](https://github.com/actions/actions-runner-controller/commit/facae69) Remove un-required permissions for the manager-role of the new `AutoScalingRunnerSet` [#2260](https://github.com/actions/actions-runner-controller/pull/2260)
- [8f62e35](https://github.com/actions/actions-runner-controller/commit/8f62e35) Add options to multi client [#2257](https://github.com/actions/actions-runner-controller/pull/2257)
- [55951c2](https://github.com/actions/actions-runner-controller/commit/55951c2) Add new workflow to automate runner updates [#2247](https://github.com/actions/actions-runner-controller/pull/2247)
- [c4297d2](https://github.com/actions/actions-runner-controller/commit/c4297d2) Avoid deleting scale set if annotation is not parsable or if it does not exist [#2239](https://github.com/actions/actions-runner-controller/pull/2239)
- [0774f06](https://github.com/actions/actions-runner-controller/commit/0774f06) ADR: automate runner updates [#2244](https://github.com/actions/actions-runner-controller/pull/2244)
- [92ab11b](https://github.com/actions/actions-runner-controller/commit/92ab11b) Use UUID v5 for client identifiers [#2241](https://github.com/actions/actions-runner-controller/pull/2241)
- [7414dc6](https://github.com/actions/actions-runner-controller/commit/7414dc6) Add Identifier to actions.Client [#2237](https://github.com/actions/actions-runner-controller/pull/2237)
- [34efb9d](https://github.com/actions/actions-runner-controller/commit/34efb9d) Add documentation to update ARC with prometheus CRDs needed by actions metrics server [#2209](https://github.com/actions/actions-runner-controller/pull/2209)
- [fbad561](https://github.com/actions/actions-runner-controller/commit/fbad561) Allow provide pre-defined kubernetes secret when helm-install AutoScalingRunnerSet [#2234](https://github.com/actions/actions-runner-controller/pull/2234)
- [a5cef7e](https://github.com/actions/actions-runner-controller/commit/a5cef7e) Resolve CI break due to bad merge. [#2236](https://github.com/actions/actions-runner-controller/pull/2236)
- [1f4fe46](https://github.com/actions/actions-runner-controller/commit/1f4fe46) Delete RunnerScaleSet on service when AutoScalingRunnerSet is deleted. [#2223](https://github.com/actions/actions-runner-controller/pull/2223)
- [067686c](https://github.com/actions/actions-runner-controller/commit/067686c) Fix typos and markdown structure in troubleshooting guide [#2148](https://github.com/actions/actions-runner-controller/pull/2148)
- [df12e00](https://github.com/actions/actions-runner-controller/commit/df12e00) Remove network requests from actions.NewClient [#2219](https://github.com/actions/actions-runner-controller/pull/2219)
- [cc26593](https://github.com/actions/actions-runner-controller/commit/cc26593) Skip CT when list-changed=false. [#2228](https://github.com/actions/actions-runner-controller/pull/2228)
- [835eac7](https://github.com/actions/actions-runner-controller/commit/835eac7) Fix helm charts when pass values file. [#2222](https://github.com/actions/actions-runner-controller/pull/2222)
- [01e9dd3](https://github.com/actions/actions-runner-controller/commit/01e9dd3) Update Validate ARC workflow to go 1.19 [#2220](https://github.com/actions/actions-runner-controller/pull/2220)
- [8038181](https://github.com/actions/actions-runner-controller/commit/8038181) Allow update runner group for AutoScalingRunnerSet [#2216](https://github.com/actions/actions-runner-controller/pull/2216)
- [219ba5b](https://github.com/actions/actions-runner-controller/commit/219ba5b) chore(deps): bump sigs.k8s.io/controller-runtime from 0.13.1 to 0.14.1 [#2132](https://github.com/actions/actions-runner-controller/pull/2132)
- [b09e3a2](https://github.com/actions/actions-runner-controller/commit/b09e3a2) Return error for non-existing runner group. [#2215](https://github.com/actions/actions-runner-controller/pull/2215)
- [7ea60e4](https://github.com/actions/actions-runner-controller/commit/7ea60e4) Fix intermittent image push failures to GHCR [#2214](https://github.com/actions/actions-runner-controller/pull/2214)
- [c8918f5](https://github.com/actions/actions-runner-controller/commit/c8918f5) Fix URL for authenticating using a GitHub app [#2206](https://github.com/actions/actions-runner-controller/pull/2206)
- [d57d17f](https://github.com/actions/actions-runner-controller/commit/d57d17f) Add support for custom CA in actions.Client [#2199](https://github.com/actions/actions-runner-controller/pull/2199)
- [6e69c75](https://github.com/actions/actions-runner-controller/commit/6e69c75) chore(deps): bump github.com/hashicorp/go-retryablehttp from 0.7.1 to 0.7.2 [#2203](https://github.com/actions/actions-runner-controller/pull/2203)
- [882bfab](https://github.com/actions/actions-runner-controller/commit/882bfab) Renaming autoScaling to autoscaling in tests matching the convention [#2201](https://github.com/actions/actions-runner-controller/pull/2201)
- [3327f62](https://github.com/actions/actions-runner-controller/commit/3327f62) Refactor actions.Client with options to help extensibility [#2193](https://github.com/actions/actions-runner-controller/pull/2193)
- [282f2dd](https://github.com/actions/actions-runner-controller/commit/282f2dd) chore(deps): bump github.com/onsi/gomega from 1.20.2 to 1.25.0 [#2169](https://github.com/actions/actions-runner-controller/pull/2169)
- [d67f808](https://github.com/actions/actions-runner-controller/commit/d67f808) Include nikola-jokic in CODEOWNERS file [#2184](https://github.com/actions/actions-runner-controller/pull/2184)
- [4932412](https://github.com/actions/actions-runner-controller/commit/4932412) Fix L0 test to make it more reliable. [#2178](https://github.com/actions/actions-runner-controller/pull/2178)
- [6da1cde](https://github.com/actions/actions-runner-controller/commit/6da1cde) Update runner version to 2.301.1 [#2182](https://github.com/actions/actions-runner-controller/pull/2182)
- [f9bae70](https://github.com/actions/actions-runner-controller/commit/f9bae70) Add distinct namespace best practice note [#2181](https://github.com/actions/actions-runner-controller/pull/2181)
- [05a3908](https://github.com/actions/actions-runner-controller/commit/05a3908) Add arc-2 quickstart guide [#2180](https://github.com/actions/actions-runner-controller/pull/2180)
- [606ed1b](https://github.com/actions/actions-runner-controller/commit/606ed1b) Add Repository information to Runner Status [#2093](https://github.com/actions/actions-runner-controller/pull/2093)

View File

@@ -3,7 +3,6 @@ package actions
import (
"fmt"
"net/url"
"os"
"strings"
)
@@ -35,7 +34,9 @@ func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) {
return nil, err
}
isHosted := isHostedGitHubURL(u)
isHosted := u.Host == "github.com" ||
u.Host == "www.github.com" ||
u.Host == "github.localhost"
configURL := &GitHubConfig{
ConfigURL: u,
@@ -75,35 +76,23 @@ func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) {
func (c *GitHubConfig) GitHubAPIURL(path string) *url.URL {
result := &url.URL{
Scheme: c.ConfigURL.Scheme,
Host: c.ConfigURL.Host, // default for Enterprise mode
Path: "/api/v3", // default for Enterprise mode
}
isHosted := isHostedGitHubURL(c.ConfigURL)
if isHosted {
switch c.ConfigURL.Host {
// Hosted
case "github.com", "github.localhost":
result.Host = fmt.Sprintf("api.%s", c.ConfigURL.Host)
result.Path = ""
// re-routing www.github.com to api.github.com
case "www.github.com":
result.Host = "api.github.com"
if strings.EqualFold("www.github.com", c.ConfigURL.Host) {
// re-routing www.github.com to api.github.com
result.Host = "api.github.com"
}
// Enterprise
default:
result.Host = c.ConfigURL.Host
result.Path = "/api/v3"
}
result.Path += path
return result
}
func isHostedGitHubURL(u *url.URL) bool {
_, forceGhes := os.LookupEnv("GITHUB_ACTIONS_FORCE_GHES")
if forceGhes {
return false
}
return strings.EqualFold(u.Host, "github.com") ||
strings.EqualFold(u.Host, "www.github.com") ||
strings.EqualFold(u.Host, "github.localhost") ||
strings.HasSuffix(u.Host, ".ghe.com")
}
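
The two sides of this hunk disagree only on which hosts count as "hosted": the richer variant also treats `*.ghe.com` as hosted (unless `GITHUB_ACTIONS_FORCE_GHES` is set), while the slimmer one hard-codes `github.com`, `www.github.com`, and `github.localhost`. Below is a minimal, self-contained sketch of the resulting API routing; it reimplements the mapping locally instead of importing the repo's `actions` package, so the helper names are illustrative.

```go
package main

import (
	"fmt"
	"net/url"
	"os"
	"strings"
)

// isHosted mirrors isHostedGitHubURL from the richer side of the diff:
// github.com-style hosts and *.ghe.com are "hosted" unless the
// GITHUB_ACTIONS_FORCE_GHES escape hatch forces GHES-style routing.
func isHosted(u *url.URL) bool {
	if _, force := os.LookupEnv("GITHUB_ACTIONS_FORCE_GHES"); force {
		return false
	}
	return strings.EqualFold(u.Host, "github.com") ||
		strings.EqualFold(u.Host, "www.github.com") ||
		strings.EqualFold(u.Host, "github.localhost") ||
		strings.HasSuffix(u.Host, ".ghe.com")
}

// apiURL sketches GitHubAPIURL: hosted hosts move to an api. subdomain with
// no /api/v3 prefix (www.github.com is re-routed to api.github.com), while
// anything else is treated as GHES and keeps the host plus /api/v3.
func apiURL(config *url.URL, path string) *url.URL {
	result := &url.URL{Scheme: config.Scheme, Host: config.Host, Path: "/api/v3"}
	if isHosted(config) {
		result.Host = "api." + config.Host
		if strings.EqualFold(config.Host, "www.github.com") {
			result.Host = "api.github.com"
		}
		result.Path = ""
	}
	result.Path += path
	return result
}

func main() {
	for _, in := range []string{
		"https://github.com/org/repo",
		"https://www.github.com/org/repo",
		"https://corp.ghe.com/org/repo",
		"https://ghes.example.com/org/repo",
	} {
		u, _ := url.Parse(in)
		fmt.Println(in, "->", apiURL(u, "/some/path"))
	}
}
```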

View File

@@ -3,7 +3,6 @@ package actions_test
import (
"errors"
"net/url"
"os"
"strings"
"testing"
@@ -118,16 +117,6 @@ func TestGitHubConfig(t *testing.T) {
IsHosted: false,
},
},
{
configURL: "https://my-ghes.ghe.com/org/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
Enterprise: "",
Organization: "org",
Repository: "",
IsHosted: true,
},
},
}
for _, test := range tests {
@@ -162,35 +151,9 @@ func TestGitHubConfig_GitHubAPIURL(t *testing.T) {
t.Run("when hosted", func(t *testing.T) {
config, err := actions.ParseGitHubConfigFromURL("https://github.com/org/repo")
require.NoError(t, err)
assert.True(t, config.IsHosted)
result := config.GitHubAPIURL("/some/path")
assert.Equal(t, "https://api.github.com/some/path", result.String())
})
t.Run("when hosted with ghe.com", func(t *testing.T) {
config, err := actions.ParseGitHubConfigFromURL("https://github.ghe.com/org/repo")
require.NoError(t, err)
assert.True(t, config.IsHosted)
result := config.GitHubAPIURL("/some/path")
assert.Equal(t, "https://api.github.ghe.com/some/path", result.String())
})
t.Run("when not hosted", func(t *testing.T) {
config, err := actions.ParseGitHubConfigFromURL("https://ghes.com/org/repo")
require.NoError(t, err)
assert.False(t, config.IsHosted)
result := config.GitHubAPIURL("/some/path")
assert.Equal(t, "https://ghes.com/api/v3/some/path", result.String())
})
t.Run("when not hosted with ghe.com", func(t *testing.T) {
os.Setenv("GITHUB_ACTIONS_FORCE_GHES", "1")
defer os.Unsetenv("GITHUB_ACTIONS_FORCE_GHES")
config, err := actions.ParseGitHubConfigFromURL("https://test.ghe.com/org/repo")
require.NoError(t, err)
assert.False(t, config.IsHosted)
result := config.GitHubAPIURL("/some/path")
assert.Equal(t, "https://test.ghe.com/api/v3/some/path", result.String())
})
t.Run("when not hosted", func(t *testing.T) {})
}
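
The removed subtests toggle GHES-style routing through the process environment with `os.Setenv` plus a deferred `Unsetenv`. A sketch of the same check written with `t.Setenv`, which scopes and restores the variable automatically, is below; the import path for the `actions` package is assumed from the repo layout.

```go
package actions_test

import (
	"testing"

	"github.com/actions/actions-runner-controller/github/actions" // assumed import path
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestForceGHESRouting sketches the removed "not hosted with ghe.com" case:
// with GITHUB_ACTIONS_FORCE_GHES set, a *.ghe.com config URL is treated as
// GHES and routed through /api/v3 on the same host.
func TestForceGHESRouting(t *testing.T) {
	t.Setenv("GITHUB_ACTIONS_FORCE_GHES", "1")

	config, err := actions.ParseGitHubConfigFromURL("https://test.ghe.com/org/repo")
	require.NoError(t, err)
	assert.False(t, config.IsHosted)

	result := config.GitHubAPIURL("/some/path")
	assert.Equal(t, "https://test.ghe.com/api/v3/some/path", result.String())
}
```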

go.mod
View File

@@ -1,6 +1,6 @@
module github.com/actions/actions-runner-controller
go 1.20
go 1.19
require (
github.com/bradleyfalzon/ghinstallation/v2 v2.1.0
@@ -22,6 +22,7 @@ require (
github.com/onsi/gomega v1.27.2
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.14.0
github.com/rentziass/eventually v0.2.0
github.com/stretchr/testify v1.8.2
github.com/teambition/rrule-go v1.8.2
go.uber.org/multierr v1.7.0

go.sum
View File

@@ -333,6 +333,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rentziass/eventually v0.2.0 h1:zC6GB2nRRlARqfngNGUmdKG5Z6cOwyc0BwleF8kcZHY=
github.com/rentziass/eventually v0.2.0/go.mod h1:jiSDJFv0sra6DK66duTH5V4kCfqd9OvmFtflPOExNVI=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=

View File

@@ -170,13 +170,6 @@ func main() {
}
}
listenerPullPolicy := os.Getenv("CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY")
if ok := actionsgithubcom.SetListenerImagePullPolicy(listenerPullPolicy); ok {
log.Info("AutoscalingListener image pull policy changed", "ImagePullPolicy", listenerPullPolicy)
} else {
log.Info("Using default AutoscalingListener image pull policy", "ImagePullPolicy", actionsgithubcom.DefaultScaleSetListenerImagePullPolicy)
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
NewCache: newCache,
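
The dropped block reads CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY and hands it to actionsgithubcom.SetListenerImagePullPolicy, whose boolean return reports whether the override was accepted. The real implementation isn't shown in this diff, so the sketch below is a hypothetical stand-in that validates against the Kubernetes pull policies before falling back to a default.

```go
package main

import (
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
)

const defaultListenerImagePullPolicy = corev1.PullIfNotPresent

var listenerImagePullPolicy = defaultListenerImagePullPolicy

// setListenerImagePullPolicy is a hypothetical stand-in for
// actionsgithubcom.SetListenerImagePullPolicy: it accepts only the three
// Kubernetes pull policies and reports whether the override was applied.
func setListenerImagePullPolicy(v string) bool {
	switch p := corev1.PullPolicy(v); p {
	case corev1.PullAlways, corev1.PullIfNotPresent, corev1.PullNever:
		listenerImagePullPolicy = p
		return true
	default:
		return false
	}
}

func main() {
	v := os.Getenv("CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY")
	if setListenerImagePullPolicy(v) {
		fmt.Println("AutoscalingListener image pull policy changed:", listenerImagePullPolicy)
	} else {
		fmt.Println("Using default AutoscalingListener image pull policy:", defaultListenerImagePullPolicy)
	}
}
```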

View File

@@ -136,27 +136,12 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in
// job_conclusion -> (neutral, success, skipped, cancelled, timed_out, action_required, failure)
githubWorkflowJobConclusionsTotal.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Inc()
var (
exitCode = "na"
runTimeSeconds *float64
)
// We need to do our best not to fail the whole event processing
// when the user provided no GitHub API credentials.
// See https://github.com/actions/actions-runner-controller/issues/2424
if reader.GitHubClient != nil {
parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e)
if err != nil {
log.Error(err, "reading workflow job log")
return
}
exitCode = parseResult.ExitCode
s := parseResult.RunTime.Seconds()
runTimeSeconds = &s
log.WithValues(keysAndValues...).Info("reading workflow_job logs", "exit_code", exitCode)
parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e)
if err != nil {
log.Error(err, "reading workflow job log")
return
} else {
log.Info("reading workflow_job logs", keysAndValues...)
}
if *e.WorkflowJob.Conclusion == "failure" {
@@ -182,20 +167,18 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in
}
if *conclusion == "timed_out" {
failedStep = fmt.Sprint(i)
exitCode = "timed_out"
parseResult.ExitCode = "timed_out"
break
}
}
githubWorkflowJobFailuresTotal.With(
extraLabel("failed_step", failedStep,
extraLabel("exit_code", exitCode, labels),
extraLabel("exit_code", parseResult.ExitCode, labels),
),
).Inc()
}
if runTimeSeconds != nil {
githubWorkflowJobRunDurationSeconds.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Observe(*runTimeSeconds)
}
githubWorkflowJobRunDurationSeconds.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Observe(parseResult.RunTime.Seconds())
}
}
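
The variant guarded on `reader.GitHubClient` keeps event processing alive when no GitHub API credentials are configured (the issue cited in the comment): the exit-code label falls back to "na" and the run-duration observation is skipped instead of aborting the whole event. A stripped-down sketch of that guard, with local stand-in types rather than the real log parser and prometheus collectors:

```go
package main

import (
	"fmt"
	"time"
)

// jobLogResult mirrors the fields the event reader extracts from job logs
// (names are illustrative; the real parser lives in the controller package).
type jobLogResult struct {
	ExitCode string
	RunTime  time.Duration
}

// observeJob sketches the nil-guard pattern: when no log fetcher (i.e. no
// GitHub client) is available, metrics still get an "na" exit code and the
// run-duration observation is skipped entirely.
func observeJob(fetchLogs func() (*jobLogResult, error)) {
	exitCode := "na"
	var runTimeSeconds *float64

	if fetchLogs != nil { // stands in for `reader.GitHubClient != nil`
		res, err := fetchLogs()
		if err != nil {
			fmt.Println("reading workflow job log:", err)
			return
		}
		exitCode = res.ExitCode
		s := res.RunTime.Seconds()
		runTimeSeconds = &s
	}

	fmt.Println("exit_code label:", exitCode)
	if runTimeSeconds != nil {
		fmt.Println("observe run duration seconds:", *runTimeSeconds)
	}
}

func main() {
	observeJob(nil) // no credentials: exit_code "na", no duration observed
	observeJob(func() (*jobLogResult, error) {
		return &jobLogResult{ExitCode: "0", RunTime: 42 * time.Second}, nil
	})
}
```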

View File

@@ -139,12 +139,8 @@ RUN export SKIP_IPTABLES=1 \
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& mkdir -p /home/runner/.docker/cli-plugins \
&& curl -fLo /home/runner/.docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /home/runner/.docker/cli-plugins/docker-compose \
&& ln -s /home/runner/.docker/cli-plugins/docker-compose /home/runner/bin/docker-compose \
&& which docker-compose \
&& docker compose version
&& curl -fLo /home/runner/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /home/runner/bin/docker-compose
ENTRYPOINT ["/bin/bash", "-c"]
CMD ["entrypoint-dind-rootless.sh"]

View File

@@ -116,12 +116,9 @@ RUN export SKIP_IPTABLES=1 \
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& mkdir -p /home/runner/.docker/cli-plugins \
&& curl -fLo /home/runner/.docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /home/runner/.docker/cli-plugins/docker-compose \
&& ln -s /home/runner/.docker/cli-plugins/docker-compose /home/runner/bin/docker-compose \
&& which docker-compose \
&& docker compose version
&& mkdir -p /home/runner/bin \
&& curl -fLo /home/runner/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-Linux-${ARCH} \
&& chmod +x /home/runner/bin/docker-compose
ENTRYPOINT ["/bin/bash", "-c"]
CMD ["entrypoint-dind-rootless.sh"]

View File

@@ -106,12 +106,8 @@ RUN set -vx; \
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& mkdir -p /usr/libexec/docker/cli-plugins \
&& curl -fLo /usr/libexec/docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /usr/libexec/docker/cli-plugins/docker-compose \
&& ln -s /usr/libexec/docker/cli-plugins/docker-compose /usr/bin/docker-compose \
&& which docker-compose \
&& docker compose version
&& curl -fLo /usr/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /usr/bin/docker-compose
# We place the scripts in `/usr/bin` so that users who extend this image can
# override them with scripts of the same name placed in `/usr/local/bin`.

View File

@@ -82,12 +82,8 @@ RUN set -vx; \
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& mkdir -p /usr/libexec/docker/cli-plugins \
&& curl -fLo /usr/libexec/docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /usr/libexec/docker/cli-plugins/docker-compose \
&& ln -s /usr/libexec/docker/cli-plugins/docker-compose /usr/bin/docker-compose \
&& which docker-compose \
&& docker compose version
&& curl -fLo /usr/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /usr/bin/docker-compose
# We place the scripts in `/usr/bin` so that users who extend this image can
# override them with scripts of the same name placed in `/usr/local/bin`.

View File

@@ -103,12 +103,8 @@ RUN set -vx; \
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& mkdir -p /usr/libexec/docker/cli-plugins \
&& curl -fLo /usr/libexec/docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /usr/libexec/docker/cli-plugins/docker-compose \
&& ln -s /usr/libexec/docker/cli-plugins/docker-compose /usr/bin/docker-compose \
&& which docker-compose \
&& docker compose version
&& curl -fLo /usr/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /usr/bin/docker-compose
# We place the scripts in `/usr/bin` so that users who extend this image can
# override them with scripts of the same name placed in `/usr/local/bin`.

View File

@@ -80,12 +80,8 @@ RUN set -vx; \
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& mkdir -p /usr/libexec/docker/cli-plugins \
&& curl -fLo /usr/libexec/docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /usr/libexec/docker/cli-plugins/docker-compose \
&& ln -s /usr/libexec/docker/cli-plugins/docker-compose /usr/bin/docker-compose \
&& which docker-compose \
&& docker compose version
&& curl -fLo /usr/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /usr/bin/docker-compose
# We place the scripts in `/usr/bin` so that users who extend this image can
# override them with scripts of the same name placed in `/usr/local/bin`.

View File

@@ -101,7 +101,6 @@ func TestE2E(t *testing.T) {
label string
controller, controllerVer string
chart, chartVer string
opt []InstallARCOption
}{
{
label: "stable",
@@ -118,12 +117,6 @@ func TestE2E(t *testing.T) {
controllerVer: vars.controllerImageTag,
chart: "",
chartVer: "",
opt: []InstallARCOption{
func(ia *InstallARCConfig) {
ia.GithubWebhookServerEnvName = "FOO"
ia.GithubWebhookServerEnvValue = "foo"
},
},
},
}
@@ -193,7 +186,7 @@ func TestE2E(t *testing.T) {
for i, v := range testedVersions {
t.Run("install actions-runner-controller "+v.label, func(t *testing.T) {
t.Logf("Using controller %s:%s and chart %s:%s", v.controller, v.controllerVer, v.chart, v.chartVer)
env.installActionsRunnerController(t, v.controller, v.controllerVer, testID, v.chart, v.chartVer, v.opt...)
env.installActionsRunnerController(t, v.controller, v.controllerVer, testID, v.chart, v.chartVer)
})
if t.Failed() {
@@ -307,7 +300,7 @@ func TestE2E(t *testing.T) {
for i, v := range testedVersions {
t.Run("install actions-runner-controller "+v.label, func(t *testing.T) {
t.Logf("Using controller %s:%s and chart %s:%s", v.controller, v.controllerVer, v.chart, v.chartVer)
env.installActionsRunnerController(t, v.controller, v.controllerVer, testID, v.chart, v.chartVer, v.opt...)
env.installActionsRunnerController(t, v.controller, v.controllerVer, testID, v.chart, v.chartVer)
})
if t.Failed() {
@@ -420,10 +413,8 @@ type env struct {
runnerNamespace string
logFormat string
remoteKubeconfig string
admissionWebhooksTimeout string
imagePullSecretName string
imagePullPolicy string
watchNamespace string
vars vars
VerifyTimeout time.Duration
@@ -556,7 +547,6 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
e.runnerNamespace = testing.Getenv(t, "TEST_RUNNER_NAMESPACE", "default")
e.logFormat = testing.Getenv(t, "ARC_E2E_LOG_FORMAT", "")
e.remoteKubeconfig = testing.Getenv(t, "ARC_E2E_REMOTE_KUBECONFIG", "")
e.admissionWebhooksTimeout = testing.Getenv(t, "ARC_E2E_ADMISSION_WEBHOOKS_TIMEOUT", "")
e.imagePullSecretName = testing.Getenv(t, "ARC_E2E_IMAGE_PULL_SECRET_NAME", "")
e.vars = vars
@@ -566,8 +556,6 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
e.imagePullPolicy = "IfNotPresent"
}
e.watchNamespace = testing.Getenv(t, "TEST_WATCH_NAMESPACE", "")
if e.remoteKubeconfig == "" {
e.Kind = testing.StartKind(t, k8sMinorVer, testing.Preload(images...))
e.Env.Kubeconfig = e.Kind.Kubeconfig()
@@ -718,20 +706,9 @@ func (e *env) installCertManager(t *testing.T) {
e.KubectlWaitUntilDeployAvailable(t, "cert-manager", waitCfg.WithTimeout(60*time.Second))
}
type InstallARCConfig struct {
GithubWebhookServerEnvName, GithubWebhookServerEnvValue string
}
type InstallARCOption func(*InstallARCConfig)
func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, chart, chartVer string, opts ...InstallARCOption) {
func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, chart, chartVer string) {
t.Helper()
var c InstallARCConfig
for _, opt := range opts {
opt(&c)
}
e.createControllerNamespaceAndServiceAccount(t)
scriptEnv := []string{
@@ -747,10 +724,8 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, ch
"TEST_ID=" + testID,
"NAME=" + repo,
"VERSION=" + tag,
"ADMISSION_WEBHOOKS_TIMEOUT=" + e.admissionWebhooksTimeout,
"IMAGE_PULL_SECRET=" + e.imagePullSecretName,
"IMAGE_PULL_POLICY=" + e.imagePullPolicy,
"WATCH_NAMESPACE=" + e.watchNamespace,
}
if e.useApp {
@@ -773,11 +748,6 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, ch
)
}
varEnv = append(varEnv,
"GITHUB_WEBHOOK_SERVER_ENV_NAME="+c.GithubWebhookServerEnvName,
"GITHUB_WEBHOOK_SERVER_ENV_VALUE="+c.GithubWebhookServerEnvValue,
)
scriptEnv = append(scriptEnv, varEnv...)
scriptEnv = append(scriptEnv, e.vars.commonScriptEnv...)
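
This hunk removes the InstallARCConfig/InstallARCOption pair, a standard Go functional-options setup that let individual test cases inject an extra webhook-server environment variable into the install script. A self-contained sketch of the pattern, with `install` standing in for env.installActionsRunnerController:

```go
package main

import "fmt"

// InstallARCConfig and InstallARCOption mirror the functional-options pair
// removed in this hunk: options mutate a config that is then turned into
// environment variables for the install script.
type InstallARCConfig struct {
	GithubWebhookServerEnvName  string
	GithubWebhookServerEnvValue string
}

type InstallARCOption func(*InstallARCConfig)

// install applies each option to a zero-value config before using it,
// so callers that pass no options get the defaults.
func install(opts ...InstallARCOption) InstallARCConfig {
	var c InstallARCConfig
	for _, opt := range opts {
		opt(&c)
	}
	return c
}

func main() {
	c := install(func(c *InstallARCConfig) {
		c.GithubWebhookServerEnvName = "FOO"
		c.GithubWebhookServerEnvValue = "foo"
	})
	fmt.Printf("%+v\n", c) // {GithubWebhookServerEnvName:FOO GithubWebhookServerEnvValue:foo}
}
```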
@@ -1081,17 +1051,6 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
},
},
)
// Ensure both the alias and the full command work after
// https://github.com/actions/actions-runner-controller/pull/2326
steps = append(steps,
testing.Step{
Run: "docker-compose version",
},
testing.Step{
Run: "docker compose version",
},
)
}
steps = append(steps,
@@ -1107,6 +1066,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
if !kubernetesContainerMode {
setupBuildXActionWith := &testing.With{
BuildkitdFlags: "--debug",
Endpoint: "mycontext",
// As the consequence of setting `install: false`, it doesn't install buildx as an alias to `docker build`
// so we need to use `docker buildx build` in the next step
Install: false,
@@ -1132,24 +1092,16 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
setupBuildXActionWith.Driver = "docker"
dockerfile = "Dockerfile.nocache"
}
useCustomDockerContext := os.Getenv("ARC_E2E_USE_CUSTOM_DOCKER_CONTEXT") != ""
if useCustomDockerContext {
setupBuildXActionWith.Endpoint = "mycontext"
steps = append(steps, testing.Step{
steps = append(steps,
testing.Step{
// https://github.com/docker/buildx/issues/413#issuecomment-710660155
// To prevent setup-buildx-action from failing with:
// error: could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`
Run: "docker context create mycontext",
},
testing.Step{
Run: "docker context use mycontext",
},
)
}
steps = append(steps,
testing.Step{
Run: "docker context use mycontext",
},
testing.Step{
Name: "Set up Docker Buildx",
Uses: "docker/setup-buildx-action@v1",
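
One side of this hunk creates and selects the custom docker context only when ARC_E2E_USE_CUSTOM_DOCKER_CONTEXT is set, while the other does so unconditionally; either way the context exists before docker/setup-buildx-action runs, working around the buildx issue referenced in the comment. The sketch below mirrors the conditional variant using a local step type rather than the repo's testing.Step:

```go
package main

import (
	"fmt"
	"os"
)

// step mirrors just enough of the e2e framework's testing.Step to show how
// the workflow steps are assembled; the real type lives in the repo's own
// testing package.
type step struct {
	Name, Uses, Run string
}

func main() {
	var steps []step

	// Only prepare a custom docker context when the env toggle is set,
	// per https://github.com/docker/buildx/issues/413#issuecomment-710660155.
	if os.Getenv("ARC_E2E_USE_CUSTOM_DOCKER_CONTEXT") != "" {
		steps = append(steps,
			step{Run: "docker context create mycontext"},
			step{Run: "docker context use mycontext"},
		)
	}

	steps = append(steps, step{Name: "Set up Docker Buildx", Uses: "docker/setup-buildx-action@v1"})

	for _, s := range steps {
		fmt.Printf("%+v\n", s)
	}
}
```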

View File

@@ -5,17 +5,17 @@ import (
"time"
)
const letterBytes = "abcdefghijklmnopqrstuvwxyz"
func init() {
rand.Seed(time.Now().UnixNano())
}
var (
random = rand.New(rand.NewSource(time.Now().UnixNano()))
)
const letterBytes = "abcdefghijklmnopqrstuvwxyz"
// Copied from https://stackoverflow.com/a/31832326 with thanks
func RandStringBytesRmndr(n int) string {
b := make([]byte, n)
for i := range b {
b[i] = letterBytes[random.Int63()%int64(len(letterBytes))]
b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
}
return string(b)
}
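
The final hunk swaps between two seeding strategies for the random-suffix helper: a package-local *rand.Rand created from its own source, and the global source seeded once in an init function. Both yield the same kind of output; a side-by-side sketch is below. Note that rand.Seed is deprecated as of Go 1.20, where the global source is seeded automatically, which fits with this branch pinning go 1.19 in go.mod.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

const letterBytes = "abcdefghijklmnopqrstuvwxyz"

// Variant on one side of the diff: a package-local *rand.Rand with its own
// seed, so the helper does not depend on global seeding.
var random = rand.New(rand.NewSource(time.Now().UnixNano()))

func randStringLocal(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = letterBytes[random.Int63()%int64(len(letterBytes))]
	}
	return string(b)
}

// Variant on the other side: the global source, seeded once via rand.Seed in
// an init function (deprecated since Go 1.20, harmless on Go 1.19).
func randStringGlobal(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
	}
	return string(b)
}

func main() {
	fmt.Println(randStringLocal(8), randStringGlobal(8))
}
```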