Mirror of https://github.com/actions/actions-runner-controller.git (synced 2025-12-10 19:50:30 +00:00)

Compare commits: gha-runner...v0.27.4 (80 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | f798cddca1 |  |
|  | 367ee46122 |  |
|  | f4a318fca6 |  |
|  | 4ee21cb24b |  |
|  | 102c9e1afa |  |
|  | 73e676f951 |  |
|  | 41ebb43c65 |  |
|  | aa50b62c01 |  |
|  | 942f773fef |  |
|  | 21722a5de8 |  |
|  | a2d4b95b79 |  |
|  | 04fb9f4fa1 |  |
|  | 8304b80955 |  |
|  | 9bd4025e9c |  |
|  | 94c089c407 |  |
|  | 9859bbc7f2 |  |
|  | c1e2c4ef9d |  |
|  | 2ee15dbca3 |  |
|  | a4cf626410 |  |
|  | 58f4b6ff2d |  |
|  | 22fbd10bd3 |  |
|  | 52b97139b6 |  |
|  | 3e0bc3f7be |  |
|  | ba1ac0990b |  |
|  | 76fe43e8e0 |  |
|  | 8869ad28bb |  |
|  | b86af190f7 |  |
|  | 1a491cbfe5 |  |
|  | 087f20fd5d |  |
|  | a880114e57 |  |
|  | e80bc21fa5 |  |
|  | 56754094ea |  |
|  | 8fa4520376 |  |
|  | a804bf8b00 |  |
|  | 5dea6db412 |  |
|  | 2a0b770a63 |  |
|  | a7ef871248 |  |
|  | e45e4c53f1 |  |
|  | a608abd124 |  |
|  | 02d9add322 |  |
|  | f5ac134787 |  |
|  | 42abad5def |  |
|  | 514b7da742 |  |
|  | c8e3bb5ec3 |  |
|  | 878c9b8b49 |  |
|  | 4536707af6 |  |
|  | 13802c5a6d |  |
|  | 362fa5d52e |  |
|  | 65184f1ed8 |  |
|  | c23e31123c |  |
|  | 56e1c62ac2 |  |
|  | 64cedff2b4 |  |
|  | 37f93b794e |  |
|  | dc833e57a0 |  |
|  | 5228aded87 |  |
|  | f49d08e4bc |  |
|  | 064039afc0 |  |
|  | e5d8d65396 |  |
|  | c465ace8fb |  |
|  | 34f3878829 |  |
|  | 44c3931d8e |  |
|  | 08acb1b831 |  |
|  | 40811ebe0e |  |
|  | 3417c5a3a8 |  |
|  | 172faa883c |  |
|  | 9e6c7d019f |  |
|  | 9fbcafa703 |  |
|  | 2bf83d0d7f |  |
|  | 19d30dea5f |  |
|  | 6c66c1633f |  |
|  | e55708588b |  |
|  | 261d4371b5 |  |
|  | bd9f32e354 |  |
|  | babbfc77d5 |  |
|  | 322df79617 |  |
|  | 1c7c6639ed |  |
|  | bcaac39a2e |  |
|  | af625dd1cb |  |
|  | 44969659df |  |
|  | a5f98dea75 |  |
.github/actions/e2e-arc-test/action.yaml (vendored, deleted, 45 lines)
@@ -1,45 +0,0 @@
name: 'E2E ARC Test Action'
description: 'Includes common arc installation, setup and test file run'

inputs:
  github-token:
    description: 'JWT generated with Github App inputs'
    required: true
  config-url:
    description: "URL of the repo, org or enterprise where the runner scale sets will be registered"
    required: true
  docker-image-repo:
    description: "Local docker image repo for testing"
    required: true
  docker-image-tag:
    description: "Tag of ARC Docker image for testing"
    required: true

runs:
  using: "composite"
  steps:
    - name: Install ARC
      run: helm install arc --namespace "arc-systems" --create-namespace --set image.tag=${{ inputs.docker-image-tag }} --set image.repository=${{ inputs.docker-image-repo }} ./charts/gha-runner-scale-set-controller
      shell: bash
    - name: Get datetime
      # We use this value later in the runner installation to avoid runner name collisions, which are a risk with hard-coded values.
      # A datetime including 3-digit nanoseconds is a good option for this and also helps readability and runner sorting if needed.
      run: echo "DATE_TIME=$(date +'%Y-%m-%d-%H-%M-%S-%3N')" >> $GITHUB_ENV
      shell: bash
    - name: Install runners
      run: |
        helm install "arc-runner-${{ env.DATE_TIME }}" \
          --namespace "arc-runners" \
          --create-namespace \
          --set githubConfigUrl="${{ inputs.config-url }}" \
          --set githubConfigSecret.github_token="${{ inputs.github-token }}" \
          ./charts/gha-runner-scale-set \
          --debug
        kubectl get pods -A
      shell: bash
    - name: Test ARC scales pods up and down
      run: |
        export GITHUB_TOKEN="${{ inputs.github-token }}"
        export DATE_TIME="${{ env.DATE_TIME }}"
        go test ./test_e2e_arc -v
      shell: bash
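For orientation, a minimal sketch of how a job would have invoked this composite action before its removal. The inputs match the definitions above; the job name, token step id, and repository URL are illustrative placeholders rather than values taken from this diff.

```yaml
# Hypothetical caller of the removed e2e-arc-test action; all concrete values are placeholders.
jobs:
  e2e:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: ./.github/actions/e2e-arc-test
        with:
          github-token: ${{ steps.get_workflow_token.outputs.token }}  # JWT minted from a GitHub App
          config-url: "https://github.com/my-org/my-test-repo"
          docker-image-repo: "test/test-image"
          docker-image-tag: "0.0.${{ github.sha }}"
```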
.github/actions/execute-assert-arc-e2e/action.yaml (vendored, new file, 160 lines)
@@ -0,0 +1,160 @@
name: 'Execute and Assert ARC E2E Test Action'
description: 'Queue the E2E test workflow and assert that the workflow run succeeds'

inputs:
  auth-token:
    description: 'GitHub access token to queue workflow run'
    required: true
  repo-owner:
    description: "The repository owner name that has the test workflow file, ex: actions"
    required: true
  repo-name:
    description: "The repository name that has the test workflow file, ex: test"
    required: true
  workflow-file:
    description: 'The file name of the workflow yaml, ex: test.yml'
    required: true
  arc-name:
    description: 'The name of the configured gha-runner-scale-set'
    required: true
  arc-namespace:
    description: 'The namespace of the configured gha-runner-scale-set'
    required: true
  arc-controller-namespace:
    description: 'The namespace of the configured gha-runner-scale-set-controller'
    required: true

runs:
  using: "composite"
  steps:
    - name: Queue test workflow
      shell: bash
      id: queue_workflow
      run: |
        queue_time=`date +%FT%TZ`
        echo "queue_time=$queue_time" >> $GITHUB_OUTPUT
        curl -X POST https://api.github.com/repos/${{inputs.repo-owner}}/${{inputs.repo-name}}/actions/workflows/${{inputs.workflow-file}}/dispatches \
          -H "Accept: application/vnd.github.v3+json" \
          -H "Authorization: token ${{inputs.auth-token}}" \
          -d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'

    - name: Fetch workflow run & job ids
      uses: actions/github-script@v6
      id: query_workflow
      with:
        script: |
          // Try to find the workflow run triggered by the previous step using the workflow_dispatch event.
          // - Find recently created workflow runs in the test repository
          // - For each workflow run, list its workflow jobs and see if a job's labels contain `inputs.arc-name`
          // - Since inputs.arc-name should be unique per e2e workflow run, once we find the job with the label, we have found the workflow run we just triggered.
          function sleep(ms) {
            return new Promise(resolve => setTimeout(resolve, ms))
          }
          const owner = '${{inputs.repo-owner}}'
          const repo = '${{inputs.repo-name}}'
          const workflow_id = '${{inputs.workflow-file}}'
          let workflow_run_id = 0
          let workflow_job_id = 0
          let workflow_run_html_url = ""
          let count = 0
          while (count++<12) {
            await sleep(10 * 1000);
            let listRunResponse = await github.rest.actions.listWorkflowRuns({
              owner: owner,
              repo: repo,
              workflow_id: workflow_id,
              created: '>${{steps.queue_workflow.outputs.queue_time}}'
            })
            if (listRunResponse.data.total_count > 0) {
              console.log(`Found some new workflow runs for ${workflow_id}`)
              for (let i = 0; i<listRunResponse.data.total_count; i++) {
                let workflowRun = listRunResponse.data.workflow_runs[i]
                console.log(`Check if workflow run ${workflowRun.id} is triggered by us.`)
                let listJobResponse = await github.rest.actions.listJobsForWorkflowRun({
                  owner: owner,
                  repo: repo,
                  run_id: workflowRun.id
                })
                console.log(`Workflow run ${workflowRun.id} has ${listJobResponse.data.total_count} jobs.`)
                if (listJobResponse.data.total_count > 0) {
                  for (let j = 0; j<listJobResponse.data.total_count; j++) {
                    let workflowJob = listJobResponse.data.jobs[j]
                    console.log(`Check if workflow job ${workflowJob.id} is triggered by us.`)
                    console.log(JSON.stringify(workflowJob.labels));
                    if (workflowJob.labels.includes('${{inputs.arc-name}}')) {
                      console.log(`Workflow job ${workflowJob.id} (Run id: ${workflowJob.run_id}) is triggered by us.`)
                      workflow_run_id = workflowJob.run_id
                      workflow_job_id = workflowJob.id
                      workflow_run_html_url = workflowRun.html_url
                      break
                    }
                  }
                }

                if (workflow_job_id > 0) {
                  break;
                }
              }
            }

            if (workflow_job_id > 0) {
              break;
            }
          }
          if (workflow_job_id == 0) {
            core.setFailed(`Can't find workflow run and workflow job triggered to 'runs-on ${{inputs.arc-name}}'`)
          } else {
            core.setOutput('workflow_run', workflow_run_id);
            core.setOutput('workflow_job', workflow_job_id);
            core.setOutput('workflow_run_url', workflow_run_html_url);
          }

    - name: Generate summary about the triggered workflow run
      shell: bash
      run: |
        cat <<-EOF > $GITHUB_STEP_SUMMARY
        | **Triggered workflow run** |
        |:--------------------------:|
        | ${{steps.query_workflow.outputs.workflow_run_url}} |
        EOF

    - name: Wait for workflow to finish successfully
      uses: actions/github-script@v6
      with:
        script: |
          // Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'
          function sleep(ms) {
            return new Promise(resolve => setTimeout(resolve, ms))
          }
          const owner = '${{inputs.repo-owner}}'
          const repo = '${{inputs.repo-name}}'
          const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
          const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
          let count = 0
          while (count++<10) {
            await sleep(30 * 1000);
            let getRunResponse = await github.rest.actions.getWorkflowRun({
              owner: owner,
              repo: repo,
              run_id: workflow_run_id
            })
            console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
            if (getRunResponse.data.status == 'completed') {
              if ( getRunResponse.data.conclusion == 'success') {
                console.log(`Workflow run finished properly.`)
                return
              } else {
                core.setFailed(`The triggered workflow run finish with result ${getRunResponse.data.conclusion}`)
                return
              }
            }
          }
          core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)

    - name: Gather logs and cleanup
      shell: bash
      if: always()
      run: |
        helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
        kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
        kubectl logs deployment/arc-gha-runner-scale-set-controller -n ${{inputs.arc-controller-namespace}}
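A minimal sketch of a calling step, mirroring how the e2e-test-linux-vm.yaml jobs further down use this action; the repo-owner value here is a placeholder.

```yaml
# Sketch only: the caller passes the scale set name plus the coordinates of the dummy test workflow.
- name: Test ARC E2E
  uses: ./.github/actions/execute-assert-arc-e2e
  timeout-minutes: 10
  with:
    auth-token: ${{ steps.setup.outputs.token }}
    repo-owner: my-org                     # placeholder owner of the test repository
    repo-name: arc_e2e_test_dummy
    workflow-file: arc-test-workflow.yaml
    arc-name: ${{ steps.install_arc.outputs.ARC_NAME }}
    arc-namespace: "arc-runners"
    arc-controller-namespace: "arc-systems"
```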
.github/actions/setup-arc-e2e/action.yaml (vendored, new file, 63 lines)
@@ -0,0 +1,63 @@
name: 'Setup ARC E2E Test Action'
description: 'Build controller image, create kind cluster, load the image, and exchange ARC configure token.'

inputs:
  app-id:
    description: 'GitHub App Id for exchange access token'
    required: true
  app-pk:
    description: "GitHub App private key for exchange access token"
    required: true
  image-name:
    description: "Local docker image name for building"
    required: true
  image-tag:
    description: "Tag of ARC Docker image for building"
    required: true
  target-org:
    description: "The test organization for ARC e2e test"
    required: true

outputs:
  token:
    description: 'Token to use for configure ARC'
    value: ${{steps.config-token.outputs.token}}

runs:
  using: "composite"
  steps:
    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v2
      with:
        # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
        # BuildKit v0.11 which has a bug causing intermittent
        # failures pushing images to GHCR
        version: v0.9.1
        driver-opts: image=moby/buildkit:v0.10.6

    - name: Build controller image
      uses: docker/build-push-action@v3
      with:
        file: Dockerfile
        platforms: linux/amd64
        load: true
        build-args: |
          DOCKER_IMAGE_NAME=${{inputs.image-name}}
          VERSION=${{inputs.image-tag}}
        tags: |
          ${{inputs.image-name}}:${{inputs.image-tag}}
        no-cache: true

    - name: Create minikube cluster and load image
      shell: bash
      run: |
        minikube start
        minikube image load ${{inputs.image-name}}:${{inputs.image-tag}}

    - name: Get configure token
      id: config-token
      uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
      with:
        application_id: ${{ inputs.app-id }}
        application_private_key: ${{ inputs.app-pk }}
        organization: ${{ inputs.target-org}}
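A minimal sketch of how a job consumes this action and its token output, following the pattern used by the e2e jobs below; the organization name is a placeholder.

```yaml
# Sketch only: build/load the controller image, then reuse the exchanged token in later steps.
- uses: ./.github/actions/setup-arc-e2e
  id: setup
  with:
    app-id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
    app-pk: ${{ secrets.E2E_TESTS_ACCESS_PK }}
    image-name: arc-test-image
    image-tag: "0.4.0"
    target-org: my-test-org                # placeholder organization
- name: Use the exchanged token
  shell: bash
  run: |
    # In the real jobs this token is passed to helm as githubConfigSecret.github_token.
    test -n "${{ steps.setup.outputs.token }}"
```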
.github/renovate.json5 (vendored, deleted, 43 lines)
@@ -1,43 +0,0 @@
{
  "extends": ["config:base"],
  "labels": ["dependencies"],
  "packageRules": [
    {
      // automatically merge an update of runner
      "matchPackageNames": ["actions/runner"],
      "extractVersion": "^v(?<version>.*)$",
      "automerge": true
    }
  ],
  "regexManagers": [
    {
      // use https://github.com/actions/runner/releases
      "fileMatch": [
        ".github/workflows/runners.yaml"
      ],
      "matchStrings": ["RUNNER_VERSION: +(?<currentValue>.*?)\\n"],
      "depNameTemplate": "actions/runner",
      "datasourceTemplate": "github-releases"
    },
    {
      "fileMatch": [
        "runner/Makefile",
        "Makefile"
      ],
      "matchStrings": ["RUNNER_VERSION \\?= +(?<currentValue>.*?)\\n"],
      "depNameTemplate": "actions/runner",
      "datasourceTemplate": "github-releases"
    },
    {
      "fileMatch": [
        "runner/actions-runner.ubuntu-20.04.dockerfile",
        "runner/actions-runner.ubuntu-22.04.dockerfile",
        "runner/actions-runner-dind.ubuntu-20.04.dockerfile",
        "runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile"
      ],
      "matchStrings": ["RUNNER_VERSION=+(?<currentValue>.*?)\\n"],
      "depNameTemplate": "actions/runner",
      "datasourceTemplate": "github-releases"
    }
  ]
}
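For reference, the removed regexManagers matched lines that pin RUNNER_VERSION in the listed files. The exact contents of .github/workflows/runners.yaml are not shown in this diff, so the excerpt below is an assumption based on the matchStrings pattern and the RUNNER_VERSION value that appears in the old e2e workflow further down.

```yaml
# Hypothetical excerpt of .github/workflows/runners.yaml: the first regexManager above
# captures the value after "RUNNER_VERSION: " and bumps it from actions/runner releases.
env:
  RUNNER_VERSION: 2.302.1
```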
@@ -1,16 +0,0 @@
name: ARC Reusable Workflow
on:
  workflow_dispatch:
    inputs:
      date_time:
        description: 'Datetime for runner name uniqueness, format: %Y-%m-%d-%H-%M-%S-%3N, example: 2023-02-14-13-00-16-791'
        required: true
jobs:
  arc-runner-job:
    strategy:
      fail-fast: false
      matrix:
        job: [1, 2, 3]
    runs-on: arc-runner-${{ inputs.date_time }}
    steps:
      - run: echo "Hello World!" >> $GITHUB_STEP_SUMMARY
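The replacement e2e jobs below no longer use this in-repo workflow; instead they dispatch a workflow named in WORKFLOW_FILE (for example arc-test-workflow.yaml) in the arc_e2e_test_dummy repository, passing an arc_name input that the execute-assert-arc-e2e action then looks for in the job labels. That dispatched workflow is not part of this diff; a hedged sketch of its presumed shape:

```yaml
# Assumed shape of the dispatched test workflow (lives in the test repository, not in this diff).
name: ARC Test Workflow
on:
  workflow_dispatch:
    inputs:
      arc_name:
        description: 'Runner scale set name to target; used as the runs-on label'
        required: true
jobs:
  test:
    runs-on: ${{ inputs.arc_name }}
    steps:
      - run: echo "Hello from ${{ inputs.arc_name }}" >> $GITHUB_STEP_SUMMARY
```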
.github/workflows/e2e-test-linux-vm.yaml (vendored, 720 lines changed)
@@ -5,47 +5,701 @@ on:
     branches:
       - master
   pull_request:
+    branches:
+      - master
   workflow_dispatch:
+
+permissions:
+  contents: read
+
 env:
   TARGET_ORG: actions-runner-controller
-  CLUSTER_NAME: e2e-test
-  RUNNER_VERSION: 2.302.1
-  IMAGE_REPO: "test/test-image"
+  TARGET_REPO: arc_e2e_test_dummy
+  IMAGE_NAME: "arc-test-image"
+  IMAGE_VERSION: "0.4.0"

 jobs:
-  setup-steps:
-    runs-on: [ubuntu-latest]
+  default-setup:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
+    env:
+      WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
       - uses: actions/checkout@v3
-      - name: Add env variables
-        run: |
-          TAG=$(echo "0.0.$GITHUB_SHA")
-          echo "TAG=$TAG" >> $GITHUB_ENV
-          echo "IMAGE=$IMAGE_REPO:$TAG" >> $GITHUB_ENV
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
         with:
-          version: latest
-      - name: Docker Build Test Image
-        run: |
-          DOCKER_CLI_EXPERIMENTAL=enabled DOCKER_BUILDKIT=1 docker buildx build --build-arg RUNNER_VERSION=$RUNNER_VERSION --build-arg TAG=$TAG -t $IMAGE . --load
-      - name: Create Kind cluster
-        run: |
-          PATH=$(go env GOPATH)/bin:$PATH
-          kind create cluster --name $CLUSTER_NAME
-      - name: Load Image to Kind Cluster
-        run: kind load docker-image $IMAGE --name $CLUSTER_NAME
-      - name: Get Token
-        id: get_workflow_token
-        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
+          ref: ${{github.head_ref}}
+
+      - uses: ./.github/actions/setup-arc-e2e
+        id: setup
         with:
-          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
-          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
-          organization: ${{ env.TARGET_ORG }}
-      - uses: ./.github/actions/e2e-arc-test
+          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
+          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
+          image-name: ${{env.IMAGE_NAME}}
+          image-tag: ${{env.IMAGE_VERSION}}
+          target-org: ${{env.TARGET_ORG}}
+
+      - name: Install gha-runner-scale-set-controller
+        id: install_arc_controller
+        run: |
+          helm install arc \
+            --namespace "arc-systems" \
+            --create-namespace \
+            --set image.repository=${{ env.IMAGE_NAME }} \
+            --set image.tag=${{ env.IMAGE_VERSION }} \
+            ./charts/gha-runner-scale-set-controller \
+            --debug
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl get pod -n arc-systems
+          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+
+      - name: Install gha-runner-scale-set
+        id: install_arc
+        run: |
+          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
+          helm install "$ARC_NAME" \
+            --namespace "arc-runners" \
+            --create-namespace \
+            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
+            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
+            ./charts/gha-runner-scale-set \
+            --debug
+          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
+          kubectl get pod -n arc-systems
+
+      - name: Test ARC E2E
+        uses: ./.github/actions/execute-assert-arc-e2e
+        timeout-minutes: 10
         with:
-          github-token: ${{ steps.get_workflow_token.outputs.token }}
-          config-url: "https://github.com/actions-runner-controller/arc_e2e_test_dummy"
-          docker-image-repo: $IMAGE_REPO
-          docker-image-tag: $TAG
+          auth-token: ${{ steps.setup.outputs.token }}
+          repo-owner: ${{ env.TARGET_ORG }}
+          repo-name: ${{env.TARGET_REPO}}
+          workflow-file: ${{env.WORKFLOW_FILE}}
+          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
+          arc-namespace: "arc-runners"
+          arc-controller-namespace: "arc-systems"
+
+  single-namespace-setup:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
+    env:
+      WORKFLOW_FILE: "arc-test-workflow.yaml"
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{github.head_ref}}
+
+      - uses: ./.github/actions/setup-arc-e2e
+        id: setup
+        with:
+          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
+          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
+          image-name: ${{env.IMAGE_NAME}}
+          image-tag: ${{env.IMAGE_VERSION}}
+          target-org: ${{env.TARGET_ORG}}
+
+      - name: Install gha-runner-scale-set-controller
+        id: install_arc_controller
+        run: |
+          kubectl create namespace arc-runners
+          helm install arc \
+            --namespace "arc-systems" \
+            --create-namespace \
+            --set image.repository=${{ env.IMAGE_NAME }} \
+            --set image.tag=${{ env.IMAGE_VERSION }} \
+            --set flags.watchSingleNamespace=arc-runners \
+            ./charts/gha-runner-scale-set-controller \
+            --debug
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl get pod -n arc-systems
+          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+
+      - name: Install gha-runner-scale-set
+        id: install_arc
+        run: |
+          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
+          helm install "$ARC_NAME" \
+            --namespace "arc-runners" \
+            --create-namespace \
+            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
+            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
+            ./charts/gha-runner-scale-set \
+            --debug
+          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
+          kubectl get pod -n arc-systems
+
+      - name: Test ARC E2E
+        uses: ./.github/actions/execute-assert-arc-e2e
+        timeout-minutes: 10
+        with:
+          auth-token: ${{ steps.setup.outputs.token }}
+          repo-owner: ${{ env.TARGET_ORG }}
+          repo-name: ${{env.TARGET_REPO}}
+          workflow-file: ${{env.WORKFLOW_FILE}}
+          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
+          arc-namespace: "arc-runners"
+          arc-controller-namespace: "arc-systems"
+
+  dind-mode-setup:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
+    env:
+      WORKFLOW_FILE: arc-test-dind-workflow.yaml
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{github.head_ref}}
+
+      - uses: ./.github/actions/setup-arc-e2e
+        id: setup
+        with:
+          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
+          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
+          image-name: ${{env.IMAGE_NAME}}
+          image-tag: ${{env.IMAGE_VERSION}}
+          target-org: ${{env.TARGET_ORG}}
+
+      - name: Install gha-runner-scale-set-controller
+        id: install_arc_controller
+        run: |
+          helm install arc \
+            --namespace "arc-systems" \
+            --create-namespace \
+            --set image.repository=${{ env.IMAGE_NAME }} \
+            --set image.tag=${{ env.IMAGE_VERSION }} \
+            ./charts/gha-runner-scale-set-controller \
+            --debug
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl get pod -n arc-systems
+          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+
+      - name: Install gha-runner-scale-set
+        id: install_arc
+        run: |
+          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
+          helm install "$ARC_NAME" \
+            --namespace "arc-runners" \
+            --create-namespace \
+            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
+            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
+            --set containerMode.type="dind" \
+            ./charts/gha-runner-scale-set \
+            --debug
+          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
+          kubectl get pod -n arc-systems
+
+      - name: Test ARC E2E
+        uses: ./.github/actions/execute-assert-arc-e2e
+        timeout-minutes: 10
+        with:
+          auth-token: ${{ steps.setup.outputs.token }}
+          repo-owner: ${{ env.TARGET_ORG }}
+          repo-name: ${{env.TARGET_REPO}}
+          workflow-file: ${{env.WORKFLOW_FILE}}
+          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
+          arc-namespace: "arc-runners"
+          arc-controller-namespace: "arc-systems"
+
+  kubernetes-mode-setup:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
+    env:
+      WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{github.head_ref}}
+
+      - uses: ./.github/actions/setup-arc-e2e
+        id: setup
+        with:
+          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
+          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
+          image-name: ${{env.IMAGE_NAME}}
+          image-tag: ${{env.IMAGE_VERSION}}
+          target-org: ${{env.TARGET_ORG}}
+
+      - name: Install gha-runner-scale-set-controller
+        id: install_arc_controller
+        run: |
+          echo "Install openebs/dynamic-localpv-provisioner"
+          helm repo add openebs https://openebs.github.io/charts
+          helm repo update
+          helm install openebs openebs/openebs -n openebs --create-namespace
+
+          helm install arc \
+            --namespace "arc-systems" \
+            --create-namespace \
+            --set image.repository=${{ env.IMAGE_NAME }} \
+            --set image.tag=${{ env.IMAGE_VERSION }} \
+            ./charts/gha-runner-scale-set-controller \
+            --debug
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl get pod -n arc-systems
+          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+          kubectl wait --timeout=30s --for=condition=ready pod -n openebs -l name=openebs-localpv-provisioner
+
+      - name: Install gha-runner-scale-set
+        id: install_arc
+        run: |
+          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
+          helm install "$ARC_NAME" \
+            --namespace "arc-runners" \
+            --create-namespace \
+            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
+            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
+            --set containerMode.type="kubernetes" \
+            --set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \
+            --set containerMode.kubernetesModeWorkVolumeClaim.storageClassName="openebs-hostpath" \
+            --set containerMode.kubernetesModeWorkVolumeClaim.resources.requests.storage="1Gi" \
+            ./charts/gha-runner-scale-set \
+            --debug
+          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
+          kubectl get pod -n arc-systems
+
+      - name: Test ARC E2E
+        uses: ./.github/actions/execute-assert-arc-e2e
+        timeout-minutes: 10
+        with:
+          auth-token: ${{ steps.setup.outputs.token }}
+          repo-owner: ${{ env.TARGET_ORG }}
+          repo-name: ${{env.TARGET_REPO}}
+          workflow-file: ${{env.WORKFLOW_FILE}}
+          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
+          arc-namespace: "arc-runners"
+          arc-controller-namespace: "arc-systems"
+
+  auth-proxy-setup:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
+    env:
+      WORKFLOW_FILE: "arc-test-workflow.yaml"
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{github.head_ref}}
+
+      - uses: ./.github/actions/setup-arc-e2e
+        id: setup
+        with:
+          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
+          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
+          image-name: ${{env.IMAGE_NAME}}
+          image-tag: ${{env.IMAGE_VERSION}}
+          target-org: ${{env.TARGET_ORG}}
+
+      - name: Install gha-runner-scale-set-controller
+        id: install_arc_controller
+        run: |
+          helm install arc \
+            --namespace "arc-systems" \
+            --create-namespace \
+            --set image.repository=${{ env.IMAGE_NAME }} \
+            --set image.tag=${{ env.IMAGE_VERSION }} \
+            ./charts/gha-runner-scale-set-controller \
+            --debug
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl get pod -n arc-systems
+          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+
+      - name: Install gha-runner-scale-set
+        id: install_arc
+        run: |
+          docker run -d \
+            --name squid \
+            --publish 3128:3128 \
+            huangtingluo/squid-proxy:latest
+          kubectl create namespace arc-runners
+          kubectl create secret generic proxy-auth \
+            --namespace=arc-runners \
+            --from-literal=username=github \
+            --from-literal=password='actions'
+          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
+          helm install "$ARC_NAME" \
+            --namespace "arc-runners" \
+            --create-namespace \
+            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
+            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
+            --set proxy.https.url="http://host.minikube.internal:3128" \
+            --set proxy.https.credentialSecretRef="proxy-auth" \
+            --set "proxy.noProxy[0]=10.96.0.1:443" \
+            ./charts/gha-runner-scale-set \
+            --debug
+          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
+          kubectl get pod -n arc-systems
+
+      - name: Test ARC E2E
+        uses: ./.github/actions/execute-assert-arc-e2e
+        timeout-minutes: 10
+        with:
+          auth-token: ${{ steps.setup.outputs.token }}
+          repo-owner: ${{ env.TARGET_ORG }}
+          repo-name: ${{env.TARGET_REPO}}
+          workflow-file: ${{env.WORKFLOW_FILE}}
+          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
+          arc-namespace: "arc-runners"
+          arc-controller-namespace: "arc-systems"
+
+  anonymous-proxy-setup:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
+    env:
+      WORKFLOW_FILE: "arc-test-workflow.yaml"
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{github.head_ref}}
+
+      - uses: ./.github/actions/setup-arc-e2e
+        id: setup
+        with:
+          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
+          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
+          image-name: ${{env.IMAGE_NAME}}
+          image-tag: ${{env.IMAGE_VERSION}}
+          target-org: ${{env.TARGET_ORG}}
+
+      - name: Install gha-runner-scale-set-controller
+        id: install_arc_controller
+        run: |
+          helm install arc \
+            --namespace "arc-systems" \
+            --create-namespace \
+            --set image.repository=${{ env.IMAGE_NAME }} \
+            --set image.tag=${{ env.IMAGE_VERSION }} \
+            ./charts/gha-runner-scale-set-controller \
+            --debug
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl get pod -n arc-systems
+          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+
+      - name: Install gha-runner-scale-set
+        id: install_arc
+        run: |
+          docker run -d \
+            --name squid \
+            --publish 3128:3128 \
+            ubuntu/squid:latest
+          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
+          helm install "$ARC_NAME" \
+            --namespace "arc-runners" \
+            --create-namespace \
+            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
+            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
+            --set proxy.https.url="http://host.minikube.internal:3128" \
+            --set "proxy.noProxy[0]=10.96.0.1:443" \
+            ./charts/gha-runner-scale-set \
+            --debug
+          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
+          kubectl get pod -n arc-systems
+
+      - name: Test ARC E2E
+        uses: ./.github/actions/execute-assert-arc-e2e
+        timeout-minutes: 10
+        with:
+          auth-token: ${{ steps.setup.outputs.token }}
+          repo-owner: ${{ env.TARGET_ORG }}
+          repo-name: ${{env.TARGET_REPO}}
+          workflow-file: ${{env.WORKFLOW_FILE}}
+          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
+          arc-namespace: "arc-runners"
+          arc-controller-namespace: "arc-systems"
+
+  self-signed-ca-setup:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
+    env:
+      WORKFLOW_FILE: "arc-test-workflow.yaml"
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{github.head_ref}}
+
+      - uses: ./.github/actions/setup-arc-e2e
+        id: setup
+        with:
+          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
+          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
+          image-name: ${{env.IMAGE_NAME}}
+          image-tag: ${{env.IMAGE_VERSION}}
+          target-org: ${{env.TARGET_ORG}}
+
+      - name: Install gha-runner-scale-set-controller
+        id: install_arc_controller
+        run: |
+          helm install arc \
+            --namespace "arc-systems" \
+            --create-namespace \
+            --set image.repository=${{ env.IMAGE_NAME }} \
+            --set image.tag=${{ env.IMAGE_VERSION }} \
+            ./charts/gha-runner-scale-set-controller \
+            --debug
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl get pod -n arc-systems
+          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+
+      - name: Install gha-runner-scale-set
+        id: install_arc
+        run: |
+          docker run -d \
+            --rm \
+            --name mitmproxy \
+            --publish 8080:8080 \
+            -v ${{ github.workspace }}/mitmproxy:/home/mitmproxy/.mitmproxy \
+            mitmproxy/mitmproxy:latest \
+            mitmdump
+          count=0
+          while true; do
+            if [ -f "${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem" ]; then
+              echo "CA cert generated"
+              cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for mitmproxy generate its CA cert"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
+          sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
+          kubectl create namespace arc-runners
+          kubectl -n arc-runners create configmap ca-cert --from-file="${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt"
+          kubectl -n arc-runners get configmap ca-cert -o yaml
+          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
+          helm install "$ARC_NAME" \
+            --namespace "arc-runners" \
+            --create-namespace \
+            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
+            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
+            --set proxy.https.url="http://host.minikube.internal:8080" \
+            --set "proxy.noProxy[0]=10.96.0.1:443" \
+            --set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert" \
+            --set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.crt" \
+            --set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
+            ./charts/gha-runner-scale-set \
+            --debug
+          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
+          kubectl get pod -n arc-systems
+
+      - name: Test ARC E2E
+        uses: ./.github/actions/execute-assert-arc-e2e
+        timeout-minutes: 10
+        with:
+          auth-token: ${{ steps.setup.outputs.token }}
+          repo-owner: ${{ env.TARGET_ORG }}
+          repo-name: ${{env.TARGET_REPO}}
+          workflow-file: ${{env.WORKFLOW_FILE}}
+          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
+          arc-namespace: "arc-runners"
+          arc-controller-namespace: "arc-systems"
.github/workflows/go.yaml (vendored, new file, 80 lines)
@@ -0,0 +1,80 @@
name: Go
on:
  push:
    branches:
      - master
    paths:
      - '.github/workflows/go.yaml'
      - '**.go'
      - 'go.mod'
      - 'go.sum'

  pull_request:
    paths:
      - '.github/workflows/go.yaml'
      - '**.go'
      - 'go.mod'
      - 'go.sum'

permissions:
  contents: read

jobs:
  fmt:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v4
        with:
          go-version-file: 'go.mod'
          cache: false
      - name: fmt
        run: go fmt ./...
      - name: Check diff
        run: git diff --exit-code

  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v4
        with:
          go-version-file: 'go.mod'
          cache: false
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          only-new-issues: true
          version: v1.51.1

  generate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v4
        with:
          go-version-file: 'go.mod'
          cache: false
      - name: Generate
        run: make generate
      - name: Check diff
        run: git diff --exit-code

  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v4
        with:
          go-version-file: 'go.mod'
      - run: make manifests
      - name: Check diff
        run: git diff --exit-code
      - name: Install kubebuilder
        run: |
          curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
          tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
          sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
      - name: Run go tests
        run: |
          go test -short `go list ./... | grep -v ./test_e2e_arc`
.github/workflows/golangci-lint.yaml (vendored, deleted, 23 lines)
@@ -1,23 +0,0 @@
name: golangci-lint
on:
  push:
    branches:
      - master
  pull_request:
permissions:
  contents: read
  pull-requests: read
jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: 1.19
      - uses: actions/checkout@v3
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          only-new-issues: true
          version: v1.49.0
.github/workflows/publish-arc.yaml (vendored, 4 lines changed)
@@ -29,6 +29,10 @@ jobs:
   release-controller:
     name: Release
     runs-on: ubuntu-latest
+    # gha-runner-scale-set has its own release workflow.
+    # We don't want to publish a new actions-runner-controller image
+    # when we release gha-runner-scale-set.
+    if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
     steps:
       - name: Checkout
         uses: actions/checkout@v3
85
.github/workflows/publish-canary.yaml
vendored
85
.github/workflows/publish-canary.yaml
vendored
@@ -8,35 +8,47 @@ on:
|
|||||||
- master
|
- master
|
||||||
paths-ignore:
|
paths-ignore:
|
||||||
- '**.md'
|
- '**.md'
|
||||||
|
- '.github/actions/**'
|
||||||
- '.github/ISSUE_TEMPLATE/**'
|
- '.github/ISSUE_TEMPLATE/**'
|
||||||
- '.github/workflows/validate-chart.yaml'
|
- '.github/workflows/e2e-test-dispatch-workflow.yaml'
|
||||||
- '.github/workflows/publish-chart.yaml'
|
- '.github/workflows/e2e-test-linux-vm.yaml'
|
||||||
- '.github/workflows/publish-arc.yaml'
|
- '.github/workflows/publish-arc.yaml'
|
||||||
- '.github/workflows/runners.yaml'
|
- '.github/workflows/publish-chart.yaml'
|
||||||
- '.github/workflows/validate-entrypoint.yaml'
|
- '.github/workflows/publish-runner-scale-set.yaml'
|
||||||
- '.github/renovate.*'
|
- '.github/workflows/release-runners.yaml'
|
||||||
|
- '.github/workflows/run-codeql.yaml'
|
||||||
|
- '.github/workflows/run-first-interaction.yaml'
|
||||||
|
- '.github/workflows/run-stale.yaml'
|
||||||
|
- '.github/workflows/update-runners.yaml'
|
||||||
|
- '.github/workflows/validate-arc.yaml'
|
||||||
|
- '.github/workflows/validate-chart.yaml'
|
||||||
|
- '.github/workflows/validate-gha-chart.yaml'
|
||||||
|
- '.github/workflows/validate-runners.yaml'
|
||||||
|
- '.github/dependabot.yml'
|
||||||
|
- '.github/RELEASE_NOTE_TEMPLATE.md'
|
||||||
- 'runner/**'
|
- 'runner/**'
|
||||||
- '.gitignore'
|
- '.gitignore'
|
||||||
- 'PROJECT'
|
- 'PROJECT'
|
||||||
- 'LICENSE'
|
- 'LICENSE'
|
||||||
- 'Makefile'
|
- 'Makefile'
|
||||||
|
|
||||||
env:
|
|
||||||
# Safeguard to prevent pushing images to registeries after build
|
|
||||||
PUSH_TO_REGISTRIES: true
|
|
||||||
TARGET_ORG: actions-runner-controller
|
|
||||||
TARGET_REPO: actions-runner-controller
|
|
||||||
|
|
||||||
 # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
 permissions:
   contents: read
+  packages: write
+
+env:
+  # Safeguard to prevent pushing images to registeries after build
+  PUSH_TO_REGISTRIES: true
+
 jobs:
-  canary-build:
-    name: Build and Publish Canary Image
+  legacy-canary-build:
+    name: Build and Publish Legacy Canary Image
     runs-on: ubuntu-latest
     env:
       DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
+      TARGET_ORG: actions-runner-controller
+      TARGET_REPO: actions-runner-controller
     steps:
       - name: Checkout
         uses: actions/checkout@v3
@@ -68,3 +80,50 @@ jobs:
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "**Status:**" >> $GITHUB_STEP_SUMMARY
           echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY
+
+  canary-build:
+    name: Build and Publish gha-runner-scale-set-controller Canary Image
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      # Normalization is needed because upper case characters are not allowed in the repository name
+      # and the short sha is needed for image tagging
+      - name: Resolve parameters
+        id: resolve_parameters
+        run: |
+          echo "INFO: Resolving short sha"
+          echo "short_sha=$(git rev-parse --short ${{ github.ref }})" >> $GITHUB_OUTPUT
+          echo "INFO: Normalizing repository name (lowercase)"
+          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: latest
+
+      # Unstable builds - run at your own risk
+      - name: Build and Push
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          file: ./Dockerfile
+          platforms: linux/amd64,linux/arm64
+          build-args: VERSION=canary-"${{ github.ref }}"
+          push: ${{ env.PUSH_TO_REGISTRIES }}
+          tags: |
+            ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary
+            ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary-${{ steps.resolve_parameters.outputs.short_sha }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
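For illustration only, the normalization performed by the resolve_parameters step can be reproduced locally with plain git and tr; SHORT_SHA and OWNER below are placeholder variable names, not part of the workflow:

    # Sketch of the short-sha and lowercase-owner normalization used for canary image tags
    SHORT_SHA=$(git rev-parse --short HEAD)
    OWNER=$(echo "MyOrg" | tr '[:upper:]' '[:lower:]')   # ghcr.io rejects upper case in repository names
    echo "ghcr.io/$OWNER/gha-runner-scale-set-controller:canary-$SHORT_SHA"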

32  .github/workflows/publish-chart.yaml  (vendored)
@@ -14,13 +14,19 @@ on:
       - '!charts/gha-runner-scale-set/**'
       - '!**.md'
   workflow_dispatch:
+    inputs:
+      force:
+        description: 'Force publish even if the chart version is not bumped'
+        type: boolean
+        required: true
+        default: false

 env:
   KUBE_SCORE_VERSION: 1.10.0
   HELM_VERSION: v3.8.0

 permissions:
-  contents: read
+  contents: write

 jobs:
   lint-chart:
@@ -45,20 +51,12 @@ jobs:
           chmod 755 kube-score

       - name: Kube-score generated manifests
-        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
-          --ignore-test pod-networkpolicy
-          --ignore-test deployment-has-poddisruptionbudget
-          --ignore-test deployment-has-host-podantiaffinity
-          --ignore-test container-security-context
-          --ignore-test pod-probes
-          --ignore-test container-image-tag
-          --enable-optional-test container-security-context-privileged
-          --enable-optional-test container-security-context-readonlyrootfilesystem
+        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem

       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
       - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
+          python-version: '3.11'

       - name: Set up chart-testing
         uses: helm/chart-testing-action@v2.3.1
@@ -98,9 +96,12 @@ jobs:
           NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
           RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
           LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)

           echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
           echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
-          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION ]]; then
+          # Always publish if force is true
+          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
             echo "publish=true" >> $GITHUB_OUTPUT
           else
             echo "publish=false" >> $GITHUB_OUTPUT
@@ -170,13 +171,14 @@ jobs:
             --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
             --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
             --index-path ${{ github.workspace }}/index.yaml \
+            --push \
             --pages-branch 'gh-pages' \
             --pages-index-path 'index.yaml'

       # Chart Release was never intended to publish to a different repo
       # this workaround is intended to move the index.yaml to the target repo
       # where the github pages are hosted
-      - name: Checkout pages repository
+      - name: Checkout target repository
         uses: actions/checkout@v3
         with:
           repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
@@ -188,7 +190,7 @@ jobs:
         run: |
           cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml

-      - name: Commit and push
+      - name: Commit and push to target repository
         run: |
           git config user.name "$GITHUB_ACTOR"
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
@@ -202,4 +204,4 @@ jobs:
           echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "**Status:**" >> $GITHUB_STEP_SUMMARY
-          echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
+          echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
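As a minimal sketch of the new publish gate (the values below are made up; in the workflow they come from Chart.yaml and the GitHub releases API as shown above), the decision reduces to:

    NEW_CHART_VERSION=0.23.2
    LATEST_RELEASED_CHART_VERSION=0.23.1
    FORCE=false   # the workflow_dispatch "force" input
    if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "$FORCE" == "true" ]]; then
      echo "publish=true"
    else
      echo "publish=false"
    fi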

@@ -46,6 +46,13 @@ jobs:
       # If inputs.ref is empty, it'll resolve to the default branch
       ref: ${{ inputs.ref }}

+      - name: Check chart versions
+        # Binary version and chart versions need to match.
+        # In case of an upgrade, the controller will try to clean up
+        # resources with older versions that should have been cleaned up
+        # during the upgrade process
+        run: ./hack/check-gh-chart-versions.sh ${{ inputs.release_tag_name }}
+
       - name: Resolve parameters
         id: resolve_parameters
         run: |

2  .github/workflows/release-runners.yaml  (vendored)
@@ -17,7 +17,7 @@ env:
   PUSH_TO_REGISTRIES: true
   TARGET_ORG: actions-runner-controller
   TARGET_WORKFLOW: release-runners.yaml
-  DOCKER_VERSION: 20.10.21
+  DOCKER_VERSION: 20.10.23
   RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0

 jobs:

3  .github/workflows/update-runners.yaml  (vendored)
@@ -77,6 +77,7 @@ jobs:
     permissions:
       pull-requests: write
       contents: write
+      actions: write
     env:
       GH_TOKEN: ${{ github.token }}
       CURRENT_VERSION: ${{ needs.check_versions.outputs.current_version }}
@@ -93,7 +94,7 @@ jobs:
           sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
           sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
           sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
-          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e_test_linux_vm.yaml
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e-test-linux-vm.yaml

       - name: Commit changes
         run: |

60  .github/workflows/validate-arc.yaml  (vendored)
@@ -1,60 +0,0 @@
-name: Validate ARC
-
-on:
-  pull_request:
-    branches:
-      - master
-    paths-ignore:
-      - '**.md'
-      - '.github/ISSUE_TEMPLATE/**'
-      - '.github/workflows/publish-canary.yaml'
-      - '.github/workflows/validate-chart.yaml'
-      - '.github/workflows/publish-chart.yaml'
-      - '.github/workflows/runners.yaml'
-      - '.github/workflows/publish-arc.yaml'
-      - '.github/workflows/validate-entrypoint.yaml'
-      - '.github/renovate.*'
-      - 'runner/**'
-      - '.gitignore'
-      - 'PROJECT'
-      - 'LICENSE'
-      - 'Makefile'
-
-permissions:
-  contents: read
-
-jobs:
-  test-controller:
-    name: Test ARC
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-
-      - name: Set-up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '1.19'
-          check-latest: false
-
-      - uses: actions/cache@v3
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-
-
-      - name: Install kubebuilder
-        run: |
-          curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
-          tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
-          sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
-
-      - name: Run tests
-        run: |
-          make test
-
-      - name: Verify manifests are up-to-date
-        run: |
-          make manifests
-          git diff --exit-code

4  .github/workflows/validate-chart.yaml  (vendored)
@@ -9,12 +9,16 @@ on:
       - '.github/workflows/validate-chart.yaml'
       - '!charts/actions-runner-controller/docs/**'
       - '!**.md'
+      - '!charts/gha-runner-scale-set-controller/**'
+      - '!charts/gha-runner-scale-set/**'
   push:
     paths:
       - 'charts/**'
       - '.github/workflows/validate-chart.yaml'
       - '!charts/actions-runner-controller/docs/**'
       - '!**.md'
+      - '!charts/gha-runner-scale-set-controller/**'
+      - '!charts/gha-runner-scale-set/**'
   workflow_dispatch:
 env:
   KUBE_SCORE_VERSION: 1.10.0

134  .github/workflows/validate-gha-chart.yaml  (vendored, Normal file)
@@ -0,0 +1,134 @@
+name: Validate Helm Chart (gha-runner-scale-set-controller and gha-runner-scale-set)
+
+on:
+  pull_request:
+    branches:
+      - master
+    paths:
+      - 'charts/**'
+      - '.github/workflows/validate-gha-chart.yaml'
+      - '!charts/actions-runner-controller/**'
+      - '!**.md'
+  push:
+    paths:
+      - 'charts/**'
+      - '.github/workflows/validate-gha-chart.yaml'
+      - '!charts/actions-runner-controller/**'
+      - '!**.md'
+  workflow_dispatch:
+env:
+  KUBE_SCORE_VERSION: 1.16.1
+  HELM_VERSION: v3.8.0
+
+permissions:
+  contents: read
+
+jobs:
+  validate-chart:
+    name: Lint Chart
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Set up Helm
+        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
+        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
+        with:
+          version: ${{ env.HELM_VERSION }}
+
+      - name: Set up kube-score
+        run: |
+          wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
+          chmod 755 kube-score
+
+      - name: Kube-score generated manifests
+        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
+          --ignore-test pod-networkpolicy
+          --ignore-test deployment-has-poddisruptionbudget
+          --ignore-test deployment-has-host-podantiaffinity
+          --ignore-test container-security-context
+          --ignore-test pod-probes
+          --ignore-test container-image-tag
+          --enable-optional-test container-security-context-privileged
+          --enable-optional-test container-security-context-readonlyrootfilesystem
+
+      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.7'
+
+      - name: Set up chart-testing
+        uses: helm/chart-testing-action@v2.3.1
+
+      - name: Set up latest version chart-testing
+        run: |
+          echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list
+          sudo apt update
+          sudo apt install goreleaser
+          git clone https://github.com/helm/chart-testing
+          cd chart-testing
+          unset CT_CONFIG_DIR
+          goreleaser build --clean --skip-validate
+          ./dist/chart-testing_linux_amd64_v1/ct version
+          echo 'Adding ct directory to PATH...'
+          echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH"
+          echo 'Setting CT_CONFIG_DIR...'
+          echo "CT_CONFIG_DIR=$RUNNER_TEMP/chart-testing/etc" >> "$GITHUB_ENV"
+        working-directory: ${{ runner.temp }}
+
+      - name: Run chart-testing (list-changed)
+        id: list-changed
+        run: |
+          ct version
+          changed=$(ct list-changed --config charts/.ci/ct-config-gha.yaml)
+          if [[ -n "$changed" ]]; then
+            echo "::set-output name=changed::true"
+          fi
+
+      - name: Run chart-testing (lint)
+        run: |
+          ct lint --config charts/.ci/ct-config-gha.yaml
+
+      - name: Set up docker buildx
+        uses: docker/setup-buildx-action@v2
+        if: steps.list-changed.outputs.changed == 'true'
+        with:
+          version: latest
+
+      - name: Build controller image
+        uses: docker/build-push-action@v3
+        if: steps.list-changed.outputs.changed == 'true'
+        with:
+          file: Dockerfile
+          platforms: linux/amd64
+          load: true
+          build-args: |
+            DOCKER_IMAGE_NAME=test-arc
+            VERSION=dev
+          tags: |
+            test-arc:dev
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+
+      - name: Create kind cluster
+        uses: helm/kind-action@v1.4.0
+        if: steps.list-changed.outputs.changed == 'true'
+        with:
+          cluster_name: chart-testing
+
+      - name: Load image into cluster
+        if: steps.list-changed.outputs.changed == 'true'
+        run: |
+          export DOCKER_IMAGE_NAME=test-arc
+          export VERSION=dev
+          export IMG_RESULT=load
+          make docker-buildx
+          kind load docker-image test-arc:dev --name chart-testing
+
+      - name: Run chart-testing (install)
+        if: steps.list-changed.outputs.changed == 'true'
+        run: |
+          ct install --config charts/.ci/ct-config-gha.yaml

2  .gitignore  (vendored)
@@ -35,3 +35,5 @@ bin
 .DS_STORE

 /test-assets
+
+/.tools

13  Makefile
@@ -5,7 +5,7 @@ else
 endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
-RUNNER_VERSION ?= 2.302.1
+RUNNER_VERSION ?= 2.304.0
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -92,9 +92,14 @@ manager: generate fmt vet
 run: generate fmt vet manifests
 	go run ./main.go

+run-scaleset: generate fmt vet
+	CONTROLLER_MANAGER_POD_NAMESPACE=default \
+	CONTROLLER_MANAGER_CONTAINER_IMAGE="${DOCKER_IMAGE_NAME}:${VERSION}" \
+	go run ./main.go --auto-scaling-runner-set-only
+
 # Install CRDs into a cluster
 install: manifests
-	kustomize build config/crd | kubectl apply -f -
+	kustomize build config/crd | kubectl apply --server-side -f -

 # Uninstall CRDs from a cluster
 uninstall: manifests
@@ -103,7 +108,7 @@ uninstall: manifests
 # Deploy controller in the configured Kubernetes cluster in ~/.kube/config
 deploy: manifests
 	cd config/manager && kustomize edit set image controller=${DOCKER_IMAGE_NAME}:${VERSION}
-	kustomize build config/default | kubectl apply -f -
+	kustomize build config/default | kubectl apply --server-side -f -

 # Generate manifests e.g. CRD, RBAC etc.
 manifests: manifests-gen-crds chart-crds
@@ -197,7 +202,7 @@ generate: controller-gen

 # Run shellcheck on runner scripts
 shellcheck: shellcheck-install
-	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh
+	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh

 docker-buildx:
 	export DOCKER_CLI_EXPERIMENTAL=enabled ;\
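A possible local use of the new target, assuming a reachable cluster in ~/.kube/config and the Makefile variables shown above (the image name and version here are placeholders):

    # Apply CRDs server-side, then run only the autoscaling runner set controllers locally
    make install
    DOCKER_IMAGE_NAME=test-arc VERSION=dev make run-scaleset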

@@ -6,17 +6,14 @@

 ## People

-`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions), mostly in their spare time.
+`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with the GitHub Actions team, external maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions).

-If you think the project is awesome and it's becoming a basis for your important business, consider [sponsoring us](https://github.com/sponsors/actions-runner-controller)!
+If you think the project is awesome and is adding value to your business, please consider directly sponsoring [community maintainers](https://github.com/sponsors/actions-runner-controller) and individual contributors via GitHub Sponsors.

 In case you are already the employer of one of contributors, sponsoring via GitHub Sponsors might not be an option. Just support them in other means!

-We don't currently have [any sponsors dedicated to this project yet](https://github.com/sponsors/actions-runner-controller).
-
-However, [HelloFresh](https://www.hellofreshgroup.com/en/) has recently started sponsoring @mumoshu for this project along with his other works. A part of their sponsorship will enable @mumoshu to add an E2E test to keep ARC even more reliable on AWS. Thank you for your sponsorship!
+See [the sponsorship dashboard](https://github.com/sponsors/actions-runner-controller) for the former and the current sponsors.

-[<img src="https://user-images.githubusercontent.com/22009/170898715-07f02941-35ec-418b-8cd4-251b422fa9ac.png" width="219" height="71" />](https://careers.hellofresh.com/)
-
 ## Status

@@ -61,6 +61,9 @@ if [ "${tool}" == "helm" ]; then
     flags+=( --set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
     flags+=( --set actionsMetricsServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
   fi
+  if [ "${WATCH_NAMESPACE}" != "" ]; then
+    flags+=( --set watchNamespace=${WATCH_NAMESPACE} --set singleNamespace=true)
+  fi
   if [ "${CHART_VERSION}" != "" ]; then
     flags+=( --version ${CHART_VERSION})
   fi
@@ -69,6 +72,9 @@ if [ "${tool}" == "helm" ]; then
     flags+=( --set githubWebhookServer.logFormat=${LOG_FORMAT})
     flags+=( --set actionsMetricsServer.logFormat=${LOG_FORMAT})
   fi
+  if [ "${ADMISSION_WEBHOOKS_TIMEOUT}" != "" ]; then
+    flags+=( --set admissionWebHooks.timeoutSeconds=${ADMISSION_WEBHOOKS_TIMEOUT})
+  fi
   if [ -n "${CREATE_SECRETS_USING_HELM}" ]; then
     if [ -z "${WEBHOOK_GITHUB_TOKEN}" ]; then
       echo 'Failed deploying secret "actions-metrics-server" using helm. Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2
@@ -77,6 +83,10 @@ if [ "${tool}" == "helm" ]; then
     flags+=( --set actionsMetricsServer.secret.create=true)
     flags+=( --set actionsMetricsServer.secret.github_token=${WEBHOOK_GITHUB_TOKEN})
   fi
+  if [ -n "${GITHUB_WEBHOOK_SERVER_ENV_NAME}" ] && [ -n "${GITHUB_WEBHOOK_SERVER_ENV_VALUE}" ]; then
+    flags+=( --set githubWebhookServer.env[0].name=${GITHUB_WEBHOOK_SERVER_ENV_NAME})
+    flags+=( --set githubWebhookServer.env[0].value=${GITHUB_WEBHOOK_SERVER_ENV_VALUE})
+  fi

   set -vx

@@ -92,6 +102,7 @@ if [ "${tool}" == "helm" ]; then
     --set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
     --set actionsMetricsServer.podAnnotations.test-id=${TEST_ID} \
     ${flags[@]} --set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
+    --set image.dindSidecarRepositoryAndTag=${DIND_SIDECAR_REPOSITORY_AND_TAG} \
     -f ${VALUES_FILE}
   set +v
   # To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
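The new environment knobs feed straight into helm --set flags; a hypothetical invocation of the acceptance deploy script (the script path below is assumed for illustration, and the values are placeholders) might look like:

    export WATCH_NAMESPACE=arc-runners            # adds --set watchNamespace=... --set singleNamespace=true
    export ADMISSION_WEBHOOKS_TIMEOUT=30          # adds --set admissionWebHooks.timeoutSeconds=30
    export GITHUB_WEBHOOK_SERVER_ENV_NAME=LOG_LEVEL
    export GITHUB_WEBHOOK_SERVER_ENV_VALUE=debug
    bash acceptance/deploy.sh                     # assumed script path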

@@ -6,6 +6,10 @@ OP=${OP:-apply}

 RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}

+# See https://github.com/actions/actions-runner-controller/issues/2123
+kubectl delete secret generic docker-config || :
+kubectl create secret generic docker-config --from-file .dockerconfigjson=<(jq -M 'del(.aliases)' $HOME/.docker/config.json) --type=kubernetes.io/dockerconfigjson || :
+
 cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f -

 if [ -n "${TEST_REPO}" ]; then

27  acceptance/testdata/runnerdeploy.envsubst.yaml  (vendored)
@@ -95,6 +95,24 @@ spec:
           # that part is created by dockerd.
           mountPath: /home/runner/.local
           readOnly: false
+        # See https://github.com/actions/actions-runner-controller/issues/2123
+        # Be sure to omit the "aliases" field from the config.json.
+        # Otherwise you may encounter nasty errors like:
+        #   $ docker build
+        #   docker: 'buildx' is not a docker command.
+        #   See 'docker --help'
+        # due to the incompatibility between your host docker config.json and the runner environment.
+        # That is, your host dockcer config.json might contain this:
+        #   "aliases": {
+        #     "builder": "buildx"
+        #   }
+        # And this results in the above error when the runner does not have buildx installed yet.
+        - name: docker-config
+          mountPath: /home/runner/.docker/config.json
+          subPath: config.json
+          readOnly: true
+        - name: docker-config-root
+          mountPath: /home/runner/.docker
       volumes:
         - name: rootless-dind-work-dir
           ephemeral:
@@ -105,6 +123,15 @@ spec:
               resources:
                 requests:
                   storage: 3Gi
+        - name: docker-config
+          # Refer to .dockerconfigjson/.docker/config.json
+          secret:
+            secretName: docker-config
+            items:
+              - key: .dockerconfigjson
+                path: config.json
+        - name: docker-config-root
+          emptyDir: {}

 #
 # Non-standard working directory
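For illustration, the docker-config secret those mounts refer to can be created from a host config.json with the aliases field stripped, mirroring the acceptance-script change above:

    # Recreate the docker-config secret without the "aliases" key (sketch)
    kubectl delete secret docker-config --ignore-not-found
    kubectl create secret generic docker-config \
      --type=kubernetes.io/dockerconfigjson \
      --from-file .dockerconfigjson=<(jq -M 'del(.aliases)' "$HOME/.docker/config.json")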

@@ -1,18 +0,0 @@
-# Title
-
-<!-- ADR titles should typically be imperative sentences. -->
-
-**Status**: (Proposed|Accepted|Rejected|Superceded|Deprecated)
-
-## Context
-
-*What is the issue or background knowledge necessary for future readers
-to understand why this ADR was written?*
-
-## Decision
-
-**What** is the change being proposed? / **How** will it be implemented?*
-
-## Consequences
-
-*What becomes easier or more difficult to do because of this change?*

@@ -52,6 +52,9 @@ type AutoscalingListenerSpec struct {
 	// Required
 	Image string `json:"image,omitempty"`

+	// Required
+	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
+
 	// Required
 	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`

@@ -33,10 +33,14 @@ import (

 //+kubebuilder:object:root=true
 //+kubebuilder:subresource:status
-//+kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=number
-//+kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=number
-//+kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=number
+//+kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=integer
+//+kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=integer
+//+kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=integer
 //+kubebuilder:printcolumn:JSONPath=".status.state",name=State,type=string
+//+kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer
+//+kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer
+//+kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer
+//+kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer

 // AutoscalingRunnerSet is the Schema for the autoscalingrunnersets API
 type AutoscalingRunnerSet struct {
@@ -228,14 +232,22 @@ type ProxyServerConfig struct {
 // AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet
 type AutoscalingRunnerSetStatus struct {
 	// +optional
-	CurrentRunners int `json:"currentRunners,omitempty"`
+	CurrentRunners int `json:"currentRunners"`

 	// +optional
-	State string `json:"state,omitempty"`
+	State string `json:"state"`
+
+	// EphemeralRunner counts separated by the stage ephemeral runners are in, taken from the EphemeralRunnerSet
+
+	//+optional
+	PendingEphemeralRunners int `json:"pendingEphemeralRunners"`
+	// +optional
+	RunningEphemeralRunners int `json:"runningEphemeralRunners"`
+	// +optional
+	FailedEphemeralRunners int `json:"failedEphemeralRunners"`
 }

 func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {
-	type listenerSpec = AutoscalingRunnerSetSpec
 	arsSpec := ars.Spec.DeepCopy()
 	spec := arsSpec
 	return hash.ComputeTemplateHash(&spec)
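With the added printcolumn markers and status fields, the per-stage counters become visible from kubectl; a hypothetical check (the resource and namespace names below are placeholders) could be:

    # Show the new Pending/Running/Finished/Deleting runner columns (illustrative)
    kubectl get autoscalingrunnersets -n arc-runners
    kubectl get autoscalingrunnersets my-set -n arc-runners \
      -o jsonpath='{.status.pendingEphemeralRunners} {.status.runningEphemeralRunners}{"\n"}'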

@@ -31,13 +31,27 @@ type EphemeralRunnerSetSpec struct {
 // EphemeralRunnerSetStatus defines the observed state of EphemeralRunnerSet
 type EphemeralRunnerSetStatus struct {
 	// CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet.
-	CurrentReplicas int `json:"currentReplicas,omitempty"`
+	CurrentReplicas int `json:"currentReplicas"`
+
+	// EphemeralRunner counts separated by the stage ephemeral runners are in
+
+	// +optional
+	PendingEphemeralRunners int `json:"pendingEphemeralRunners"`
+	// +optional
+	RunningEphemeralRunners int `json:"runningEphemeralRunners"`
+	// +optional
+	FailedEphemeralRunners int `json:"failedEphemeralRunners"`
 }

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="DesiredReplicas",type="integer"
 // +kubebuilder:printcolumn:JSONPath=".status.currentReplicas", name="CurrentReplicas",type="integer"
+//+kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer
+//+kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer
+//+kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer
+//+kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer

 // EphemeralRunnerSet is the Schema for the ephemeralrunnersets API
 type EphemeralRunnerSet struct {
 	metav1.TypeMeta `json:",inline"`

@@ -77,6 +77,11 @@ type RunnerDeploymentStatus struct {
 // +kubebuilder:object:root=true
 // +kubebuilder:resource:shortName=rdeploy
 // +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.enterprise",name=Enterprise,type=string
+// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.organization",name=Organization,type=string
+// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.repository",name=Repository,type=string
+// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.group",name=Group,type=string
+// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.labels",name=Labels,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
 // +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
 // +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number

9  charts/.ci/ct-config-gha.yaml  (Normal file)
@@ -0,0 +1,9 @@
+# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
+lint-conf: charts/.ci/lint-config.yaml
+chart-repos:
+  - jetstack=https://charts.jetstack.io
+check-version-increment: false # Disable checking that the chart version has been bumped
+charts:
+  - charts/gha-runner-scale-set-controller
+  - charts/gha-runner-scale-set
+skip-clean-up: true
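Assuming ct is on the PATH as set up by the workflow above, the dedicated config can also be exercised locally:

    # Lint and install-test only the gha-* charts (illustrative local run)
    ct lint --config charts/.ci/ct-config-gha.yaml
    ct install --config charts/.ci/ct-config-gha.yaml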

@@ -5,5 +5,3 @@ chart-repos:
 check-version-increment: false # Disable checking that the chart version has been bumped
 charts:
   - charts/actions-runner-controller
-  - charts/gha-runner-scale-set-controller
-  - charts/gha-runner-scale-set

@@ -15,10 +15,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.22.0
+version: 0.23.2

 # Used as the default manager tag value when no tag property is provided in the values.yaml
-appVersion: 0.27.0
+appVersion: 0.27.3

 home: https://github.com/actions/actions-runner-controller

@@ -46,7 +46,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `metrics.port` | Set port of metrics service | 8443 |
 | `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
 | `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
-| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
+| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
 | `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
 | `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
 | `fullnameOverride` | Override the full resource names | |
@@ -102,8 +102,11 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
 | `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
 | `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
+| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
+| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` |
 | `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
 | `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
+| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
 | `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
 | `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
 | `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
@@ -115,9 +118,9 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
 | `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
 | `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
-| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
+| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the actions-metrics-server | false |
 | `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false |
-| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
+| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | actions-metrics-server |
 | `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
 | `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | |
 | `actionsMetricsServer.nameOverride` | Override the resource name prefix | |
@@ -135,8 +138,11 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | |
 | `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | |
 | `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | |
+| `actionsMetricsServer.terminationGracePeriodSeconds` | Set the actionsMetricsServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
+| `actionsMetricsServer.lifecycle` | Set the actionsMetricsServer pod lifecycle hooks | `{}` |
 | `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | |
 | `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
+| `actionsMetricsServer.service.loadBalancerSourceRanges` | Set actionsMetricsServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
 | `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false |
 | `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | |
 | `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
@@ -147,5 +153,5 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `actionsMetrics.port` | Set port of actions metrics service | 8443 |
 | `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
 | `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
-| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
+| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
 | `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
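A hypothetical install exercising the newly documented values (the release name, chart repo alias, namespace, and values below are placeholders):

    helm upgrade --install actions-runner-controller \
      actions-runner-controller/actions-runner-controller \
      --namespace actions-runner-system \
      --set githubWebhookServer.terminationGracePeriodSeconds=30 \
      --set 'githubWebhookServer.service.loadBalancerSourceRanges[0]=10.0.0.0/8'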

@@ -17,6 +17,21 @@ spec:
   scope: Namespaced
   versions:
   - additionalPrinterColumns:
+    - jsonPath: .spec.template.spec.enterprise
+      name: Enterprise
+      type: string
+    - jsonPath: .spec.template.spec.organization
+      name: Organization
+      type: string
+    - jsonPath: .spec.template.spec.repository
+      name: Repository
+      type: string
+    - jsonPath: .spec.template.spec.group
+      name: Group
+      type: string
+    - jsonPath: .spec.template.spec.labels
+      name: Labels
+      type: string
     - jsonPath: .spec.replicas
       name: Desired
       type: number

@@ -50,6 +50,12 @@ spec:
         {{- end }}
         command:
         - "/actions-metrics-server"
+        {{- if .Values.actionsMetricsServer.lifecycle }}
+        {{- with .Values.actionsMetricsServer.lifecycle }}
+        lifecycle:
+          {{- toYaml . | nindent 10 }}
+        {{- end }}
+        {{- end }}
         env:
         - name: GITHUB_WEBHOOK_SECRET_TOKEN
           valueFrom:
@@ -142,7 +148,7 @@ spec:
       securityContext:
         {{- toYaml .Values.securityContext | nindent 12 }}
       {{- end }}
-      terminationGracePeriodSeconds: 10
+      terminationGracePeriodSeconds: {{ .Values.actionsMetricsServer.terminationGracePeriodSeconds }}
      {{- with .Values.actionsMetricsServer.nodeSelector }}
       nodeSelector:
         {{- toYaml . | nindent 8 }}

@@ -0,0 +1,90 @@
+{{- if .Values.actionsMetricsServer.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
+rules:
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - horizontalrunnerautoscalers
+  verbs:
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - horizontalrunnerautoscalers/finalizers
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - horizontalrunnerautoscalers/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnersets
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnerdeployments
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnerdeployments/finalizers
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnerdeployments/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - authentication.k8s.io
+  resources:
+  - tokenreviews
+  verbs:
+  - create
+- apiGroups:
+  - authorization.k8s.io
+  resources:
+  - subjectaccessreviews
+  verbs:
+  - create
+{{- end }}

@@ -0,0 +1,14 @@
+{{- if .Values.actionsMetricsServer.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "actions-runner-controller-actions-metrics-server.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}

@@ -5,7 +5,7 @@ metadata:
   name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }}
   namespace: {{ .Release.Namespace }}
   labels:
-    {{- include "actions-runner-controller.labels" . | nindent 4 }}
+    {{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
   {{- if .Values.actionsMetricsServer.service.annotations }}
   annotations:
     {{ toYaml .Values.actionsMetricsServer.service.annotations | nindent 4 }}
@@ -23,4 +23,10 @@ spec:
   {{- end }}
   selector:
     {{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
+  {{- if .Values.actionsMetricsServer.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $ip := .Values.actionsMetricsServer.service.loadBalancerSourceRanges }}
+    - {{ $ip -}}
+  {{- end }}
+  {{- end }}
 {{- end }}

@@ -117,10 +117,14 @@ spec:
               name: {{ include "actions-runner-controller.secretName" . }}
               optional: true
         {{- end }}
+        {{- if kindIs "slice" .Values.githubWebhookServer.env }}
+        {{- toYaml .Values.githubWebhookServer.env | nindent 8 }}
+        {{- else }}
         {{- range $key, $val := .Values.githubWebhookServer.env }}
         - name: {{ $key }}
           value: {{ $val | quote }}
         {{- end }}
+        {{- end }}
         image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
         name: github-webhook-server
         imagePullPolicy: {{ .Values.image.pullPolicy }}

@@ -250,14 +250,6 @@ rules:
   - patch
   - update
   - watch
-- apiGroups:
-  - ""
-  resources:
-  - secrets
-  verbs:
-  - get
-  - list
-  - watch
 {{- if .Values.runner.statusUpdateHook.enabled }}
 - apiGroups:
   - ""
@@ -311,11 +303,4 @@ rules:
   - list
   - create
   - delete
-- apiGroups:
-  - ""
-  resources:
-  - secrets
-  verbs:
-  - create
-  - delete
 {{- end }}
@@ -0,0 +1,21 @@
|
|||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
{{- if .Values.scope.singleNamespace }}
|
||||||
|
kind: RoleBinding
|
||||||
|
{{- else }}
|
||||||
|
kind: ClusterRoleBinding
|
||||||
|
{{- end }}
|
||||||
|
metadata:
|
||||||
|
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
roleRef:
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
{{- if .Values.scope.singleNamespace }}
|
||||||
|
kind: Role
|
||||||
|
{{- else }}
|
||||||
|
kind: ClusterRole
|
||||||
|
{{- end }}
|
||||||
|
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
|
||||||
|
subjects:
|
||||||
|
- kind: ServiceAccount
|
||||||
|
name: {{ include "actions-runner-controller.serviceAccountName" . }}
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
{{- if .Values.scope.singleNamespace }}
|
||||||
|
kind: Role
|
||||||
|
{{- else }}
|
||||||
|
kind: ClusterRole
|
||||||
|
{{- end }}
|
||||||
|
metadata:
|
||||||
|
creationTimestamp: null
|
||||||
|
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
|
||||||
|
rules:
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- secrets
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
|
{{- if .Values.rbac.allowGrantingKubernetesContainerModePermissions }}
|
||||||
|
{{/* These permissions are required by ARC to create RBAC resources for the runner pod to use the kubernetes container mode. */}}
|
||||||
|
{{/* See https://github.com/actions/actions-runner-controller/pull/1268/files#r917331632 */}}
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
{{- end }}
|
||||||
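The create and delete verbs at the end of the secrets Role above are gated behind a values flag. A minimal sketch of the override that enables them, using the key path referenced by the template (whether to turn it on depends on using kubernetes container mode):

    rbac:
      # Lets ARC create and delete the secrets it provisions for runner pods
      # that use kubernetes container mode (see the PR discussion linked in the template).
      allowGrantingKubernetesContainerModePermissions: true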
@@ -44,6 +44,7 @@ webhooks:
     resources:
     - runners
   sideEffects: None
+  timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
 - admissionReviewVersions:
   - v1beta1
   {{- if .Values.scope.singleNamespace }}
@@ -74,6 +75,7 @@ webhooks:
     resources:
     - runnerdeployments
   sideEffects: None
+  timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
 - admissionReviewVersions:
   - v1beta1
   {{- if .Values.scope.singleNamespace }}
@@ -104,6 +106,7 @@ webhooks:
     resources:
     - runnerreplicasets
   sideEffects: None
+  timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
 - admissionReviewVersions:
   - v1beta1
   {{- if .Values.scope.singleNamespace }}
@@ -136,6 +139,7 @@ webhooks:
   objectSelector:
     matchLabels:
       "actions-runner-controller/inject-registration-token": "true"
+  timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
 ---
 apiVersion: admissionregistration.k8s.io/v1
 kind: ValidatingWebhookConfiguration
@@ -177,6 +181,7 @@ webhooks:
     resources:
     - runners
   sideEffects: None
+  timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
 - admissionReviewVersions:
   - v1beta1
   {{- if .Values.scope.singleNamespace }}
@@ -207,6 +212,7 @@ webhooks:
     resources:
     - runnerdeployments
   sideEffects: None
+  timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
 - admissionReviewVersions:
   - v1beta1
   {{- if .Values.scope.singleNamespace }}
@@ -238,6 +244,7 @@ webhooks:
   - runnerreplicasets
   sideEffects: None
   {{ if not (or (hasKey .Values.admissionWebHooks "caBundle") .Values.certManagerEnabled) }}
+  timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
 ---
 apiVersion: v1
 kind: Secret
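Every webhook entry above now renders a timeoutSeconds field that falls back to 10 when the value is unset. A minimal sketch of the override for clusters where the webhook service responds slowly (30 is only an illustrative value):

    admissionWebHooks:
      timeoutSeconds: 30   # applied to each mutating and validating webhook rendered by the chart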
@@ -47,6 +47,7 @@ authSecret:
   #github_basicauth_username: ""
   #github_basicauth_password: ""

+# http(s) should be specified for dockerRegistryMirror, e.g.: dockerRegistryMirror="https://<your-docker-registry-mirror>"
 dockerRegistryMirror: ""
 image:
   repository: "summerwind/actions-runner-controller"
@@ -279,6 +280,19 @@ githubWebhookServer:
   # queueLimit: 100
   terminationGracePeriodSeconds: 10
   lifecycle: {}
+  # specify additional environment variables for the webhook server pod.
+  # It's possible to specify either key vale pairs e.g.:
+  # my_env_var: "some value"
+  # my_other_env_var: "other value"
+
+  # or a list of complete environment variable definitions e.g.:
+  # - name: GITHUB_WEBHOOK_SECRET_TOKEN
+  #   valueFrom:
+  #     secretKeyRef:
+  #       key: GITHUB_WEBHOOK_SECRET_TOKEN
+  #       name: prod-gha-controller-webhook-token
+  #   optional: true
+  env: {}

 actionsMetrics:
   serviceAnnotations: {}
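Since the deployment template now branches on whether githubWebhookServer.env is a list, both styles described in the comments above are accepted. A minimal sketch of the list form, reusing the illustrative secret name from the comment (prod-gha-controller-webhook-token is an example, not a required name):

    githubWebhookServer:
      env:
        - name: GITHUB_WEBHOOK_SECRET_TOKEN
          valueFrom:
            secretKeyRef:
              name: prod-gha-controller-webhook-token   # example secret name
              key: GITHUB_WEBHOOK_SECRET_TOKEN
              optional: true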
@@ -346,6 +360,7 @@ actionsMetricsServer:
       protocol: TCP
       name: http
       #nodePort: someFixedPortForUseWithTerraformCdkCfnEtc
+    loadBalancerSourceRanges: []
   ingress:
     enabled: false
     ingressClassName: ""
@@ -375,4 +390,5 @@ actionsMetricsServer:
   #  - secretName: chart-example-tls
   #    hosts:
   #      - chart-example.local
+  terminationGracePeriodSeconds: 10
+  lifecycle: {}

@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.0
+version: 0.4.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.3.0"
+appVersion: "0.4.0"

 home: https://github.com/actions/actions-runner-controller

charts/gha-runner-scale-set-controller/ci/ci-values.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+# Set the following to dummy values.
+# This is only useful in CI
+image:
+  repository: test-arc
+  tag: dev

@@ -80,6 +80,9 @@ spec:
             image:
               description: Required
               type: string
+            imagePullPolicy:
+              description: Required
+              type: string
             imagePullSecrets:
               description: Required
               items:

@@ -17,16 +17,28 @@ spec:
   - additionalPrinterColumns:
     - jsonPath: .spec.minRunners
       name: Minimum Runners
-      type: number
+      type: integer
     - jsonPath: .spec.maxRunners
       name: Maximum Runners
-      type: number
+      type: integer
     - jsonPath: .status.currentRunners
       name: Current Runners
-      type: number
+      type: integer
     - jsonPath: .status.state
      name: State
      type: string
+    - jsonPath: .status.pendingEphemeralRunners
+      name: Pending Runners
+      type: integer
+    - jsonPath: .status.runningEphemeralRunners
+      name: Running Runners
+      type: integer
+    - jsonPath: .status.finishedEphemeralRunners
+      name: Finished Runners
+      type: integer
+    - jsonPath: .status.deletingEphemeralRunners
+      name: Deleting Runners
+      type: integer
     name: v1alpha1
     schema:
       openAPIV3Schema:
@@ -4306,6 +4318,12 @@ spec:
           properties:
             currentRunners:
               type: integer
+            failedEphemeralRunners:
+              type: integer
+            pendingEphemeralRunners:
+              type: integer
+            runningEphemeralRunners:
+              type: integer
             state:
               type: string
           type: object
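With the extra printer columns and status properties above, an AutoscalingRunnerSet reports its ephemeral-runner counts directly in status. A purely illustrative stanza with invented numbers, matching the fields added to the schema:

    status:
      currentRunners: 5
      pendingEphemeralRunners: 2    # counts are made up for illustration
      runningEphemeralRunners: 3
      failedEphemeralRunners: 0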
@@ -21,6 +21,18 @@ spec:
     - jsonPath: .status.currentReplicas
       name: CurrentReplicas
       type: integer
+    - jsonPath: .status.pendingEphemeralRunners
+      name: Pending Runners
+      type: integer
+    - jsonPath: .status.runningEphemeralRunners
+      name: Running Runners
+      type: integer
+    - jsonPath: .status.finishedEphemeralRunners
+      name: Finished Runners
+      type: integer
+    - jsonPath: .status.deletingEphemeralRunners
+      name: Deleting Runners
+      type: integer
     name: v1alpha1
     schema:
       openAPIV3Schema:
@@ -4296,6 +4308,14 @@ spec:
             currentReplicas:
               description: CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet.
               type: integer
+            failedEphemeralRunners:
+              type: integer
+            pendingEphemeralRunners:
+              type: integer
+            runningEphemeralRunners:
+              type: integer
+          required:
+          - currentReplicas
           type: object
         type: object
     served: true

@@ -39,7 +39,7 @@ helm.sh/chart: {{ include "gha-runner-scale-set-controller.chart" . }}
 {{- if .Chart.AppVersion }}
 app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
 {{- end }}
-app.kubernetes.io/part-of: {{ .Chart.Name }}
+app.kubernetes.io/part-of: gha-runner-scale-set-controller
 app.kubernetes.io/managed-by: {{ .Release.Service }}
 {{- range $k, $v := .Values.labels }}
 {{ $k }}: {{ $v }}
@@ -59,25 +59,41 @@ Create the name of the service account to use
 */}}
 {{- define "gha-runner-scale-set-controller.serviceAccountName" -}}
 {{- if eq .Values.serviceAccount.name "default"}}
 {{- fail "serviceAccount.name cannot be set to 'default'" }}
 {{- end }}
 {{- if .Values.serviceAccount.create }}
 {{- default (include "gha-runner-scale-set-controller.fullname" .) .Values.serviceAccount.name }}
 {{- else }}
 {{- if not .Values.serviceAccount.name }}
 {{- fail "serviceAccount.name must be set if serviceAccount.create is false" }}
 {{- else }}
 {{- .Values.serviceAccount.name }}
 {{- end }}
 {{- end }}
 {{- end }}

-{{- define "gha-runner-scale-set-controller.managerRoleName" -}}
+{{- define "gha-runner-scale-set-controller.managerClusterRoleName" -}}
-{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-role
+{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-role
 {{- end }}

-{{- define "gha-runner-scale-set-controller.managerRoleBinding" -}}
+{{- define "gha-runner-scale-set-controller.managerClusterRoleBinding" -}}
-{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-rolebinding
+{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-rolebinding
+{{- end }}
+
+{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" -}}
+{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-role
+{{- end }}
+
+{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" -}}
+{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-rolebinding
+{{- end }}
+
+{{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}}
+{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-role
+{{- end }}
+
+{{- define "gha-runner-scale-set-controller.managerListenerRoleBinding" -}}
+{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-rolebinding
 {{- end }}

 {{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}}
@@ -91,7 +107,7 @@ Create the name of the service account to use
 {{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
 {{- $names := list }}
 {{- range $k, $v := . }}
 {{- $names = append $names $v.name }}
 {{- end }}
 {{- $names | join ","}}
 {{- end }}

@@ -5,6 +5,11 @@ metadata:
   namespace: {{ .Release.Namespace }}
   labels:
     {{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }}
+    actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }}
+    actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
+    {{- if .Values.flags.watchSingleNamespace }}
+    actions.github.com/controller-watch-single-namespace: {{ .Values.flags.watchSingleNamespace }}
+    {{- end }}
 spec:
   replicas: {{ default 1 .Values.replicaCount }}
   selector:
@@ -18,7 +23,7 @@ spec:
         {{- toYaml . | nindent 8 }}
         {{- end }}
       labels:
-        app.kubernetes.io/part-of: actions-runner-controller
+        app.kubernetes.io/part-of: gha-runner-scale-set-controller
         app.kubernetes.io/component: controller-manager
         app.kubernetes.io/version: {{ .Chart.Version }}
         {{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }}
@@ -51,25 +56,23 @@ spec:
         {{- with .Values.flags.logLevel }}
         - "--log-level={{ . }}"
         {{- end }}
+        {{- with .Values.flags.watchSingleNamespace }}
+        - "--watch-single-namespace={{ . }}"
+        {{- end }}
        command:
        - "/manager"
        env:
-        - name: CONTROLLER_MANAGER_POD_NAME
+        - name: CONTROLLER_MANAGER_CONTAINER_IMAGE
-          valueFrom:
+          value: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
-            fieldRef:
-              fieldPath: metadata.name
         - name: CONTROLLER_MANAGER_POD_NAMESPACE
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
+        - name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
+          value: "{{ .Values.image.pullPolicy | default "IfNotPresent" }}"
         {{- with .Values.env }}
-        {{- if kindIs "slice" .Values.env }}
+        {{- if kindIs "slice" . }}
-        {{- toYaml .Values.env | nindent 8 }}
+        {{- toYaml . | nindent 8 }}
-        {{- else }}
-        {{- range $key, $val := .Values.env }}
-        - name: {{ $key }}
-          value: {{ $val | quote }}
-        {{- end }}
         {{- end }}
         {{- end }}
         {{- with .Values.resources }}
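The deployment changes above derive the listener image and pull policy from chart values and optionally confine the controller to a single namespace. A minimal sketch of a values override exercising both additions (the namespace name and the variable are placeholders; the tests later in this diff use the same shapes):

    flags:
      watchSingleNamespace: "demo"   # placeholder namespace; rendered as --watch-single-namespace=demo
    env:
      - name: ENV_VAR_NAME_1         # list form is passed through verbatim via toYaml
        value: "ENV_VAR_VALUE_1"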
@@ -1,4 +1,4 @@
-{{- if gt (int (default 1 .Values.replicaCount)) 1 -}}
+{{- if gt (int (default 1 .Values.replicaCount)) 1 }}
 # permissions to do leader election.
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
@@ -1,4 +1,4 @@
-{{- if gt (int (default 1 .Values.replicaCount)) 1 -}}
+{{- if gt (int (default 1 .Values.replicaCount)) 1 }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
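The only change in these two leader-election templates is dropping the trailing dash from the opening if. In Go templates, a closing -}} also trims the whitespace that follows the action, including its newline, while a plain }} leaves that newline in place, so the next manifest line starts on its own line in the rendered output. A minimal illustration of the difference, not taken from the chart:

    {{- if true -}}
    apiVersion: v1   # with -}} the newline after the if is trimmed away
    {{- end }}

    {{- if true }}
    apiVersion: v1   # with }} the newline after the if is kept in the output
    {{- end }}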
@@ -1,7 +1,8 @@
+{{- if empty .Values.flags.watchSingleNamespace }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: {{ include "gha-runner-scale-set-controller.managerRoleName" . }}
+  name: {{ include "gha-runner-scale-set-controller.managerClusterRoleName" . }}
 rules:
 - apiGroups:
   - actions.github.com
@@ -20,6 +21,7 @@ rules:
   resources:
   - autoscalingrunnersets/finalizers
   verbs:
+  - patch
   - update
 - apiGroups:
   - actions.github.com
@@ -54,6 +56,7 @@ rules:
   resources:
   - autoscalinglisteners/finalizers
   verbs:
+  - patch
   - update
 - apiGroups:
   - actions.github.com
@@ -75,6 +78,13 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets/finalizers
+  verbs:
+  - patch
+  - update
 - apiGroups:
   - actions.github.com
   resources:
@@ -92,13 +102,8 @@ rules:
   resources:
   - ephemeralrunners/finalizers
   verbs:
-  - create
-  - delete
-  - get
-  - list
   - patch
   - update
-  - watch
 - apiGroups:
   - actions.github.com
   resources:
@@ -112,45 +117,13 @@ rules:
   resources:
   - pods
   verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - pods/status
-  verbs:
-  - get
-- apiGroups:
-  - ""
-  resources:
-  - secrets
-  verbs:
-  - create
-  - delete
-  - get
   - list
   - watch
-  - update
 - apiGroups:
   - ""
   resources:
   - serviceaccounts
   verbs:
-  - create
-  - delete
-  - get
-  - list
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - configmaps
-  verbs:
   - list
   - watch
 - apiGroups:
@@ -158,10 +131,6 @@ rules:
   resources:
   - rolebindings
   verbs:
-  - create
-  - delete
-  - get
-  - update
   - list
   - watch
 - apiGroups:
@@ -169,9 +138,7 @@ rules:
   resources:
   - roles
   verbs:
-  - create
-  - delete
-  - get
-  - update
   - list
   - watch
+  - patch
+{{- end }}
@@ -0,0 +1,14 @@
+{{- if empty .Values.flags.watchSingleNamespace }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "gha-runner-scale-set-controller.managerClusterRoleBinding" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ include "gha-runner-scale-set-controller.managerClusterRoleName" . }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}

@@ -0,0 +1,40 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . }}
+  namespace: {{ .Release.Namespace }}
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - create
+  - delete
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - pods/status
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - create
+  - delete
+  - get
+  - patch
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - delete
+  - get
+  - patch
+  - update

@@ -1,11 +1,12 @@
 apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
+kind: RoleBinding
 metadata:
-  name: {{ include "gha-runner-scale-set-controller.managerRoleBinding" . }}
+  name: {{ include "gha-runner-scale-set-controller.managerListenerRoleBinding" . }}
+  namespace: {{ .Release.Namespace }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
+  kind: Role
-  name: {{ include "gha-runner-scale-set-controller.managerRoleName" . }}
+  name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . }}
 subjects:
 - kind: ServiceAccount
   name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}

@@ -0,0 +1,84 @@
+{{- if .Values.flags.watchSingleNamespace }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
+  namespace: {{ .Release.Namespace }}
+rules:
+- apiGroups:
+  - actions.github.com
+  resources:
+  - autoscalinglisteners
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.github.com
+  resources:
+  - autoscalinglisteners/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - autoscalinglisteners/finalizers
+  verbs:
+  - patch
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - rolebindings
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - actions.github.com
+  resources:
+  - autoscalingrunnersets
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunners
+  verbs:
+  - list
+  - watch
+{{- end }}

@@ -0,0 +1,15 @@
+{{- if .Values.flags.watchSingleNamespace }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }}
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}

@@ -0,0 +1,125 @@
+{{- if .Values.flags.watchSingleNamespace }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
+  namespace: {{ .Values.flags.watchSingleNamespace }}
+rules:
+- apiGroups:
+  - actions.github.com
+  resources:
+  - autoscalingrunnersets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.github.com
+  resources:
+  - autoscalingrunnersets/finalizers
+  verbs:
+  - patch
+  - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - autoscalingrunnersets/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets/finalizers
+  verbs:
+  - patch
+  - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunners
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunners/finalizers
+  verbs:
+  - patch
+  - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunners/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - autoscalinglisteners
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - rolebindings
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  verbs:
+  - list
+  - watch
+  - patch
+{{- end }}

@@ -0,0 +1,15 @@
+{{- if .Values.flags.watchSingleNamespace }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }}
+  namespace: {{ .Values.flags.watchSingleNamespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
@@ -1,4 +1,4 @@
-{{- if .Values.serviceAccount.create -}}
+{{- if .Values.serviceAccount.create }}
 apiVersion: v1
 kind: ServiceAccount
 metadata:
|
|||||||
assert.ErrorContains(t, err, "serviceAccount.name must be set if serviceAccount.create is false", "We should get an error because the default service account cannot be used")
|
assert.ErrorContains(t, err, "serviceAccount.name must be set if serviceAccount.create is false", "We should get an error because the default service account cannot be used")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTemplate_CreateManagerRole(t *testing.T) {
|
func TestTemplate_CreateManagerClusterRole(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
// Path to the helm chart we will test
|
// Path to the helm chart we will test
|
||||||
@@ -162,17 +162,23 @@ func TestTemplate_CreateManagerRole(t *testing.T) {
|
|||||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||||
}
|
}
|
||||||
|
|
||||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
|
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role.yaml"})
|
||||||
|
|
||||||
var managerRole rbacv1.ClusterRole
|
var managerClusterRole rbacv1.ClusterRole
|
||||||
helm.UnmarshalK8SYaml(t, output, &managerRole)
|
helm.UnmarshalK8SYaml(t, output, &managerClusterRole)
|
||||||
|
|
||||||
assert.Empty(t, managerRole.Namespace, "ClusterRole should not have a namespace")
|
assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace")
|
||||||
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRole.Name)
|
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name)
|
||||||
assert.Equal(t, 18, len(managerRole.Rules))
|
assert.Equal(t, 16, len(managerClusterRole.Rules))
|
||||||
|
|
||||||
|
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
|
||||||
|
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role.yaml in chart", "We should get an error because the template should be skipped")
|
||||||
|
|
||||||
|
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role.yaml"})
|
||||||
|
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_watch_role.yaml in chart", "We should get an error because the template should be skipped")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTemplate_ManagerRoleBinding(t *testing.T) {
|
func TestTemplate_ManagerClusterRoleBinding(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
// Path to the helm chart we will test
|
// Path to the helm chart we will test
|
||||||
@@ -189,16 +195,80 @@ func TestTemplate_ManagerRoleBinding(t *testing.T) {
|
|||||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||||
}
|
}
|
||||||
|
|
||||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
|
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role_binding.yaml"})
|
||||||
|
|
||||||
var managerRoleBinding rbacv1.ClusterRoleBinding
|
var managerClusterRoleBinding rbacv1.ClusterRoleBinding
|
||||||
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
|
helm.UnmarshalK8SYaml(t, output, &managerClusterRoleBinding)
|
||||||
|
|
||||||
assert.Empty(t, managerRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace")
|
assert.Empty(t, managerClusterRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace")
|
||||||
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-rolebinding", managerRoleBinding.Name)
|
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-rolebinding", managerClusterRoleBinding.Name)
|
||||||
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRoleBinding.RoleRef.Name)
|
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRoleBinding.RoleRef.Name)
|
||||||
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerRoleBinding.Subjects[0].Name)
|
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerClusterRoleBinding.Subjects[0].Name)
|
||||||
assert.Equal(t, namespaceName, managerRoleBinding.Subjects[0].Namespace)
|
assert.Equal(t, namespaceName, managerClusterRoleBinding.Subjects[0].Namespace)
|
||||||
|
|
||||||
|
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"})
|
||||||
|
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role_binding.yaml in chart", "We should get an error because the template should be skipped")
|
||||||
|
|
||||||
|
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role_binding.yaml"})
|
||||||
|
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_watch_role_binding.yaml in chart", "We should get an error because the template should be skipped")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTemplate_CreateManagerListenerRole(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
// Path to the helm chart we will test
|
||||||
|
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
releaseName := "test-arc"
|
||||||
|
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||||
|
|
||||||
|
options := &helm.Options{
|
||||||
|
SetValues: map[string]string{},
|
||||||
|
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||||
|
}
|
||||||
|
|
||||||
|
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_listener_role.yaml"})
|
||||||
|
|
||||||
|
var managerListenerRole rbacv1.Role
|
||||||
|
helm.UnmarshalK8SYaml(t, output, &managerListenerRole)
|
||||||
|
|
||||||
|
assert.Equal(t, namespaceName, managerListenerRole.Namespace, "Role should have a namespace")
|
||||||
|
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRole.Name)
|
||||||
|
assert.Equal(t, 4, len(managerListenerRole.Rules))
|
||||||
|
assert.Equal(t, "pods", managerListenerRole.Rules[0].Resources[0])
|
||||||
|
assert.Equal(t, "pods/status", managerListenerRole.Rules[1].Resources[0])
|
||||||
|
assert.Equal(t, "secrets", managerListenerRole.Rules[2].Resources[0])
|
||||||
|
assert.Equal(t, "serviceaccounts", managerListenerRole.Rules[3].Resources[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTemplate_ManagerListenerRoleBinding(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
// Path to the helm chart we will test
|
||||||
|
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
releaseName := "test-arc"
|
||||||
|
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||||
|
|
||||||
|
options := &helm.Options{
|
||||||
|
SetValues: map[string]string{
|
||||||
|
"serviceAccount.create": "true",
|
||||||
|
},
|
||||||
|
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||||
|
}
|
||||||
|
|
||||||
|
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_listener_role_binding.yaml"})
|
||||||
|
|
||||||
|
var managerListenerRoleBinding rbacv1.RoleBinding
|
||||||
|
helm.UnmarshalK8SYaml(t, output, &managerListenerRoleBinding)
|
||||||
|
|
||||||
|
assert.Equal(t, namespaceName, managerListenerRoleBinding.Namespace, "RoleBinding should have a namespace")
|
||||||
|
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-rolebinding", managerListenerRoleBinding.Name)
|
||||||
|
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRoleBinding.RoleRef.Name)
|
||||||
|
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerListenerRoleBinding.Subjects[0].Name)
|
||||||
|
assert.Equal(t, namespaceName, managerListenerRoleBinding.Subjects[0].Namespace)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
|
func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
|
||||||
@@ -237,6 +307,10 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
|
|||||||
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
|
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
|
||||||
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
|
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
|
||||||
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
|
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
|
||||||
|
assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"])
|
||||||
|
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
|
||||||
|
assert.NotContains(t, deployment.Labels, "actions.github.com/controller-watch-single-namespace")
|
||||||
|
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"])
|
||||||
|
|
||||||
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
|
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
|
||||||
|
|
||||||
@@ -261,9 +335,11 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
|
|||||||
assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
|
assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)
|
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)
|
||||||
|
|
||||||
|
managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
|
||||||
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
|
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
|
||||||
assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image)
|
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Image)
|
||||||
assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
|
assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
|
||||||
@@ -273,13 +349,16 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
|
|||||||
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
|
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
|
||||||
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
|
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
|
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
|
||||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
||||||
assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath)
|
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
|
||||||
|
|
||||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
||||||
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
||||||
|
|
||||||
|
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
|
||||||
|
assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
|
||||||
|
|
||||||
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
|
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
|
||||||
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
|
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
|
||||||
@@ -314,6 +393,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
"imagePullSecrets[0].name": "dockerhub",
|
"imagePullSecrets[0].name": "dockerhub",
|
||||||
"nameOverride": "gha-runner-scale-set-controller-override",
|
"nameOverride": "gha-runner-scale-set-controller-override",
|
||||||
"fullnameOverride": "gha-runner-scale-set-controller-fullname-override",
|
"fullnameOverride": "gha-runner-scale-set-controller-fullname-override",
|
||||||
|
"env[0].name": "ENV_VAR_NAME_1",
|
||||||
|
"env[0].value": "ENV_VAR_VALUE_1",
|
||||||
"serviceAccount.name": "gha-runner-scale-set-controller-sa",
|
"serviceAccount.name": "gha-runner-scale-set-controller-sa",
|
||||||
"podAnnotations.foo": "bar",
|
"podAnnotations.foo": "bar",
|
||||||
"podSecurityContext.fsGroup": "1000",
|
"podSecurityContext.fsGroup": "1000",
|
||||||
@@ -341,6 +422,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
|
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
|
||||||
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
|
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
|
||||||
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
|
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
|
||||||
|
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"])
|
||||||
assert.Equal(t, "bar", deployment.Labels["foo"])
|
assert.Equal(t, "bar", deployment.Labels["foo"])
|
||||||
assert.Equal(t, "actions", deployment.Labels["github"])
|
assert.Equal(t, "actions", deployment.Labels["github"])
|
||||||
|
|
||||||
@@ -355,6 +437,9 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
|
assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
|
||||||
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
|
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
|
||||||
|
|
||||||
|
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
|
||||||
|
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
|
||||||
assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
|
assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
|
||||||
assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName)
|
assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName)
|
||||||
@@ -375,9 +460,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1)
|
||||||
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key)
|
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key)
|
||||||
|
|
||||||
|
managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
|
||||||
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
|
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
|
||||||
assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image)
|
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Image)
|
||||||
assert.Equal(t, corev1.PullAlways, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
|
assert.Equal(t, corev1.PullAlways, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
|
||||||
@@ -388,9 +475,15 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1])
|
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1])
|
||||||
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
|
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
|
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4)
|
||||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
||||||
assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath)
|
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
|
||||||
|
|
||||||
|
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
|
||||||
|
assert.Equal(t, "Always", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
|
||||||
|
|
||||||
|
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
|
||||||
|
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
|
||||||
|
|
||||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
||||||
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
||||||
@@ -531,3 +624,264 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
 	assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1])
 	assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
 }
+
+func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+	require.NoError(t, err)
+
+	chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
+	require.NoError(t, err)
+
+	chart := new(Chart)
+	err = yaml.Unmarshal(chartContent, chart)
+	require.NoError(t, err)
+
+	releaseName := "test-arc"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"image.tag":                  "dev",
+			"flags.watchSingleNamespace": "demo",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
+
+	var deployment appsv1.Deployment
+	helm.UnmarshalK8SYaml(t, output, &deployment)
+
+	assert.Equal(t, namespaceName, deployment.Namespace)
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
+	assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
+	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"])
+	assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
+	assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
+	assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
+	assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"])
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
+	assert.Equal(t, "demo", deployment.Labels["actions.github.com/controller-watch-single-namespace"])
+
+	assert.Equal(t, int32(1), *deployment.Spec.Replicas)
+
+	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
+	assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])
+
+	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
+	assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])
+
+	assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
+
+	assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0)
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName)
+	assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext)
+	assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName)
+	assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
+	assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
+	assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
+	assert.NotNil(t, 10, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
+
+	assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0)
+	assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
+	assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)
+
+	managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
+
+	assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
+	assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
+	assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image)
+	assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
+
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
+	assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])
+
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
+	assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
+	assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
+	assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2])
+
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
+	assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
+	assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
+
+	assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
+	assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
+
+	assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
+	assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
+
+	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
+	assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
+	assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
+	assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
+}
+
+func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+	require.NoError(t, err)
+
+	releaseName := "test-arc"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"env[0].Name":                            "ENV_VAR_NAME_1",
+			"env[0].Value":                           "ENV_VAR_VALUE_1",
+			"env[1].Name":                            "ENV_VAR_NAME_2",
+			"env[1].ValueFrom.SecretKeyRef.Key":      "ENV_VAR_NAME_2",
+			"env[1].ValueFrom.SecretKeyRef.Name":     "secret-name",
+			"env[1].ValueFrom.SecretKeyRef.Optional": "true",
+			"env[2].Name":                            "ENV_VAR_NAME_3",
+			"env[2].Value":                           "",
+			"env[3].Name":                            "ENV_VAR_NAME_4",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
+
+	var deployment appsv1.Deployment
+	helm.UnmarshalK8SYaml(t, output, &deployment)
+
+	assert.Equal(t, namespaceName, deployment.Namespace)
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
+
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7)
+	assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
+	assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
+	assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].Name)
+	assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Name)
+	assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Key)
+	assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Optional)
+	assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[5].Name)
+	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].Value)
+	assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[6].Name)
+	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[6].ValueFrom)
+}
+
+func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+	require.NoError(t, err)
+
+	releaseName := "test-arc"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"flags.watchSingleNamespace": "demo",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role.yaml"})
+	assert.ErrorContains(t, err, "could not find template templates/manager_cluster_role.yaml in chart", "We should get an error because the template should be skipped")
+}
+
+func TestTemplate_WatchSingleNamespace_NotManagerClusterRoleBinding(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+	require.NoError(t, err)
+
+	releaseName := "test-arc"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"serviceAccount.create":      "true",
+			"flags.watchSingleNamespace": "demo",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role_binding.yaml"})
+	assert.ErrorContains(t, err, "could not find template templates/manager_cluster_role_binding.yaml in chart", "We should get an error because the template should be skipped")
+}
+
+func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+	require.NoError(t, err)
+
+	releaseName := "test-arc"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"flags.watchSingleNamespace": "demo",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
+
+	var managerSingleNamespaceControllerRole rbacv1.Role
+	helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRole)
+
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRole.Name)
+	assert.Equal(t, namespaceName, managerSingleNamespaceControllerRole.Namespace)
+	assert.Equal(t, 10, len(managerSingleNamespaceControllerRole.Rules))
+
+	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role.yaml"})
+
+	var managerSingleNamespaceWatchRole rbacv1.Role
+	helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRole)
+
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name)
+	assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace)
+	assert.Equal(t, 14, len(managerSingleNamespaceWatchRole.Rules))
+}
+
+func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+	require.NoError(t, err)
+
+	releaseName := "test-arc"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"flags.watchSingleNamespace": "demo",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"})
+
+	var managerSingleNamespaceControllerRoleBinding rbacv1.RoleBinding
+	helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRoleBinding)
+
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceControllerRoleBinding.Name)
+	assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Namespace)
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRoleBinding.RoleRef.Name)
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceControllerRoleBinding.Subjects[0].Name)
+	assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Subjects[0].Namespace)
+
+	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role_binding.yaml"})
+
+	var managerSingleNamespaceWatchRoleBinding rbacv1.RoleBinding
+	helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRoleBinding)
+
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceWatchRoleBinding.Name)
+	assert.Equal(t, "demo", managerSingleNamespaceWatchRoleBinding.Namespace)
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRoleBinding.RoleRef.Name)
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name)
+	assert.Equal(t, namespaceName, managerSingleNamespaceWatchRoleBinding.Subjects[0].Namespace)
+}
@@ -18,6 +18,17 @@ imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""

+env:
+  ## Define environment variables for the controller pod
+  # - name: "ENV_VAR_NAME_1"
+  #   value: "ENV_VAR_VALUE_1"
+  # - name: "ENV_VAR_NAME_2"
+  #   valueFrom:
+  #     secretKeyRef:
+  #       key: ENV_VAR_NAME_2
+  #       name: secret-name
+  #       optional: true
+
 serviceAccount:
   # Specifies whether a service account should be created for running the controller pod
   create: true
@@ -31,27 +42,27 @@ serviceAccount:
 podAnnotations: {}

 podSecurityContext: {}
 # fsGroup: 2000

 securityContext: {}
 # capabilities:
 #   drop:
 #   - ALL
 # readOnlyRootFilesystem: true
 # runAsNonRoot: true
 # runAsUser: 1000

 resources: {}
-# We usually recommend not to specify default resources and to leave this as a conscious
-# choice for the user. This also increases chances charts run on environments with little
-# resources, such as Minikube. If you do want to specify resources, uncomment the following
-# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+## We usually recommend not to specify default resources and to leave this as a conscious
+## choice for the user. This also increases chances charts run on environments with little
+## resources, such as Minikube. If you do want to specify resources, uncomment the following
+## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
 # limits:
 #   cpu: 100m
 #   memory: 128Mi
 # requests:
 #   cpu: 100m
 #   memory: 128Mi

 nodeSelector: {}

@@ -68,3 +79,7 @@ flags:
   # Log level can be set here with one of the following values: "debug", "info", "warn", "error".
   # Defaults to "debug".
   logLevel: "debug"
+
+  ## Restricts the controller to only watch resources in the desired namespace.
+  ## Defaults to watch all namespaces when unset.
+  # watchSingleNamespace: ""
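Taken together, a user-supplied values override that exercises the new controller settings might look roughly like the sketch below. This is only illustrative: the env entries and the "demo" namespace mirror the commented defaults and the test inputs above, not anything the chart ships by default.

# illustrative override for the gha-runner-scale-set-controller chart (example values, not defaults)
env:
  - name: ENV_VAR_NAME_1
    value: ENV_VAR_VALUE_1
  - name: ENV_VAR_NAME_2
    valueFrom:
      secretKeyRef:
        key: ENV_VAR_NAME_2
        name: secret-name      # example secret name
        optional: true

flags:
  logLevel: "debug"
  # watch a single namespace instead of the whole cluster
  watchSingleNamespace: "demo"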
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.0
+version: 0.4.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.3.0"
+appVersion: "0.4.0"

 home: https://github.com/actions/dev-arc

@@ -11,17 +11,9 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
 If release name contains chart name it will be used as a full name.
 */}}
 {{- define "gha-runner-scale-set.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
+{{- $name := default .Chart.Name }}
 {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
 {{- end }}
-{{- end }}
-{{- end }}

 {{/*
 Create chart name and version as used by the chart label.
@@ -40,6 +32,9 @@ helm.sh/chart: {{ include "gha-runner-scale-set.chart" . }}
 app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
 {{- end }}
 app.kubernetes.io/managed-by: {{ .Release.Service }}
+app.kubernetes.io/part-of: gha-runner-scale-set
+actions.github.com/scale-set-name: {{ .Release.Name }}
+actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
 {{- end }}

 {{/*
@@ -70,24 +65,24 @@ app.kubernetes.io/instance: {{ .Release.Name }}
 {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role
 {{- end }}

+{{- define "gha-runner-scale-set.kubeModeRoleBindingName" -}}
+{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role-binding
+{{- end }}
+
 {{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
 {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-service-account
 {{- end }}

 {{- define "gha-runner-scale-set.dind-init-container" -}}
-{{- range $i, $val := .Values.template.spec.containers -}}
-{{- if eq $val.name "runner" -}}
+{{- range $i, $val := .Values.template.spec.containers }}
+{{- if eq $val.name "runner" }}
 image: {{ $val.image }}
-{{- if $val.imagePullSecrets }}
-imagePullSecrets:
-  {{ $val.imagePullSecrets | toYaml -}}
-{{- end }}
 command: ["cp"]
 args: ["-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"]
 volumeMounts:
   - name: dind-externals
     mountPath: /home/runner/tmpDir
 {{- end }}
 {{- end }}
 {{- end }}

@@ -124,7 +119,7 @@ volumeMounts:
 {{- $createWorkVolume := 1 }}
 {{- range $i, $volume := .Values.template.spec.volumes }}
 {{- if eq $volume.name "work" }}
-{{- $createWorkVolume = 0 -}}
+{{- $createWorkVolume = 0 }}
 - {{ $volume | toYaml | nindent 2 }}
 {{- end }}
 {{- end }}
@@ -138,7 +133,7 @@ volumeMounts:
 {{- $createWorkVolume := 1 }}
 {{- range $i, $volume := .Values.template.spec.volumes }}
 {{- if eq $volume.name "work" }}
-{{- $createWorkVolume = 0 -}}
+{{- $createWorkVolume = 0 }}
 - {{ $volume | toYaml | nindent 2 }}
 {{- end }}
 {{- end }}
@@ -160,25 +155,28 @@ volumeMounts:
 {{- end }}

 {{- define "gha-runner-scale-set.non-runner-containers" -}}
-{{- range $i, $container := .Values.template.spec.containers -}}
-{{- if ne $container.name "runner" -}}
-- name: {{ $container.name }}
-{{- range $key, $val := $container }}
-{{- if ne $key "name" }}
-{{ $key }}: {{ $val }}
+{{- range $i, $container := .Values.template.spec.containers }}
+{{- if ne $container.name "runner" }}
+- {{ $container | toYaml | nindent 2 }}
 {{- end }}
 {{- end }}
+{{- end }}

+{{- define "gha-runner-scale-set.non-runner-non-dind-containers" -}}
+{{- range $i, $container := .Values.template.spec.containers }}
+{{- if and (ne $container.name "runner") (ne $container.name "dind") }}
+- {{ $container | toYaml | nindent 2 }}
 {{- end }}
 {{- end }}
 {{- end }}

 {{- define "gha-runner-scale-set.dind-runner-container" -}}
 {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
-{{- range $i, $container := .Values.template.spec.containers -}}
-{{- if eq $container.name "runner" -}}
+{{- range $i, $container := .Values.template.spec.containers }}
+{{- if eq $container.name "runner" }}
 {{- range $key, $val := $container }}
 {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
-{{ $key }}: {{ $val }}
+{{ $key }}: {{ $val | toYaml | nindent 2 }}
 {{- end }}
 {{- end }}
 {{- $setDockerHost := 1 }}
@@ -195,29 +193,24 @@ env:
 {{- with $container.env }}
 {{- range $i, $env := . }}
 {{- if eq $env.name "DOCKER_HOST" }}
-{{- $setDockerHost = 0 -}}
+{{- $setDockerHost = 0 }}
 {{- end }}
 {{- if eq $env.name "DOCKER_TLS_VERIFY" }}
-{{- $setDockerTlsVerify = 0 -}}
+{{- $setDockerTlsVerify = 0 }}
 {{- end }}
 {{- if eq $env.name "DOCKER_CERT_PATH" }}
-{{- $setDockerCertPath = 0 -}}
+{{- $setDockerCertPath = 0 }}
 {{- end }}
 {{- if eq $env.name "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS" }}
-{{- $setRunnerWaitDocker = 0 -}}
+{{- $setRunnerWaitDocker = 0 }}
 {{- end }}
 {{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
-{{- $setNodeExtraCaCerts = 0 -}}
+{{- $setNodeExtraCaCerts = 0 }}
 {{- end }}
 {{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
-{{- $setRunnerUpdateCaCerts = 0 -}}
-{{- end }}
-- name: {{ $env.name }}
-{{- range $envKey, $envVal := $env }}
-{{- if ne $envKey "name" }}
-{{ $envKey }}: {{ $envVal | toYaml | nindent 8 }}
-{{- end }}
+{{- $setRunnerUpdateCaCerts = 0 }}
 {{- end }}
+- {{ $env | toYaml | nindent 4 }}
 {{- end }}
 {{- end }}
 {{- if $setDockerHost }}
@@ -254,20 +247,15 @@ volumeMounts:
 {{- with $container.volumeMounts }}
 {{- range $i, $volMount := . }}
 {{- if eq $volMount.name "work" }}
-{{- $mountWork = 0 -}}
+{{- $mountWork = 0 }}
 {{- end }}
 {{- if eq $volMount.name "dind-cert" }}
-{{- $mountDindCert = 0 -}}
+{{- $mountDindCert = 0 }}
 {{- end }}
 {{- if eq $volMount.name "github-server-tls-cert" }}
-{{- $mountGitHubServerTLS = 0 -}}
-{{- end }}
-- name: {{ $volMount.name }}
-{{- range $mountKey, $mountVal := $volMount }}
-{{- if ne $mountKey "name" }}
-{{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }}
-{{- end }}
+{{- $mountGitHubServerTLS = 0 }}
 {{- end }}
+- {{ $volMount | toYaml | nindent 4 }}
 {{- end }}
 {{- end }}
 {{- if $mountWork }}
@@ -290,11 +278,11 @@ volumeMounts:

 {{- define "gha-runner-scale-set.kubernetes-mode-runner-container" -}}
 {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
-{{- range $i, $container := .Values.template.spec.containers -}}
-{{- if eq $container.name "runner" -}}
+{{- range $i, $container := .Values.template.spec.containers }}
+{{- if eq $container.name "runner" }}
 {{- range $key, $val := $container }}
 {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
-{{ $key }}: {{ $val }}
+{{ $key }}: {{ $val | toYaml | nindent 2 }}
 {{- end }}
 {{- end }}
 {{- $setContainerHooks := 1 }}
@@ -310,26 +298,21 @@ env:
 {{- with $container.env }}
 {{- range $i, $env := . }}
 {{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }}
-{{- $setContainerHooks = 0 -}}
+{{- $setContainerHooks = 0 }}
 {{- end }}
 {{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }}
-{{- $setPodName = 0 -}}
+{{- $setPodName = 0 }}
 {{- end }}
 {{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }}
-{{- $setRequireJobContainer = 0 -}}
+{{- $setRequireJobContainer = 0 }}
 {{- end }}
 {{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
-{{- $setNodeExtraCaCerts = 0 -}}
+{{- $setNodeExtraCaCerts = 0 }}
 {{- end }}
 {{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
-{{- $setRunnerUpdateCaCerts = 0 -}}
-{{- end }}
-- name: {{ $env.name }}
-{{- range $envKey, $envVal := $env }}
-{{- if ne $envKey "name" }}
-{{ $envKey }}: {{ $envVal | toYaml | nindent 8 }}
-{{- end }}
+{{- $setRunnerUpdateCaCerts = 0 }}
 {{- end }}
+- {{ $env | toYaml | nindent 4 }}
 {{- end }}
 {{- end }}
 {{- if $setContainerHooks }}
@@ -363,17 +346,12 @@ volumeMounts:
 {{- with $container.volumeMounts }}
 {{- range $i, $volMount := . }}
 {{- if eq $volMount.name "work" }}
-{{- $mountWork = 0 -}}
+{{- $mountWork = 0 }}
 {{- end }}
 {{- if eq $volMount.name "github-server-tls-cert" }}
-{{- $mountGitHubServerTLS = 0 -}}
-{{- end }}
-- name: {{ $volMount.name }}
-{{- range $mountKey, $mountVal := $volMount }}
-{{- if ne $mountKey "name" }}
-{{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }}
-{{- end }}
+{{- $mountGitHubServerTLS = 0 }}
 {{- end }}
+- {{ $volMount | toYaml | nindent 4 }}
 {{- end }}
 {{- end }}
 {{- if $mountWork }}
@@ -391,14 +369,14 @@ volumeMounts:

 {{- define "gha-runner-scale-set.default-mode-runner-containers" -}}
 {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
-{{- range $i, $container := .Values.template.spec.containers -}}
-{{- if ne $container.name "runner" -}}
+{{- range $i, $container := .Values.template.spec.containers }}
+{{- if ne $container.name "runner" }}
 - {{ $container | toYaml | nindent 2 }}
 {{- else }}
 - name: {{ $container.name }}
 {{- range $key, $val := $container }}
 {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
-{{ $key }}: {{ $val }}
+{{ $key }}: {{ $val | toYaml | nindent 4 }}
 {{- end }}
 {{- end }}
 {{- $setNodeExtraCaCerts := 0 }}
@@ -411,17 +389,12 @@ volumeMounts:
 {{- with $container.env }}
 {{- range $i, $env := . }}
 {{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
-{{- $setNodeExtraCaCerts = 0 -}}
+{{- $setNodeExtraCaCerts = 0 }}
 {{- end }}
 {{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
-{{- $setRunnerUpdateCaCerts = 0 -}}
-{{- end }}
-- name: {{ $env.name }}
-{{- range $envKey, $envVal := $env }}
-{{- if ne $envKey "name" }}
-{{ $envKey }}: {{ $envVal | toYaml | nindent 10 }}
-{{- end }}
+{{- $setRunnerUpdateCaCerts = 0 }}
 {{- end }}
+- {{ $env | toYaml | nindent 6 }}
 {{- end }}
 {{- end }}
 {{- if $setNodeExtraCaCerts }}
@@ -440,14 +413,9 @@ volumeMounts:
 {{- with $container.volumeMounts }}
 {{- range $i, $volMount := . }}
 {{- if eq $volMount.name "github-server-tls-cert" }}
-{{- $mountGitHubServerTLS = 0 -}}
-{{- end }}
-- name: {{ $volMount.name }}
-{{- range $mountKey, $mountVal := $volMount }}
-{{- if ne $mountKey "name" }}
-{{ $mountKey }}: {{ $mountVal | toYaml | nindent 10 }}
-{{- end }}
+{{- $mountGitHubServerTLS = 0 }}
 {{- end }}
+- {{ $volMount | toYaml | nindent 6 }}
 {{- end }}
 {{- end }}
 {{- if $mountGitHubServerTLS }}
@@ -458,3 +426,125 @@ volumeMounts:
 {{- end }}
 {{- end }}
 {{- end }}
+
+{{- define "gha-runner-scale-set.managerRoleName" -}}
+{{- include "gha-runner-scale-set.fullname" . }}-manager-role
+{{- end }}
+
+{{- define "gha-runner-scale-set.managerRoleBindingName" -}}
+{{- include "gha-runner-scale-set.fullname" . }}-manager-role-binding
+{{- end }}
+
+{{- define "gha-runner-scale-set.managerServiceAccountName" -}}
+{{- $searchControllerDeployment := 1 }}
+{{- if .Values.controllerServiceAccount }}
+{{- if .Values.controllerServiceAccount.name }}
+{{- $searchControllerDeployment = 0 }}
+{{- .Values.controllerServiceAccount.name }}
+{{- end }}
+{{- end }}
+{{- if eq $searchControllerDeployment 1 }}
+{{- $multiNamespacesCounter := 0 }}
+{{- $singleNamespaceCounter := 0 }}
+{{- $controllerDeployment := dict }}
+{{- $singleNamespaceControllerDeployments := dict }}
+{{- $managerServiceAccountName := "" }}
+{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
+{{- if kindIs "map" $deployment.metadata.labels }}
+{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }}
+{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
+{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
+{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}
+{{- else }}
+{{- $multiNamespacesCounter = add $multiNamespacesCounter 1 }}
+{{- $controllerDeployment = $deployment }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
+{{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- end }}
+{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
+{{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- end }}
+{{- if gt $multiNamespacesCounter 1 }}
+{{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- end }}
+{{- if eq $multiNamespacesCounter 1 }}
+{{- with $controllerDeployment.metadata }}
+{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
+{{- end }}
+{{- else if gt $singleNamespaceCounter 0 }}
+{{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }}
+{{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }}
+{{- with $controllerDeployment.metadata }}
+{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
+{{- end }}
+{{- else }}
+{{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- end }}
+{{- end }}
+{{- if eq $managerServiceAccountName "" }}
+{{- fail "No service account name found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- end }}
+{{- $managerServiceAccountName }}
+{{- end }}
+{{- end }}
+
+{{- define "gha-runner-scale-set.managerServiceAccountNamespace" -}}
+{{- $searchControllerDeployment := 1 }}
+{{- if .Values.controllerServiceAccount }}
+{{- if .Values.controllerServiceAccount.namespace }}
+{{- $searchControllerDeployment = 0 }}
+{{- .Values.controllerServiceAccount.namespace }}
+{{- end }}
+{{- end }}
+{{- if eq $searchControllerDeployment 1 }}
+{{- $multiNamespacesCounter := 0 }}
+{{- $singleNamespaceCounter := 0 }}
+{{- $controllerDeployment := dict }}
+{{- $singleNamespaceControllerDeployments := dict }}
+{{- $managerServiceAccountNamespace := "" }}
+{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
+{{- if kindIs "map" $deployment.metadata.labels }}
+{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }}
+{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
+{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
+{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}
+{{- else }}
+{{- $multiNamespacesCounter = add $multiNamespacesCounter 1 }}
+{{- $controllerDeployment = $deployment }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
+{{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- end }}
+{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
+{{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- end }}
+{{- if gt $multiNamespacesCounter 1 }}
+{{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- end }}
+{{- if eq $multiNamespacesCounter 1 }}
+{{- with $controllerDeployment.metadata }}
+{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
+{{- end }}
+{{- else if gt $singleNamespaceCounter 0 }}
+{{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }}
+{{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }}
+{{- with $controllerDeployment.metadata }}
+{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
+{{- end }}
+{{- else }}
+{{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- end }}
+{{- end }}
+{{- if eq $managerServiceAccountNamespace "" }}
+{{- fail "No service account namespace found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- end }}
+{{- $managerServiceAccountNamespace }}
+{{- end }}
+{{- end }}
@@ -10,7 +10,23 @@ metadata:
   name: {{ .Release.Name }}
   namespace: {{ .Release.Namespace }}
   labels:
+    app.kubernetes.io/component: "autoscaling-runner-set"
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+  annotations:
+    {{- $containerMode := .Values.containerMode }}
+    {{- if not (kindIs "string" .Values.githubConfigSecret) }}
+    actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }}
+    {{- end }}
+    actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
+    actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }}
+    {{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
+    actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
+    actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
+    actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
+    {{- end }}
+    {{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
+    actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
+    {{- end }}
 spec:
   githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }}
   githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }}
@@ -36,17 +52,21 @@ spec:
 {{- if .Values.proxy.http }}
 http:
 url: {{ .Values.proxy.http.url }}
+{{- if .Values.proxy.http.credentialSecretRef }}
 credentialSecretRef: {{ .Values.proxy.http.credentialSecretRef }}
-{{ end }}
+{{- end }}
+{{- end }}
 {{- if .Values.proxy.https }}
 https:
 url: {{ .Values.proxy.https.url }}
+{{- if .Values.proxy.https.credentialSecretRef }}
 credentialSecretRef: {{ .Values.proxy.https.credentialSecretRef }}
-{{ end }}
+{{- end }}
+{{- end }}
 {{- if and .Values.proxy.noProxy (kindIs "slice" .Values.proxy.noProxy) }}
 noProxy: {{ .Values.proxy.noProxy | toYaml | nindent 6}}
-{{ end }}
+{{- end }}
-{{ end }}
+{{- end }}

 {{- if and (or (kindIs "int64" .Values.minRunners) (kindIs "float64" .Values.minRunners)) (or (kindIs "int64" .Values.maxRunners) (kindIs "float64" .Values.maxRunners)) }}
 {{- if gt .Values.minRunners .Values.maxRunners }}
@@ -86,14 +106,15 @@ spec:
 {{ $key }}: {{ $val | toYaml | nindent 8 }}
 {{- end }}
 {{- end }}
-{{- if eq .Values.containerMode.type "kubernetes" }}
+{{- $containerMode := .Values.containerMode }}
+{{- if eq $containerMode.type "kubernetes" }}
 serviceAccountName: {{ default (include "gha-runner-scale-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }}
 {{- else }}
 serviceAccountName: {{ default (include "gha-runner-scale-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }}
 {{- end }}
-{{- if or .Values.template.spec.initContainers (eq .Values.containerMode.type "dind") }}
+{{- if or .Values.template.spec.initContainers (eq $containerMode.type "dind") }}
 initContainers:
-{{- if eq .Values.containerMode.type "dind" }}
+{{- if eq $containerMode.type "dind" }}
 - name: init-dind-externals
   {{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
 {{- end }}
@@ -102,13 +123,13 @@ spec:
 {{- end }}
 {{- end }}
 containers:
-{{- if eq .Values.containerMode.type "dind" }}
+{{- if eq $containerMode.type "dind" }}
 - name: runner
   {{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }}
 - name: dind
   {{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
-{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
-{{- else if eq .Values.containerMode.type "kubernetes" }}
+{{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }}
+{{- else if eq $containerMode.type "kubernetes" }}
 - name: runner
   {{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }}
 {{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
@@ -116,16 +137,16 @@ spec:
 {{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }}
 {{- end }}
 {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
-{{- if or .Values.template.spec.volumes (eq .Values.containerMode.type "dind") (eq .Values.containerMode.type "kubernetes") $tlsConfig.runnerMountPath }}
+{{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") $tlsConfig.runnerMountPath }}
 volumes:
 {{- if $tlsConfig.runnerMountPath }}
   {{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }}
 {{- end }}
-{{- if eq .Values.containerMode.type "dind" }}
+{{- if eq $containerMode.type "dind" }}
   {{- include "gha-runner-scale-set.dind-volume" . | nindent 6 }}
   {{- include "gha-runner-scale-set.dind-work-volume" . | nindent 6 }}
   {{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }}
-{{- else if eq .Values.containerMode.type "kubernetes" }}
+{{- else if eq $containerMode.type "kubernetes" }}
   {{- include "gha-runner-scale-set.kubernetes-mode-work-volume" . | nindent 6 }}
   {{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }}
 {{- else }}
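Based on the proxy block rendered above, a proxy-enabled values override for the runner scale set might be shaped roughly as follows; the hosts and the secret name are placeholders, and credentialSecretRef remains optional on both the http and https entries:

# illustrative proxy settings for the gha-runner-scale-set chart (placeholder hosts and secret name)
proxy:
  http:
    url: http://proxy.example.com:8080
    credentialSecretRef: proxy-credentials
  https:
    url: https://proxy.example.com:8443
  noProxy:
    - example.internal
    - 127.0.0.1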
@@ -7,7 +7,7 @@ metadata:
   labels:
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
   finalizers:
-    - actions.github.com/secret-protection
+    - actions.github.com/cleanup-protection
 data:
 {{- $hasToken := false }}
 {{- $hasAppId := false }}
@@ -1,10 +1,13 @@
-{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
+{{- $containerMode := .Values.containerMode }}
+{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
 # default permission for runner pod service account in kubernetes mode (container hook)
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
   name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
   namespace: {{ .Release.Namespace }}
+  finalizers:
+    - actions.github.com/cleanup-protection
 rules:
 - apiGroups: [""]
   resources: ["pods"]
@@ -1,9 +1,12 @@
-{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
+{{- $containerMode := .Values.containerMode }}
+{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
+  name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
   namespace: {{ .Release.Namespace }}
+  finalizers:
+    - actions.github.com/cleanup-protection
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -1,9 +1,12 @@
-{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
+{{- $containerMode := .Values.containerMode }}
+{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
   namespace: {{ .Release.Namespace }}
+  finalizers:
+    - actions.github.com/cleanup-protection
   labels:
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
 {{- end }}
75
charts/gha-runner-scale-set/templates/manager_role.yaml
Normal file
75
charts/gha-runner-scale-set/templates/manager_role.yaml
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: Role
|
||||||
|
metadata:
|
||||||
|
name: {{ include "gha-runner-scale-set.managerRoleName" . }}
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
labels:
|
||||||
|
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
|
||||||
|
app.kubernetes.io/component: manager-role
|
||||||
|
finalizers:
|
||||||
|
- actions.github.com/cleanup-protection
|
||||||
|
rules:
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- pods
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- pods/status
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- secrets
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- serviceaccounts
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- rbac.authorization.k8s.io
|
||||||
|
resources:
|
||||||
|
- rolebindings
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- rbac.authorization.k8s.io
|
||||||
|
resources:
|
||||||
|
- roles
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
{{- if .Values.githubServerTLS }}
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- configmaps
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
{{- end }}
|
||||||
@@ -0,0 +1,18 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+    app.kubernetes.io/component: manager-role-binding
+  finalizers:
+    - actions.github.com/cleanup-protection
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "gha-runner-scale-set.managerRoleName" . }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "gha-runner-scale-set.managerServiceAccountName" . | nindent 4 }}
+  namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }}
@@ -1,4 +1,5 @@
-{{- if and (ne .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
+{{- $containerMode := .Values.containerMode }}
+{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
 apiVersion: v1
 kind: ServiceAccount
 metadata:
@@ -6,4 +7,6 @@ metadata:
   namespace: {{ .Release.Namespace }}
   labels:
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+  finalizers:
+    - actions.github.com/cleanup-protection
 {{- end }}
@@ -1,11 +1,13 @@
 package tests

 import (
+	"fmt"
 	"path/filepath"
 	"strings"
 	"testing"

 	v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
+	actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com"
 	"github.com/gruntwork-io/terratest/modules/helm"
 	"github.com/gruntwork-io/terratest/modules/k8s"
 	"github.com/gruntwork-io/terratest/modules/random"
@@ -29,6 +31,8 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
 		SetValues: map[string]string{
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -41,7 +45,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
 	assert.Equal(t, namespaceName, githubSecret.Namespace)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", githubSecret.Name)
 	assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"]))
-	assert.Equal(t, "actions.github.com/secret-protection", githubSecret.Finalizers[0])
+	assert.Equal(t, "actions.github.com/cleanup-protection", githubSecret.Finalizers[0])
 }

 func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
@@ -60,6 +64,8 @@ func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
 			"githubConfigSecret.github_app_id":              "10",
 			"githubConfigSecret.github_app_installation_id": "100",
 			"githubConfigSecret.github_app_private_key":     "private_key",
+			"controllerServiceAccount.name":                 "arc",
+			"controllerServiceAccount.namespace":            "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -90,6 +96,8 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAuthInput(t *testing.T) {
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_app_id":   "",
 			"githubConfigSecret.github_token":    "",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -114,6 +122,8 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) {
 		SetValues: map[string]string{
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_app_id":   "10",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -138,6 +148,8 @@ func TestTemplateNotRenderedGitHubSecretWithPredefinedSecret(t *testing.T) {
 		SetValues: map[string]string{
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret":                 "pre-defined-secret",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -160,6 +172,8 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
 		SetValues: map[string]string{
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -176,6 +190,7 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
 	helm.UnmarshalK8SYaml(t, output, &ars)

 	assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName)
+	assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) // no finalizer protections in place
 }

 func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
@@ -193,6 +208,8 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"containerMode.type":                 "kubernetes",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -203,6 +220,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {

 	assert.Equal(t, namespaceName, serviceAccount.Namespace)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name)
+	assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])

 	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
 	var role rbacv1.Role
@@ -210,6 +228,9 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {

 	assert.Equal(t, namespaceName, role.Namespace)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name)
+
+	assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])
+
 	assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules")
 	assert.Equal(t, "pods", role.Rules[0].Resources[0])
 	assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
@@ -222,18 +243,21 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
 	helm.UnmarshalK8SYaml(t, output, &roleBinding)

 	assert.Equal(t, namespaceName, roleBinding.Namespace)
-	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.Name)
+	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role-binding", roleBinding.Name)
 	assert.Len(t, roleBinding.Subjects, 1)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name)
 	assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name)
 	assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
+	assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])

 	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
 	var ars v1alpha1.AutoscalingRunnerSet
 	helm.UnmarshalK8SYaml(t, output, &ars)

-	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName)
+	expectedServiceAccountName := "test-runners-gha-runner-scale-set-kube-mode-service-account"
+	assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
+	assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
 }

 func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
@@ -251,6 +275,8 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"template.spec.serviceAccountName":   "test-service-account",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -263,6 +289,7 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
 	helm.UnmarshalK8SYaml(t, output, &ars)

 	assert.Equal(t, "test-service-account", ars.Spec.Template.Spec.ServiceAccountName)
+	assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
 }

 func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
@@ -279,6 +306,8 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
 		SetValues: map[string]string{
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -293,6 +322,10 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {

 	assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
 	assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
+	assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/part-of"])
+	assert.Equal(t, "autoscaling-runner-set", ars.Labels["app.kubernetes.io/component"])
+	assert.NotEmpty(t, ars.Labels["app.kubernetes.io/version"])
+
 	assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)

@@ -325,6 +358,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"runnerScaleSetName":                 "test-runner-scale-set-name",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -375,6 +410,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) {
 			"template.metadata.labels.test2":      "test2",
 			"template.metadata.annotations.test3": "test3",
 			"template.metadata.annotations.test4": "test4",
+			"controllerServiceAccount.name":       "arc",
+			"controllerServiceAccount.namespace":  "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -417,6 +454,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_MaxRunnersValidationError(t *testing.T) {
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"maxRunners":                         "-1",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -443,6 +482,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinRunnersValidationError(t *testing.T) {
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"maxRunners":                         "1",
 			"minRunners":                         "-1",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -469,6 +510,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationError(t *testing.T) {
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"maxRunners":                         "0",
 			"minRunners":                         "1",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -495,6 +538,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationSameValue(t *testing.T) {
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"maxRunners":                         "0",
 			"minRunners":                         "0",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -523,6 +568,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMin(t *testing.T) {
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"minRunners":                         "5",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -551,6 +598,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t *testing.T) {
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"maxRunners":                         "5",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -605,6 +654,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) {
 	namespaceName := "test-" + strings.ToLower(random.UniqueId())

 	options := &helm.Options{
+		SetValues: map[string]string{
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
 		ValuesFiles:    []string{testValuesPath},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -635,6 +688,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
 	namespaceName := "test-" + strings.ToLower(random.UniqueId())

 	options := &helm.Options{
+		SetValues: map[string]string{
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
 		ValuesFiles:    []string{testValuesPath},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -667,6 +724,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_K8S_ExtraVolumes(t *testing.T) {
 	namespaceName := "test-" + strings.ToLower(random.UniqueId())

 	options := &helm.Options{
+		SetValues: map[string]string{
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
 		ValuesFiles:    []string{testValuesPath},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -698,6 +759,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"containerMode.type":                 "dind",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -787,6 +850,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) {
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret.github_token":    "gh_token12345",
 			"containerMode.type":                 "kubernetes",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -841,6 +906,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T) {
 		SetValues: map[string]string{
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret":                 "pre-defined-secrets",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -873,6 +940,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_ErrorOnEmptyPredefinedSecret(t *testing.T) {
 		SetValues: map[string]string{
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret":                 "",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -897,6 +966,8 @@ func TestTemplateRenderedWithProxy(t *testing.T) {
 		SetValues: map[string]string{
 			"githubConfigUrl":                    "https://github.com/actions",
 			"githubConfigSecret":                 "pre-defined-secrets",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 			"proxy.http.url":                     "http://proxy.example.com",
 			"proxy.http.credentialSecretRef":     "http-secret",
 			"proxy.https.url":                    "https://proxy.example.com",
@@ -961,6 +1032,8 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
 			"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
 			"githubServerTLS.certificateFrom.configMapKeyRef.key":  "cert.pem",
 			"githubServerTLS.runnerMountPath":                      "/runner/mount/path",
+			"controllerServiceAccount.name":                        "arc",
+			"controllerServiceAccount.namespace":                   "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -1018,6 +1091,8 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
 			"githubServerTLS.certificateFrom.configMapKeyRef.key":  "cert.pem",
 			"githubServerTLS.runnerMountPath":                      "/runner/mount/path/",
 			"containerMode.type":                                   "dind",
+			"controllerServiceAccount.name":                        "arc",
+			"controllerServiceAccount.namespace":                   "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -1075,6 +1150,8 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
 			"githubServerTLS.certificateFrom.configMapKeyRef.key":  "cert.pem",
 			"githubServerTLS.runnerMountPath":                      "/runner/mount/path",
 			"containerMode.type":                                   "kubernetes",
+			"controllerServiceAccount.name":                        "arc",
+			"controllerServiceAccount.namespace":                   "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -1132,6 +1209,8 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
 			"githubConfigSecret":                                    "pre-defined-secrets",
 			"githubServerTLS.certificateFrom.configMapKeyRef.name":  "certs-configmap",
 			"githubServerTLS.certificateFrom.configMapKeyRef.key":   "cert.pem",
+			"controllerServiceAccount.name":                         "arc",
+			"controllerServiceAccount.namespace":                    "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -1185,6 +1264,8 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
 			"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
 			"githubServerTLS.certificateFrom.configMapKeyRef.key":  "cert.pem",
 			"containerMode.type":                                   "dind",
+			"controllerServiceAccount.name":                        "arc",
+			"controllerServiceAccount.namespace":                   "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -1238,6 +1319,8 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
 			"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
 			"githubServerTLS.certificateFrom.configMapKeyRef.key":  "cert.pem",
 			"containerMode.type":                                   "kubernetes",
+			"controllerServiceAccount.name":                        "arc",
+			"controllerServiceAccount.namespace":                   "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -1295,6 +1378,8 @@ func TestTemplateNamingConstraints(t *testing.T) {
 	setValues := map[string]string{
 		"githubConfigUrl":                    "https://github.com/actions",
 		"githubConfigSecret":                 "",
+		"controllerServiceAccount.name":      "arc",
+		"controllerServiceAccount.namespace": "arc-system",
 	}

 	tt := map[string]struct {
@@ -1341,6 +1426,8 @@ func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) {
 		SetValues: map[string]string{
 			"githubConfigUrl":                    "https://github.com/actions/",
 			"githubConfigSecret.github_token":    "gh_token12345",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -1354,3 +1441,371 @@
 	assert.Equal(t, "test-runners", ars.Name)
 	assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
 }
+
+func TestTemplate_CreateManagerRole(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"githubConfigUrl":                    "https://github.com/actions",
+			"githubConfigSecret.github_token":    "gh_token12345",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
+
+	var managerRole rbacv1.Role
+	helm.UnmarshalK8SYaml(t, output, &managerRole)
+
+	assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
+	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
+	assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
+	assert.Equal(t, 6, len(managerRole.Rules))
+
+	var ars v1alpha1.AutoscalingRunnerSet
+	helm.UnmarshalK8SYaml(t, output, &ars)
+}
+
+func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"githubConfigUrl":                    "https://github.com/actions",
+			"githubConfigSecret.github_token":    "gh_token12345",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+			"githubServerTLS.certificateFrom.configMapKeyRef.name": "test",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
+
+	var managerRole rbacv1.Role
+	helm.UnmarshalK8SYaml(t, output, &managerRole)
+
+	assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
+	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
+	assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
+	assert.Equal(t, 7, len(managerRole.Rules))
+	assert.Equal(t, "configmaps", managerRole.Rules[6].Resources[0])
+}
+
+func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"githubConfigUrl":                    "https://github.com/actions",
+			"githubConfigSecret.github_token":    "gh_token12345",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
+
+	var managerRoleBinding rbacv1.RoleBinding
+	helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
+
+	assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release")
+	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name)
+	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name)
+	assert.Equal(t, "actions.github.com/cleanup-protection", managerRoleBinding.Finalizers[0])
+	assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name)
+	assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace)
+}
+
+func TestTemplateRenderedAutoScalingRunnerSet_ExtraContainers(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	testValuesPath, err := filepath.Abs("../tests/values_extra_containers.yaml")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
+		ValuesFiles:    []string{testValuesPath},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug")
+
+	var ars v1alpha1.AutoscalingRunnerSet
+	helm.UnmarshalK8SYaml(t, output, &ars)
+
+	assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers")
+	assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
+	assert.Equal(t, "other", ars.Spec.Template.Spec.Containers[1].Name, "Container name should be other")
+	assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
+	assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
+	assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[1].Resources.Limits.Cpu().String(), "CPU Limit should be set")
+	assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[1].Resources.Limits.Memory().String(), "Memory Limit should be set")
+	assert.Equal(t, "SOME_ENV", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "SOME_ENV should be set")
+	assert.Equal(t, "SOME_VALUE", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "SOME_ENV should be set to `SOME_VALUE`")
+	assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set")
+	assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`")
+	assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work")
+	assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work")
+	assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
+	assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
+	assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be work")
+	assert.Equal(t, corev1.DNSNone, ars.Spec.Template.Spec.DNSPolicy, "DNS Policy should be None")
+	assert.Equal(t, "192.0.2.1", ars.Spec.Template.Spec.DNSConfig.Nameservers[0], "DNS Nameserver should be set")
+}
+
+func TestTemplateRenderedAutoScalingRunnerSet_ExtraPodSpec(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	testValuesPath, err := filepath.Abs("../tests/values_extra_pod_spec.yaml")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
+		ValuesFiles:    []string{testValuesPath},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+
+	var ars v1alpha1.AutoscalingRunnerSet
+	helm.UnmarshalK8SYaml(t, output, &ars)
+
+	assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 containers")
+	assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
+	assert.Equal(t, corev1.DNSNone, ars.Spec.Template.Spec.DNSPolicy, "DNS Policy should be None")
+	assert.Equal(t, "192.0.2.1", ars.Spec.Template.Spec.DNSConfig.Nameservers[0], "DNS Nameserver should be set")
+}
+
+func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	testValuesPath, err := filepath.Abs("../tests/values_dind_merge_spec.yaml")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
+		ValuesFiles:    []string{testValuesPath},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug")
+
+	var ars v1alpha1.AutoscalingRunnerSet
+	helm.UnmarshalK8SYaml(t, output, &ars)
+
+	assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers")
+	assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
+	assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
+	assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
+	assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "DOCKER_HOST should be set")
+	assert.Equal(t, "tcp://localhost:9999", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "DOCKER_HOST should be set to `tcp://localhost:9999`")
+	assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set")
+	assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`")
+	assert.Equal(t, "DOCKER_TLS_VERIFY", ars.Spec.Template.Spec.Containers[0].Env[2].Name, "DOCKER_TLS_VERIFY should be set")
+	assert.Equal(t, "1", ars.Spec.Template.Spec.Containers[0].Env[2].Value, "DOCKER_TLS_VERIFY should be set to `1`")
+	assert.Equal(t, "DOCKER_CERT_PATH", ars.Spec.Template.Spec.Containers[0].Env[3].Name, "DOCKER_CERT_PATH should be set")
+	assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[0].Env[3].Value, "DOCKER_CERT_PATH should be set to `/certs/client`")
+	assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work")
+	assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work")
+	assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
+	assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
+}
+
+func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	testValuesPath, err := filepath.Abs("../tests/values_k8s_merge_spec.yaml")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
+		ValuesFiles:    []string{testValuesPath},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug")
+
+	var ars v1alpha1.AutoscalingRunnerSet
+	helm.UnmarshalK8SYaml(t, output, &ars)
+
+	assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 containers")
+	assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
+	assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
+	assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
+	assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "ACTIONS_RUNNER_CONTAINER_HOOKS should be set")
+	assert.Equal(t, "/k8s/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "ACTIONS_RUNNER_CONTAINER_HOOKS should be set to `/k8s/index.js`")
+	assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set")
+	assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`")
+	assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[2].Name, "ACTIONS_RUNNER_POD_NAME should be set")
+	assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[3].Name, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER should be set")
+	assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work")
+	assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work")
+	assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
+	assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
+}
+
+func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	annotationExpectedTests := map[string]*helm.Options{
+		"GitHub token": {
+			SetValues: map[string]string{
+				"githubConfigUrl":                    "https://github.com/actions",
+				"githubConfigSecret.github_token":    "gh_token12345",
+				"controllerServiceAccount.name":      "arc",
+				"controllerServiceAccount.namespace": "arc-system",
+			},
+			KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+		},
+		"GitHub app": {
+			SetValues: map[string]string{
+				"githubConfigUrl":                                "https://github.com/actions",
+				"githubConfigSecret.github_app_id":               "10",
+				"githubConfigSecret.github_app_installation_id":  "100",
+				"githubConfigSecret.github_app_private_key":      "private_key",
+				"controllerServiceAccount.name":                  "arc",
+				"controllerServiceAccount.namespace":             "arc-system",
+			},
+			KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+		},
+	}
+
+	for name, options := range annotationExpectedTests {
+		t.Run("Annotation set: "+name, func(t *testing.T) {
+			output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+			var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
+			helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
+
+			assert.NotEmpty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
+		})
+	}
+
+	t.Run("Annotation should not be set", func(t *testing.T) {
+		options := &helm.Options{
+			SetValues: map[string]string{
+				"githubConfigUrl":                    "https://github.com/actions",
+				"githubConfigSecret":                 "pre-defined-secret",
+				"controllerServiceAccount.name":      "arc",
+				"controllerServiceAccount.namespace": "arc-system",
+			},
+			KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+		}
+		output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+		var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
+		helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)

+		assert.Empty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
+	})
+}
+
+func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"githubConfigUrl":                    "https://github.com/actions",
+			"githubConfigSecret.github_token":    "gh_token12345",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+			"containerMode.type":                 "kubernetes",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+	var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
+	helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
+
+	annotationValues := map[string]string{
+		actionsgithubcom.AnnotationKeyGitHubSecretName:                 "test-runners-gha-runner-scale-set-github-secret",
+		actionsgithubcom.AnnotationKeyManagerRoleName:                  "test-runners-gha-runner-scale-set-manager-role",
+		actionsgithubcom.AnnotationKeyManagerRoleBindingName:           "test-runners-gha-runner-scale-set-manager-role-binding",
+		actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-runner-scale-set-kube-mode-service-account",
+		actionsgithubcom.AnnotationKeyKubernetesModeRoleName:           "test-runners-gha-runner-scale-set-kube-mode-role",
+		actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName:    "test-runners-gha-runner-scale-set-kube-mode-role-binding",
+	}
+
+	for annotation, value := range annotationValues {
+		assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
+	}
+}
@@ -3,3 +3,6 @@ githubConfigSecret:
   github_token: test
 maxRunners: 10
 minRunners: 5
+controllerServiceAccount:
+  name: "arc"
+  namespace: "arc-system"
@@ -0,0 +1,31 @@
+githubConfigUrl: https://github.com/actions/actions-runner-controller
+githubConfigSecret:
+  github_token: test
+template:
+  spec:
+    containers:
+      - name: runner
+        image: runner-image:latest
+        env:
+          - name: DOCKER_HOST
+            value: tcp://localhost:9999
+          - name: MY_NODE_NAME
+            valueFrom:
+              fieldRef:
+                fieldPath: spec.nodeName
+        volumeMounts:
+          - name: work
+            mountPath: /work
+          - name: others
+            mountPath: /others
+        resources:
+          limits:
+            memory: "64Mi"
+            cpu: "250m"
+    volumes:
+      - name: work
+        hostPath:
+          path: /data
+          type: Directory
+containerMode:
+  type: dind
@@ -0,0 +1,46 @@
+githubConfigUrl: https://github.com/actions/actions-runner-controller
+githubConfigSecret:
+  github_token: test
+template:
+  spec:
+    containers:
+      - name: runner
+        image: runner-image:latest
+        env:
+          - name: SOME_ENV
+            value: SOME_VALUE
+          - name: MY_NODE_NAME
+            valueFrom:
+              fieldRef:
+                fieldPath: spec.nodeName
+        volumeMounts:
+          - name: work
+            mountPath: /work
+          - name: others
+            mountPath: /others
+        resources:
+          limits:
+            memory: "64Mi"
+            cpu: "250m"
+      - name: other
+        image: other-image:latest
+        volumeMounts:
+          - name: work
+            mountPath: /work
+          - name: others
+            mountPath: /others
+        resources:
+          limits:
+            memory: "64Mi"
+            cpu: "250m"
+    volumes:
+      - name: work
+        hostPath:
+          path: /data
+          type: Directory
+    dnsPolicy: "None"
+    dnsConfig:
+      nameservers:
+        - 192.0.2.1
+containerMode:
+  type: none
12 charts/gha-runner-scale-set/tests/values_extra_pod_spec.yaml (new file)
@@ -0,0 +1,12 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
  github_token: test
template:
  spec:
    containers:
    - name: runner
      image: runner-image:latest
    dnsPolicy: "None"
    dnsConfig:
      nameservers:
      - 192.0.2.1
31 charts/gha-runner-scale-set/tests/values_k8s_merge_spec.yaml (new file)
@@ -0,0 +1,31 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
  github_token: test
template:
  spec:
    containers:
    - name: runner
      image: runner-image:latest
      env:
      - name: ACTIONS_RUNNER_CONTAINER_HOOKS
        value: /k8s/index.js
      - name: MY_NODE_NAME
        valueFrom:
          fieldRef:
            fieldPath: spec.nodeName
      volumeMounts:
      - name: work
        mountPath: /work
      - name: others
        mountPath: /others
      resources:
        limits:
          memory: "64Mi"
          cpu: "250m"
    volumes:
    - name: work
      hostPath:
        path: /data
        type: Directory
containerMode:
  type: kubernetes
@@ -65,28 +65,32 @@ githubConfigSecret:
 #     certificateFrom:
 #       configMapKeyRef:
 #         name: config-map-name
-#         key: ca.pem
+#         key: ca.crt
 #   runnerMountPath: /usr/local/share/ca-certificates/
 
+# containerMode:
+#   type: "dind"  ## type can be set to dind or kubernetes
+#   ## the following is required when containerMode.type=kubernetes
+#   kubernetesModeWorkVolumeClaim:
+#     accessModes: ["ReadWriteOnce"]
+#     # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to provide dynamic provision volume with storageClassName: openebs-hostpath
+#     storageClassName: "dynamic-blob-storage"
+#     resources:
+#       requests:
+#         storage: 1Gi
+
 ## template is the PodSpec for each runner Pod
 template:
-  spec:
-    containers:
-    - name: runner
-      image: ghcr.io/actions/actions-runner:latest
-      command: ["/home/runner/run.sh"]
-
-containerMode:
-  type: ""  ## type can be set to dind or kubernetes
+  ## template.spec will be modified if you change the container mode
 ## with containerMode.type=dind, we will populate the template.spec with following pod spec
 ## template:
 ##   spec:
 ##     initContainers:
-##     - name: initExternalsInternalVolume
+##     - name: init-dind-externals
 ##       image: ghcr.io/actions/actions-runner:latest
 ##       command: ["cp", "-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"]
 ##       volumeMounts:
-##         - name: externalsInternal
+##         - name: dind-externals
 ##           mountPath: /home/runner/tmpDir
 ##     containers:
 ##     - name: runner
@@ -99,9 +103,9 @@ containerMode:
 ##       - name: DOCKER_CERT_PATH
 ##         value: /certs/client
 ##       volumeMounts:
-##         - name: workingDirectoryInternal
+##         - name: work
 ##           mountPath: /home/runner/_work
-##         - name: dinDInternal
+##         - name: dind-cert
 ##           mountPath: /certs/client
 ##           readOnly: true
 ##     - name: dind
@@ -109,18 +113,18 @@ containerMode:
 ##       securityContext:
 ##         privileged: true
 ##       volumeMounts:
-##       - mountPath: /certs/client
-##         name: dinDInternal
-##       - mountPath: /home/runner/_work
-##         name: workingDirectoryInternal
-##       - mountPath: /home/runner/externals
-##         name: externalsInternal
+##       - name: work
+##         mountPath: /home/runner/_work
+##       - name: dind-cert
+##         mountPath: /certs/client
+##       - name: dind-externals
+##         mountPath: /home/runner/externals
 ##     volumes:
-##     - name: dinDInternal
+##     - name: work
 ##       emptyDir: {}
-##     - name: workingDirectoryInternal
+##     - name: dind-cert
 ##       emptyDir: {}
-##     - name: externalsInternal
+##     - name: dind-externals
 ##       emptyDir: {}
 ######################################################################################################
 ## with containerMode.type=kubernetes, we will populate the template.spec with following pod spec
@@ -151,13 +155,18 @@ containerMode:
 ##       resources:
 ##         requests:
 ##           storage: 1Gi
+  spec:
+    containers:
+    - name: runner
+      image: ghcr.io/actions/actions-runner:latest
+      command: ["/home/runner/run.sh"]
+
-  ## the following is required when containerMode.type=kubernetes
-  kubernetesModeWorkVolumeClaim:
-    accessModes: ["ReadWriteOnce"]
-    # For testing, use https://github.com/rancher/local-path-provisioner to provide dynamic provision volume
-    # TODO: remove before release
-    storageClassName: "dynamic-blob-storage"
-    resources:
-      requests:
-        storage: 1Gi
+## Optional controller service account that needs to have required Role and RoleBinding
+## to operate this gha-runner-scale-set installation.
+## The helm chart will try to find the controller deployment and its service account at installation time.
+## In case the helm chart can't find the right service account, you can explicitly pass in the following value
+## to help it finish RoleBinding with the right service account.
+## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly.
+# controllerServiceAccount:
+#   namespace: arc-system
+#   name: test-arc-gha-runner-scale-set-controller
@@ -144,7 +144,7 @@ func TestCustomerServerRootCA(t *testing.T) {
 
 	client, err := newActionsClientFromConfig(config, creds)
 	require.NoError(t, err)
-	_, err = client.GetRunnerScaleSet(ctx, "test")
+	_, err = client.GetRunnerScaleSet(ctx, 1, "test")
 	require.NoError(t, err)
 	assert.True(t, serverCalledSuccessfully)
 }
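The test change above reflects a signature change in the Actions service client: GetRunnerScaleSet now takes a runner group ID in addition to the scale set name. A minimal sketch of the new call pattern, mirroring what createRunnerScaleSet does further down in this compare view; the local interface, the RunnerGroup return type, and the fallback to group ID 1 are illustrative assumptions rather than the repository's exact definitions.

package sketch

import (
	"context"

	"github.com/actions/actions-runner-controller/github/actions"
)

// scaleSetGetter captures only the two calls used in this sketch; the concrete
// client in github/actions exposes a superset of this (the interface name is illustrative).
type scaleSetGetter interface {
	GetRunnerGroupByName(ctx context.Context, name string) (*actions.RunnerGroup, error)
	GetRunnerScaleSet(ctx context.Context, runnerGroupId int, name string) (*actions.RunnerScaleSet, error)
}

// resolveScaleSet resolves the runner group first, then looks the scale set up by name,
// which is the order the controller now follows in createRunnerScaleSet.
func resolveScaleSet(ctx context.Context, c scaleSetGetter, runnerGroup, name string) (*actions.RunnerScaleSet, error) {
	runnerGroupId := 1 // assumption: fall back to the default runner group, as the controller does
	if len(runnerGroup) > 0 {
		group, err := c.GetRunnerGroupByName(ctx, runnerGroup)
		if err != nil {
			return nil, err
		}
		runnerGroupId = int(group.ID)
	}
	// The lookup is now scoped to the resolved runner group ID.
	return c.GetRunnerScaleSet(ctx, runnerGroupId, name)
}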
@@ -80,6 +80,9 @@ spec:
               image:
                 description: Required
                 type: string
+              imagePullPolicy:
+                description: Required
+                type: string
               imagePullSecrets:
                 description: Required
                 items:
@@ -17,16 +17,28 @@ spec:
   - additionalPrinterColumns:
     - jsonPath: .spec.minRunners
       name: Minimum Runners
-      type: number
+      type: integer
     - jsonPath: .spec.maxRunners
       name: Maximum Runners
-      type: number
+      type: integer
    - jsonPath: .status.currentRunners
      name: Current Runners
-      type: number
+      type: integer
     - jsonPath: .status.state
       name: State
       type: string
+    - jsonPath: .status.pendingEphemeralRunners
+      name: Pending Runners
+      type: integer
+    - jsonPath: .status.runningEphemeralRunners
+      name: Running Runners
+      type: integer
+    - jsonPath: .status.finishedEphemeralRunners
+      name: Finished Runners
+      type: integer
+    - jsonPath: .status.deletingEphemeralRunners
+      name: Deleting Runners
+      type: integer
     name: v1alpha1
     schema:
       openAPIV3Schema:
@@ -4306,6 +4318,12 @@ spec:
             properties:
               currentRunners:
                 type: integer
+              failedEphemeralRunners:
+                type: integer
+              pendingEphemeralRunners:
+                type: integer
+              runningEphemeralRunners:
+                type: integer
               state:
                 type: string
             type: object
@@ -21,6 +21,18 @@ spec:
     - jsonPath: .status.currentReplicas
       name: CurrentReplicas
       type: integer
+    - jsonPath: .status.pendingEphemeralRunners
+      name: Pending Runners
+      type: integer
+    - jsonPath: .status.runningEphemeralRunners
+      name: Running Runners
+      type: integer
+    - jsonPath: .status.finishedEphemeralRunners
+      name: Finished Runners
+      type: integer
+    - jsonPath: .status.deletingEphemeralRunners
+      name: Deleting Runners
+      type: integer
     name: v1alpha1
     schema:
       openAPIV3Schema:
@@ -4296,6 +4308,14 @@ spec:
               currentReplicas:
                 description: CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet.
                 type: integer
+              failedEphemeralRunners:
+                type: integer
+              pendingEphemeralRunners:
+                type: integer
+              runningEphemeralRunners:
+                type: integer
+            required:
+            - currentReplicas
             type: object
           type: object
     served: true
@@ -17,6 +17,21 @@ spec:
   scope: Namespaced
   versions:
   - additionalPrinterColumns:
+    - jsonPath: .spec.template.spec.enterprise
+      name: Enterprise
+      type: string
+    - jsonPath: .spec.template.spec.organization
+      name: Organization
+      type: string
+    - jsonPath: .spec.template.spec.repository
+      name: Repository
+      type: string
+    - jsonPath: .spec.template.spec.group
+      name: Group
+      type: string
+    - jsonPath: .spec.template.spec.labels
+      name: Labels
+      type: string
     - jsonPath: .spec.replicas
       name: Desired
       type: number
10 config/manager/env-replacement.yaml (new file)
@@ -0,0 +1,10 @@
source:
  kind: Deployment
  name: controller-manager
  fieldPath: spec.template.spec.containers.[name=manager].image
targets:
- select:
    kind: Deployment
    name: controller-manager
  fieldPaths:
  - spec.template.spec.containers.[name=manager].env.[name=CONTROLLER_MANAGER_CONTAINER_IMAGE].value
@@ -5,4 +5,7 @@ kind: Kustomization
 images:
 - name: controller
   newName: summerwind/actions-runner-controller
-  newTag: dev
+  newTag: latest
+
+replacements:
+- path: env-replacement.yaml
@@ -50,14 +50,14 @@ spec:
               optional: true
         - name: GITHUB_APP_PRIVATE_KEY
           value: /etc/actions-runner-controller/github_app_private_key
-        - name: CONTROLLER_MANAGER_POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
+        - name: CONTROLLER_MANAGER_CONTAINER_IMAGE
+          value: CONTROLLER_MANAGER_CONTAINER_IMAGE
         - name: CONTROLLER_MANAGER_POD_NAMESPACE
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
+        - name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
+          value: IfNotPresent
         volumeMounts:
         - name: controller-manager
           mountPath: "/etc/actions-runner-controller"
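The CONTROLLER_MANAGER_CONTAINER_IMAGE value above is a placeholder: the kustomize replacement in config/manager/env-replacement.yaml rewrites it with the manager container's own image at render time, presumably so the controller knows which image to use for listener pods, while CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY sets how that image is pulled. A hedged sketch of how a process might consume these two variables; the helper name and the defaulting behaviour are assumptions, not the repository's actual code.

package main

import (
	"fmt"
	"os"
)

// listenerImageConfig reads the two env vars added to the manager Deployment above.
// Defaulting the pull policy to IfNotPresent mirrors the manifest value and is an assumption.
func listenerImageConfig() (image, pullPolicy string, err error) {
	image = os.Getenv("CONTROLLER_MANAGER_CONTAINER_IMAGE")
	if image == "" {
		return "", "", fmt.Errorf("CONTROLLER_MANAGER_CONTAINER_IMAGE is not set")
	}
	pullPolicy = os.Getenv("CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY")
	if pullPolicy == "" {
		pullPolicy = "IfNotPresent"
	}
	return image, pullPolicy, nil
}

func main() {
	img, policy, err := listenerImageConfig()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("listener image=%s pullPolicy=%s\n", img, policy)
}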
@@ -102,6 +102,13 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets/finalizers
+  verbs:
+  - patch
+  - update
 - apiGroups:
   - actions.github.com
   resources:
@@ -31,6 +31,6 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | autoscaler.enabled | Enable the HorizontalRunnerAutoscaler, if its enabled then replica count will not be used | true |
 | autoscaler.minReplicas | Minimum no of replicas | 1 |
 | autoscaler.maxReplicas | Maximum no of replicas | 5 |
-| autoscaler.scaleDownDelaySecondsAfterScaleOut | [Anti-Flapping Configuration](https://github.com/actions/actions-runner-controller#anti-flapping-configuration) | 120 |
-| autoscaler.metrics | [Pull driven scaling](https://github.com/actions/actions-runner-controller#pull-driven-scaling) | default |
-| autoscaler.scaleUpTriggers | [Webhook driven scaling](https://github.com/actions/actions-runner-controller#webhook-driven-scaling) | |
+| autoscaler.scaleDownDelaySecondsAfterScaleOut | [Anti-Flapping Configuration](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#anti-flapping-configuration) | 120 |
+| autoscaler.metrics | [Pull driven scaling](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#pull-driven-scaling) | default |
+| autoscaler.scaleUpTriggers | [Webhook driven scaling](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#webhook-driven-scaling) | |
@@ -17,7 +17,7 @@ runnerLabels:
 replicaCount: 1
 
 # The Runner Group that the runner(s) should be associated with.
-# See https://docs.github.com/en/github-ae@latest/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups.
+# See https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/managing-access-to-self-hosted-runners-using-groups.
 group: Default
 
 autoscaler:
@@ -41,7 +41,6 @@ import (
 
 const (
 	autoscalingListenerContainerName = "autoscaler"
-	autoscalingListenerOwnerKey      = ".metadata.controller"
 	autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
 )
 
@@ -86,7 +85,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 		}
 		if !done {
 			log.Info("Waiting for resources to be deleted before removing finalizer")
-			return ctrl.Result{}, nil
+			return ctrl.Result{Requeue: true}, nil
 		}
 
 		log.Info("Removing finalizer")
@@ -204,7 +203,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 		return r.createRoleBindingForListener(ctx, autoscalingListener, listenerRole, serviceAccount, log)
 	}
 
-	// Create a secret containing proxy config if specifiec
+	// Create a secret containing proxy config if specified
 	if autoscalingListener.Spec.Proxy != nil {
 		proxySecret := new(corev1.Secret)
 		if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: proxyListenerSecretName(autoscalingListener)}, proxySecret); err != nil {
@@ -246,66 +245,6 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 	return ctrl.Result{}, nil
 }
 
-// SetupWithManager sets up the controller with the Manager.
-func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	groupVersionIndexer := func(rawObj client.Object) []string {
-		groupVersion := v1alpha1.GroupVersion.String()
-		owner := metav1.GetControllerOf(rawObj)
-		if owner == nil {
-			return nil
-		}
-
-		// ...make sure it is owned by this controller
-		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
-			return nil
-		}
-
-		// ...and if so, return it
-		return []string{owner.Name}
-	}
-
-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
-		return err
-	}
-
-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
-		return err
-	}
-
-	labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
-		var requests []reconcile.Request
-		labels := obj.GetLabels()
-		namespace, ok := labels["auto-scaling-listener-namespace"]
-		if !ok {
-			return nil
-		}
-
-		name, ok := labels["auto-scaling-listener-name"]
-		if !ok {
-			return nil
-		}
-		requests = append(requests,
-			reconcile.Request{
-				NamespacedName: types.NamespacedName{
-					Name:      name,
-					Namespace: namespace,
-				},
-			},
-		)
-		return requests
-	}
-
-	return ctrl.NewControllerManagedBy(mgr).
-		For(&v1alpha1.AutoscalingListener{}).
-		Owns(&corev1.Pod{}).
-		Owns(&corev1.ServiceAccount{}).
-		Owns(&corev1.Secret{}).
-		Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
-		Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
-		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
-		Complete(r)
-}
-
 func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
 	logger.Info("Cleaning up the listener pod")
 	listenerPod := new(corev1.Pod)
@@ -503,7 +442,7 @@ func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Con
 	}
 
 	logger.Info("Created listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
-	return ctrl.Result{}, nil
+	return ctrl.Result{Requeue: true}, nil
 }
 
 func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
@@ -524,8 +463,8 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
 			Name:      proxyListenerSecretName(autoscalingListener),
 			Namespace: autoscalingListener.Namespace,
 			Labels: map[string]string{
-				"auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-				"auto-scaling-runner-set-name":      autoscalingListener.Spec.AutoscalingRunnerSetName,
+				LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+				LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
 			},
 		},
 		Data: data,
@@ -542,7 +481,7 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
 
 	logger.Info("Created listener proxy secret", "namespace", newProxySecret.Namespace, "name", newProxySecret.Name)
 
-	return ctrl.Result{}, nil
+	return ctrl.Result{Requeue: true}, nil
 }
 
 func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
@@ -558,7 +497,7 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
 	}
 
 	logger.Info("Updated listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash)
-	return ctrl.Result{}, nil
+	return ctrl.Result{Requeue: true}, nil
 }
 
 func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
@@ -616,3 +555,62 @@ func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context
 		"serviceAccount", serviceAccount.Name)
 	return ctrl.Result{Requeue: true}, nil
 }
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	groupVersionIndexer := func(rawObj client.Object) []string {
+		groupVersion := v1alpha1.GroupVersion.String()
+		owner := metav1.GetControllerOf(rawObj)
+		if owner == nil {
+			return nil
+		}
+
+		// ...make sure it is owned by this controller
+		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
+			return nil
+		}
+
+		// ...and if so, return it
+		return []string{owner.Name}
+	}
+
+	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil {
+		return err
+	}
+
+	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil {
+		return err
+	}
+
+	labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
+		var requests []reconcile.Request
+		labels := obj.GetLabels()
+		namespace, ok := labels["auto-scaling-listener-namespace"]
+		if !ok {
+			return nil
+		}
+
+		name, ok := labels["auto-scaling-listener-name"]
+		if !ok {
+			return nil
+		}
+		requests = append(requests,
+			reconcile.Request{
+				NamespacedName: types.NamespacedName{
+					Name:      name,
+					Namespace: namespace,
+				},
+			},
+		)
+		return requests
+	}
+
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&v1alpha1.AutoscalingListener{}).
+		Owns(&corev1.Pod{}).
+		Owns(&corev1.ServiceAccount{}).
+		Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
+		Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
+		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
+		Complete(r)
+}
@@ -213,7 +213,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
 			Eventually(
 				func() error {
 					podList := new(corev1.PodList)
-					err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name})
+					err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name})
 					if err != nil {
 						return err
 					}
@@ -231,7 +231,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
 			Eventually(
 				func() error {
 					serviceAccountList := new(corev1.ServiceAccountList)
-					err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name})
+					err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name})
 					if err != nil {
 						return err
 					}
@@ -24,9 +24,11 @@ import (
 	"strings"
 
 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
+	"github.com/actions/actions-runner-controller/build"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
 	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -41,13 +43,10 @@ import (
 )
 
 const (
-	// TODO: Replace with shared image.
-	autoscalingRunnerSetOwnerKey      = ".metadata.controller"
-	LabelKeyRunnerSpecHash            = "runner-spec-hash"
+	labelKeyRunnerSpecHash            = "runner-spec-hash"
 	autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
-	runnerScaleSetIdKey               = "runner-scale-set-id"
-	runnerScaleSetNameKey             = "runner-scale-set-name"
-	runnerScaleSetRunnerGroupNameKey  = "runner-scale-set-runner-group-name"
+	runnerScaleSetIdAnnotationKey     = "runner-scale-set-id"
+	runnerScaleSetNameAnnotationKey   = "runner-scale-set-name"
 )
 
 // AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object
@@ -114,6 +113,17 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 			return ctrl.Result{}, err
 		}
 
+		requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log)
+		if err != nil {
+			log.Error(err, "Failed to remove finalizers on dependent resources")
+			return ctrl.Result{}, err
+		}
+
+		if requeue {
+			log.Info("Waiting for dependent resources to be deleted")
+			return ctrl.Result{Requeue: true}, nil
+		}
+
 		log.Info("Removing finalizer")
 		err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
 			controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
@@ -127,6 +137,22 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 		return ctrl.Result{}, nil
 	}
 
+	if autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion] != build.Version {
+		if err := r.Delete(ctx, autoscalingRunnerSet); err != nil {
+			log.Error(err, "Failed to delete autoscaling runner set on version mismatch",
+				"targetVersion", build.Version,
+				"actualVersion", autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
+			)
+			return ctrl.Result{}, nil
+		}
+
+		log.Info("Autoscaling runner set version doesn't match the build version. Deleting the resource.",
+			"targetVersion", build.Version,
+			"actualVersion", autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
+		)
+		return ctrl.Result{}, nil
+	}
+
 	if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
 		log.Info("Adding finalizer")
 		if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
@@ -140,7 +166,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 		return ctrl.Result{}, nil
 	}
 
-	scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]
+	scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
 	if !ok {
 		// Need to create a new runner scale set on Actions service
 		log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.")
@@ -154,14 +180,14 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 	}
 
 	// Make sure the runner group of the scale set is up to date
-	currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetRunnerGroupNameKey]
+	currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName]
 	if !ok || (len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 && !strings.EqualFold(currentRunnerGroupName, autoscalingRunnerSet.Spec.RunnerGroup)) {
 		log.Info("AutoScalingRunnerSet runner group changed. Updating the runner scale set.")
 		return r.updateRunnerScaleSetRunnerGroup(ctx, autoscalingRunnerSet, log)
 	}
 
 	// Make sure the runner scale set name is up to date
-	currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameKey]
+	currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameAnnotationKey]
 	if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) {
 		log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.")
 		return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
@@ -189,10 +215,10 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 
 	desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
 	for _, runnerSet := range existingRunnerSets.all() {
-		log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[LabelKeyRunnerSpecHash])
+		log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash])
 	}
 
-	if desiredSpecHash != latestRunnerSet.Labels[LabelKeyRunnerSpecHash] {
+	if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] {
 		log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set")
 		return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
 	}
@@ -220,7 +246,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 	}
 
 	// Our listener pod is out of date, so we need to delete it to get a new recreate.
-	if listener.Labels[LabelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
+	if listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
 		log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
 		if err := r.Delete(ctx, listener); err != nil {
 			if kerrors.IsNotFound(err) {
@@ -238,6 +264,9 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 	if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners {
 		if err := patchSubResource(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
 			obj.Status.CurrentRunners = latestRunnerSet.Status.CurrentReplicas
+			obj.Status.PendingEphemeralRunners = latestRunnerSet.Status.PendingEphemeralRunners
+			obj.Status.RunningEphemeralRunners = latestRunnerSet.Status.RunningEphemeralRunners
+			obj.Status.FailedEphemeralRunners = latestRunnerSet.Status.FailedEphemeralRunners
 		}); err != nil {
 			log.Error(err, "Failed to update autoscaling runner set status with current runner count")
 			return ctrl.Result{}, err
@@ -303,6 +332,29 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
 	return nil
 }
 
+func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
+	c := autoscalingRunnerSetFinalizerDependencyCleaner{
+		client:               r.Client,
+		autoscalingRunnerSet: autoscalingRunnerSet,
+		logger:               logger,
+	}
+
+	c.removeKubernetesModeRoleBindingFinalizer(ctx)
+	c.removeKubernetesModeRoleFinalizer(ctx)
+	c.removeKubernetesModeServiceAccountFinalizer(ctx)
+	c.removeNoPermissionServiceAccountFinalizer(ctx)
+	c.removeGitHubSecretFinalizer(ctx)
+	c.removeManagerRoleBindingFinalizer(ctx)
+	c.removeManagerRoleFinalizer(ctx)
+
+	requeue, err = c.result()
+	if err != nil {
+		logger.Error(err, "Failed to cleanup finalizer from dependent resource")
+		return true, err
+	}
+	return requeue, nil
+}
+
 func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
 	logger.Info("Creating a new runner scale set")
 	actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
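The helper above delegates to a cleaner type, defined near the end of this file, whose methods each become a no-op once a requeue or error has been recorded, so only one dependent resource is patched per reconcile pass. A condensed, hedged sketch of that short-circuit pattern; the generic step helper and the placeholder step names are my illustration, not the repository's code.

package main

import "fmt"

// stepCleaner mirrors the style of autoscalingRunnerSetFinalizerDependencyCleaner:
// each step is skipped once a requeue or error has been recorded, and result()
// reports the outcome to the caller.
type stepCleaner struct {
	requeue bool
	err     error
}

func (c *stepCleaner) step(fn func() (requeue bool, err error)) {
	if c.requeue || c.err != nil {
		return // an earlier step already asked for a requeue or failed
	}
	c.requeue, c.err = fn()
}

func (c *stepCleaner) result() (bool, error) { return c.requeue, c.err }

func main() {
	c := &stepCleaner{}
	// Placeholder steps; the real cleaner removes finalizers from role bindings,
	// roles, service accounts and secrets that the scale set depends on.
	c.step(func() (bool, error) { return false, nil })                          // nothing to do
	c.step(func() (bool, error) { return true, nil })                           // patched something, requeue
	c.step(func() (bool, error) { return false, fmt.Errorf("never reached") })  // skipped

	requeue, err := c.result()
	fmt.Println(requeue, err) // true <nil>
}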
@@ -313,14 +365,8 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
 		logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set")
 		return ctrl.Result{}, err
 	}
-	runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, autoscalingRunnerSet.Spec.RunnerScaleSetName)
-	if err != nil {
-		logger.Error(err, "Failed to get runner scale set from Actions service")
-		return ctrl.Result{}, err
-	}
 
 	runnerGroupId := 1
-	if runnerScaleSet == nil {
 	if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 {
 		runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup)
 		if err != nil {
|
|||||||
runnerGroupId = int(runnerGroup.ID)
|
runnerGroupId = int(runnerGroup.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, runnerGroupId, autoscalingRunnerSet.Spec.RunnerScaleSetName)
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(err, "Failed to get runner scale set from Actions service",
|
||||||
|
"runnerGroupId",
|
||||||
|
strconv.Itoa(runnerGroupId),
|
||||||
|
"runnerScaleSetName",
|
||||||
|
autoscalingRunnerSet.Spec.RunnerScaleSetName)
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if runnerScaleSet == nil {
|
||||||
runnerScaleSet, err = actionsClient.CreateRunnerScaleSet(
|
runnerScaleSet, err = actionsClient.CreateRunnerScaleSet(
|
||||||
ctx,
|
ctx,
|
||||||
&actions.RunnerScaleSet{
|
&actions.RunnerScaleSet{
|
||||||
@@ -357,12 +414,18 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
 	if autoscalingRunnerSet.Annotations == nil {
 		autoscalingRunnerSet.Annotations = map[string]string{}
 	}
+	if autoscalingRunnerSet.Labels == nil {
+		autoscalingRunnerSet.Labels = map[string]string{}
+	}
 
-	logger.Info("Adding runner scale set ID, name and runner group name as an annotation")
+	logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels")
 	if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
-		obj.Annotations[runnerScaleSetNameKey] = runnerScaleSet.Name
-		obj.Annotations[runnerScaleSetIdKey] = strconv.Itoa(runnerScaleSet.Id)
-		obj.Annotations[runnerScaleSetRunnerGroupNameKey] = runnerScaleSet.RunnerGroupName
+		obj.Annotations[runnerScaleSetNameAnnotationKey] = runnerScaleSet.Name
+		obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id)
+		obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName
+		if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen
+			logger.Error(err, "Failed to apply GitHub URL labels")
+		}
 	}); err != nil {
 		logger.Error(err, "Failed to add runner scale set ID, name and runner group name as an annotation")
 		return ctrl.Result{}, err
@@ -376,7 +439,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
 }
 
 func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
-	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
+	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
 	if err != nil {
 		logger.Error(err, "Failed to parse runner scale set ID")
 		return ctrl.Result{}, err
@@ -407,7 +470,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
 
 	logger.Info("Updating runner scale set runner group name as an annotation")
 	if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
-		obj.Annotations[runnerScaleSetRunnerGroupNameKey] = updatedRunnerScaleSet.RunnerGroupName
+		obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = updatedRunnerScaleSet.RunnerGroupName
 	}); err != nil {
 		logger.Error(err, "Failed to update runner group name annotation")
 		return ctrl.Result{}, err
@@ -418,7 +481,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
 }
 
 func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
-	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
+	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
 	if err != nil {
 		logger.Error(err, "Failed to parse runner scale set ID")
 		return ctrl.Result{}, err
@@ -443,7 +506,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
 
 	logger.Info("Updating runner scale set name as an annotation")
 	if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
-		obj.Annotations[runnerScaleSetNameKey] = updatedRunnerScaleSet.Name
+		obj.Annotations[runnerScaleSetNameAnnotationKey] = updatedRunnerScaleSet.Name
 	}); err != nil {
 		logger.Error(err, "Failed to update runner scale set name annotation")
 		return ctrl.Result{}, err
@@ -454,12 +517,28 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
 }
 
 func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
+	scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
+	if !ok {
+		// Annotation not being present can occur in 3 scenarios
+		// 1. Scale set is never created.
+		//    In this case, we don't need to fetch the actions client to delete the scale set that does not exist
+		//
+		// 2. The scale set has been deleted by the controller.
+		//    In that case, the controller will clean up annotation because the scale set does not exist anymore.
+		//    Removal of the scale set id is also useful because permission cleanup will eventually lose permission
+		//    assigned to it on a GitHub secret, causing actions client from secret to result in permission denied
+		//
+		// 3. Annotation is removed manually.
+		//    In this case, the controller will treat this as if the scale set is being removed from the actions service
+		//    Then, manual deletion of the scale set is required.
+		return nil
+	}
 	logger.Info("Deleting the runner scale set from Actions service")
-	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
+	runnerScaleSetId, err := strconv.Atoi(scaleSetId)
 	if err != nil {
-		// If the annotation is not set correctly, or if it does not exist, we are going to get stuck in a loop trying to parse the scale set id.
-		// If the configuration is invalid (secret does not exist for example), we never get to the point to create runner set. But then, manual cleanup
-		// would get stuck finalizing the resource trying to parse annotation indefinitely
+		// If the annotation is not set correctly, we are going to get stuck in a loop trying to parse the scale set id.
+		// If the configuration is invalid (secret does not exist for example), we never got to the point to create runner set.
+		// But then, manual cleanup would get stuck finalizing the resource trying to parse annotation indefinitely
 		logger.Info("autoscaling runner set does not have annotation describing scale set id. Skip deletion", "err", err.Error())
 		return nil
 	}
@@ -476,6 +555,14 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
 		return err
 	}
 
+	err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
+		delete(obj.Annotations, runnerScaleSetIdAnnotationKey)
+	})
+	if err != nil {
+		logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIdAnnotationKey)
+		return err
+	}
+
 	logger.Info("Deleted the runner scale set from Actions service")
 	return nil
 }
@@ -528,7 +615,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
 
 func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
 	list := new(v1alpha1.EphemeralRunnerSetList)
-	if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingRunnerSet.Name}); err != nil {
+	if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil {
 		return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
 	}
 
@@ -621,7 +708,7 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
 		return []string{owner.Name}
 	}
 
-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, autoscalingRunnerSetOwnerKey, groupVersionIndexer); err != nil {
+	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil {
 		return err
 	}
 
@@ -645,6 +732,328 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
 		Complete(r)
 }
 
+type autoscalingRunnerSetFinalizerDependencyCleaner struct {
+	// configuration fields
+	client               client.Client
+	autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
+	logger               logr.Logger
+
+	// fields to operate on
+	requeue bool
+	err     error
+}
+
+func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) {
+	return c.requeue, c.err
+}
+
+func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
+	if c.requeue || c.err != nil {
+		return
+	}
+
+	roleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName]
+	if !ok {
+		c.logger.Info(
+			"Skipping cleaning up kubernetes mode service account",
+			"reason",
+			fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleBindingName),
+		)
+		return
+	}
+
+	c.logger.Info("Removing finalizer from container mode kubernetes role binding", "name", roleBindingName)
+
+	roleBinding := new(rbacv1.RoleBinding)
+	err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
+	switch {
+	case err == nil:
+		if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
+			c.logger.Info("Kubernetes mode role binding finalizer has already been removed", "name", roleBindingName)
+			return
+		}
+		err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
+			controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
+		})
+		if err != nil {
+			c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
+			return
+		}
+		c.requeue = true
+		c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
+		return
+	case err != nil && !kerrors.IsNotFound(err):
+		c.err = fmt.Errorf("failed to fetch kubernetes mode role binding: %w", err)
+		return
+	default:
+		c.logger.Info("Container mode kubernetes role binding has already been deleted", "name", roleBindingName)
+		return
+	}
+}
+
+func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
+	if c.requeue || c.err != nil {
+		return
+	}
+
+	roleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName]
+	if !ok {
+		c.logger.Info(
+			"Skipping cleaning up kubernetes mode role",
+			"reason",
+			fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleName),
+		)
+		return
+	}
+
+	c.logger.Info("Removing finalizer from container mode kubernetes role", "name", roleName)
+	role := new(rbacv1.Role)
+	err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
+	switch {
+	case err == nil:
+		if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
+			c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName)
+			return
+		}
+		err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
+			controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
+		})
+		if err != nil {
+			c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
+			return
|
}
|
||||||
|
c.requeue = true
|
||||||
|
c.logger.Info("Removed finalizer from container mode kubernetes role")
|
||||||
|
return
|
||||||
|
case err != nil && !kerrors.IsNotFound(err):
|
||||||
|
c.err = fmt.Errorf("failed to fetch kubernetes mode role: %w", err)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
c.logger.Info("Container mode kubernetes role has already been deleted", "name", roleName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
|
||||||
|
if c.requeue || c.err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName]
|
||||||
|
if !ok {
|
||||||
|
c.logger.Info(
|
||||||
|
"Skipping cleaning up kubernetes mode role binding",
|
||||||
|
"reason",
|
||||||
|
fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeServiceAccountName),
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.logger.Info("Removing finalizer from container mode kubernetes service account", "name", serviceAccountName)
|
||||||
|
|
||||||
|
serviceAccount := new(corev1.ServiceAccount)
|
||||||
|
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
|
||||||
|
c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
|
||||||
|
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.requeue = true
|
||||||
|
c.logger.Info("Removed finalizer from container mode kubernetes service account")
|
||||||
|
return
|
||||||
|
case err != nil && !kerrors.IsNotFound(err):
|
||||||
|
c.err = fmt.Errorf("failed to fetch kubernetes mode service account: %w", err)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
c.logger.Info("Container mode kubernetes service account has already been deleted", "name", serviceAccountName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
|
||||||
|
if c.requeue || c.err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName]
|
||||||
|
if !ok {
|
||||||
|
c.logger.Info(
|
||||||
|
"Skipping cleaning up no permission service account",
|
||||||
|
"reason",
|
||||||
|
fmt.Sprintf("annotation key %q not present", AnnotationKeyNoPermissionServiceAccountName),
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.logger.Info("Removing finalizer from no permission service account", "name", serviceAccountName)
|
||||||
|
|
||||||
|
serviceAccount := new(corev1.ServiceAccount)
|
||||||
|
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
|
||||||
|
c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
|
||||||
|
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.requeue = true
|
||||||
|
c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
|
||||||
|
return
|
||||||
|
case err != nil && !kerrors.IsNotFound(err):
|
||||||
|
c.err = fmt.Errorf("failed to fetch service account: %w", err)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
c.logger.Info("No permission service account has already been deleted", "name", serviceAccountName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
|
||||||
|
if c.requeue || c.err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
githubSecretName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName]
|
||||||
|
if !ok {
|
||||||
|
c.logger.Info(
|
||||||
|
"Skipping cleaning up no permission service account",
|
||||||
|
"reason",
|
||||||
|
fmt.Sprintf("annotation key %q not present", AnnotationKeyGitHubSecretName),
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.logger.Info("Removing finalizer from GitHub secret", "name", githubSecretName)
|
||||||
|
|
||||||
|
githubSecret := new(corev1.Secret)
|
||||||
|
err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret)
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
if !controllerutil.ContainsFinalizer(githubSecret, AutoscalingRunnerSetCleanupFinalizerName) {
|
||||||
|
c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) {
|
||||||
|
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.requeue = true
|
||||||
|
c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
|
||||||
|
return
|
||||||
|
case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
|
||||||
|
c.err = fmt.Errorf("failed to fetch GitHub secret: %w", err)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
c.logger.Info("GitHub secret has already been deleted", "name", githubSecretName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
|
||||||
|
if c.requeue || c.err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
managerRoleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName]
|
||||||
|
if !ok {
|
||||||
|
c.logger.Info(
|
||||||
|
"Skipping cleaning up manager role binding",
|
||||||
|
"reason",
|
||||||
|
fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleBindingName),
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.logger.Info("Removing finalizer from manager role binding", "name", managerRoleBindingName)
|
||||||
|
|
||||||
|
roleBinding := new(rbacv1.RoleBinding)
|
||||||
|
err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
|
||||||
|
c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
|
||||||
|
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.requeue = true
|
||||||
|
c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
|
||||||
|
return
|
||||||
|
case err != nil && !kerrors.IsNotFound(err):
|
||||||
|
c.err = fmt.Errorf("failed to fetch manager role binding: %w", err)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
c.logger.Info("Manager role binding has already been deleted", "name", managerRoleBindingName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
|
||||||
|
if c.requeue || c.err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
managerRoleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName]
|
||||||
|
if !ok {
|
||||||
|
c.logger.Info(
|
||||||
|
"Skipping cleaning up manager role",
|
||||||
|
"reason",
|
||||||
|
fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleName),
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.logger.Info("Removing finalizer from manager role", "name", managerRoleName)
|
||||||
|
|
||||||
|
role := new(rbacv1.Role)
|
||||||
|
err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
|
||||||
|
c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
|
||||||
|
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.requeue = true
|
||||||
|
c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
|
||||||
|
return
|
||||||
|
case err != nil && !kerrors.IsNotFound(err):
|
||||||
|
c.err = fmt.Errorf("failed to fetch manager role: %w", err)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
c.logger.Info("Manager role has already been deleted", "name", managerRoleName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
 // NOTE: if this logic should be used for other resources,
 // consider using generics
 type EphemeralRunnerSets struct {
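The NOTE above floats generics as an option if this cleanup pattern grows to more resource kinds. Purely as an illustration of that idea, not code from this change, a generic finalizer-removal helper could be sketched as:

package example

import (
	"context"
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// removeFinalizer fetches the named object and, if it still carries the given
// finalizer, patches the finalizer off. It reports whether a patch was issued.
// Hypothetical sketch; the PR implements one method per resource kind instead.
func removeFinalizer[T client.Object](ctx context.Context, c client.Client, key types.NamespacedName, obj T, finalizer string) (bool, error) {
	if err := c.Get(ctx, key, obj); err != nil {
		if kerrors.IsNotFound(err) {
			return false, nil // already deleted, nothing to do
		}
		return false, fmt.Errorf("failed to fetch %T: %w", obj, err)
	}
	if !controllerutil.ContainsFinalizer(obj, finalizer) {
		return false, nil
	}
	original := obj.DeepCopyObject().(client.Object)
	controllerutil.RemoveFinalizer(obj, finalizer)
	if err := c.Patch(ctx, obj, client.MergeFrom(original)); err != nil {
		return false, fmt.Errorf("failed to patch %T without finalizer: %w", obj, err)
	}
	return true, nil
}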
|||||||
@@ -13,6 +13,7 @@ import (
 	"time"
 
 	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -23,8 +24,10 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 
 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
+	"github.com/actions/actions-runner-controller/build"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/actions/actions-runner-controller/github/actions/fake"
 	"github.com/actions/actions-runner-controller/github/actions/testserver"
@@ -36,13 +39,25 @@ const (
 	autoscalingRunnerSetTestGitHubToken = "gh_token"
 )
 
-var _ = Describe("Test AutoScalingRunnerSet controller", func() {
+var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 	var ctx context.Context
 	var mgr ctrl.Manager
 	var autoscalingNS *corev1.Namespace
 	var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
 	var configSecret *corev1.Secret
 
+	var originalBuildVersion string
+	buildVersion := "0.1.0"
+
+	BeforeAll(func() {
+		originalBuildVersion = build.Version
+		build.Version = buildVersion
+	})
+
+	AfterAll(func() {
+		build.Version = originalBuildVersion
+	})
+
 	BeforeEach(func() {
 		ctx = context.Background()
 		autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
|
||||||
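One reason for the Ordered decorator added above: in Ginkgo v2, BeforeAll and AfterAll may only appear inside Ordered containers, so pinning build.Version for a whole Describe block requires it. A tiny self-contained illustration (the names here are placeholders, not this suite's code):

package example_test

import (
	. "github.com/onsi/ginkgo/v2"
)

// pinnedVersion stands in for a package-level value such as build.Version.
var pinnedVersion = "dev"

var _ = Describe("version pinning", Ordered, func() {
	var saved string

	BeforeAll(func() {
		saved = pinnedVersion
		pinnedVersion = "0.1.0" // every spec in this Ordered container sees the pinned value
	})

	AfterAll(func() {
		pinnedVersion = saved // restore for the rest of the suite
	})

	It("runs with the pinned version", func() {})
})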
@@ -65,6 +80,9 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "test-asrs",
 				Namespace: autoscalingNS.Name,
+				Labels: map[string]string{
+					LabelKeyKubernetesVersion: buildVersion,
+				},
 			},
 			Spec: v1alpha1.AutoscalingRunnerSetSpec{
 				GitHubConfigUrl: "https://github.com/owner/repo",
|
||||||
@@ -117,19 +135,39 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
 					return "", err
 				}
 
-				if _, ok := created.Annotations[runnerScaleSetIdKey]; !ok {
+				if _, ok := created.Annotations[runnerScaleSetIdAnnotationKey]; !ok {
 					return "", nil
 				}
 
-				if _, ok := created.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok {
+				if _, ok := created.Annotations[AnnotationKeyGitHubRunnerGroupName]; !ok {
 					return "", nil
 				}
 
-				return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIdKey], created.Annotations[runnerScaleSetRunnerGroupNameKey]), nil
+				return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIdAnnotationKey], created.Annotations[AnnotationKeyGitHubRunnerGroupName]), nil
 			},
 			autoscalingRunnerSetTestTimeout,
 			autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("1_testgroup"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation")
 
+		Eventually(
+			func() (string, error) {
+				err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
+				if err != nil {
+					return "", err
+				}
+
+				if _, ok := created.Labels[LabelKeyGitHubOrganization]; !ok {
+					return "", nil
+				}
+
+				if _, ok := created.Labels[LabelKeyGitHubRepository]; !ok {
+					return "", nil
+				}
+
+				return fmt.Sprintf("%s/%s", created.Labels[LabelKeyGitHubOrganization], created.Labels[LabelKeyGitHubRepository]), nil
+			},
+			autoscalingRunnerSetTestTimeout,
+			autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("owner/repo"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's label")
+
 		// Check if ephemeral runner set is created
 		Eventually(
 			func() (int, error) {
|
||||||
@@ -157,23 +195,6 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
 			err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
 			Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")
 			Expect(len(runnerSetList.Items)).To(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created")
-			runnerSet := runnerSetList.Items[0]
-			statusUpdate := runnerSet.DeepCopy()
-			statusUpdate.Status.CurrentReplicas = 100
-			err = k8sClient.Status().Patch(ctx, statusUpdate, client.MergeFrom(&runnerSet))
-			Expect(err).NotTo(HaveOccurred(), "failed to patch EphemeralRunnerSet status")
-
-			Eventually(
-				func() (int, error) {
-					updated := new(v1alpha1.AutoscalingRunnerSet)
-					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated)
-					if err != nil {
-						return 0, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err)
-					}
-					return updated.Status.CurrentRunners, nil
-				},
-				autoscalingRunnerSetTestTimeout,
-				autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(100), "AutoScalingRunnerSet status should be updated")
 		})
 	})
|
||||||
|
|
||||||
@@ -275,10 +296,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
 					return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
 				}
 
-				return runnerSetList.Items[0].Labels[LabelKeyRunnerSpecHash], nil
+				return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil
 			},
 			autoscalingRunnerSetTestTimeout,
-			autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
+			autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
 
 		// We should create a new listener
 		Eventually(
|
||||||
@@ -368,18 +389,18 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
 					return "", err
 				}
 
-				if _, ok := updated.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok {
+				if _, ok := updated.Annotations[AnnotationKeyGitHubRunnerGroupName]; !ok {
 					return "", nil
 				}
 
-				return updated.Annotations[runnerScaleSetRunnerGroupNameKey], nil
+				return updated.Annotations[AnnotationKeyGitHubRunnerGroupName], nil
 			},
 			autoscalingRunnerSetTestTimeout,
 			autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the new runner group in its annotation")
 
 		// delete the annotation and it should be re-added
 		patched = autoscalingRunnerSet.DeepCopy()
-		delete(patched.Annotations, runnerScaleSetRunnerGroupNameKey)
+		delete(patched.Annotations, AnnotationKeyGitHubRunnerGroupName)
 		err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
 		Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
|
||||||
|
|
||||||
@@ -391,19 +412,97 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
 					return "", err
 				}
 
-				if _, ok := updated.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok {
+				if _, ok := updated.Annotations[AnnotationKeyGitHubRunnerGroupName]; !ok {
 					return "", nil
 				}
 
-				return updated.Annotations[runnerScaleSetRunnerGroupNameKey], nil
+				return updated.Annotations[AnnotationKeyGitHubRunnerGroupName], nil
 			},
 			autoscalingRunnerSetTestTimeout,
-			autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the runner group in its annotation")
+			autoscalingRunnerSetTestInterval,
+		).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the runner group in its annotation")
 	})
 })
|
||||||
|
|
||||||
|
It("Should update Status on EphemeralRunnerSet status Update", func() {
|
||||||
|
ars := new(v1alpha1.AutoscalingRunnerSet)
|
||||||
|
Eventually(
|
||||||
|
func() (bool, error) {
|
||||||
|
err := k8sClient.Get(
|
||||||
|
ctx,
|
||||||
|
client.ObjectKey{
|
||||||
|
Name: autoscalingRunnerSet.Name,
|
||||||
|
Namespace: autoscalingRunnerSet.Namespace,
|
||||||
|
},
|
||||||
|
ars,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeTrue(), "AutoscalingRunnerSet should be created")
|
||||||
|
|
||||||
|
runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
|
||||||
|
Eventually(func() (int, error) {
|
||||||
|
err := k8sClient.List(ctx, runnerSetList, client.InNamespace(ars.Namespace))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return len(runnerSetList.Items), nil
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeEquivalentTo(1), "Failed to fetch runner set list")
|
||||||
|
|
||||||
|
runnerSet := runnerSetList.Items[0]
|
||||||
|
statusUpdate := runnerSet.DeepCopy()
|
||||||
|
statusUpdate.Status.CurrentReplicas = 6
|
||||||
|
statusUpdate.Status.FailedEphemeralRunners = 1
|
||||||
|
statusUpdate.Status.RunningEphemeralRunners = 2
|
||||||
|
statusUpdate.Status.PendingEphemeralRunners = 3
|
||||||
|
|
||||||
|
desiredStatus := v1alpha1.AutoscalingRunnerSetStatus{
|
||||||
|
CurrentRunners: statusUpdate.Status.CurrentReplicas,
|
||||||
|
State: "",
|
||||||
|
PendingEphemeralRunners: statusUpdate.Status.PendingEphemeralRunners,
|
||||||
|
RunningEphemeralRunners: statusUpdate.Status.RunningEphemeralRunners,
|
||||||
|
FailedEphemeralRunners: statusUpdate.Status.FailedEphemeralRunners,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := k8sClient.Status().Patch(ctx, statusUpdate, client.MergeFrom(&runnerSet))
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "Failed to patch runner set status")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (v1alpha1.AutoscalingRunnerSetStatus, error) {
|
||||||
|
updated := new(v1alpha1.AutoscalingRunnerSet)
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated)
|
||||||
|
if err != nil {
|
||||||
|
return v1alpha1.AutoscalingRunnerSetStatus{}, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err)
|
||||||
|
}
|
||||||
|
return updated.Status, nil
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeEquivalentTo(desiredStatus), "AutoScalingRunnerSet status should be updated")
|
||||||
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
var _ = Describe("Test AutoScalingController updates", func() {
|
var _ = Describe("Test AutoScalingController updates", Ordered, func() {
|
||||||
|
var originalBuildVersion string
|
||||||
|
buildVersion := "0.1.0"
|
||||||
|
|
||||||
|
BeforeAll(func() {
|
||||||
|
originalBuildVersion = build.Version
|
||||||
|
build.Version = buildVersion
|
||||||
|
})
|
||||||
|
|
||||||
|
AfterAll(func() {
|
||||||
|
build.Version = originalBuildVersion
|
||||||
|
})
|
||||||
|
|
||||||
Context("Creating autoscaling runner set with RunnerScaleSetName set", func() {
|
Context("Creating autoscaling runner set with RunnerScaleSetName set", func() {
|
||||||
var ctx context.Context
|
var ctx context.Context
|
||||||
var mgr ctrl.Manager
|
var mgr ctrl.Manager
|
||||||
@@ -412,6 +511,7 @@ var _ = Describe("Test AutoScalingController updates", func() {
 		var configSecret *corev1.Secret
 
 		BeforeEach(func() {
+			originalBuildVersion = build.Version
 			ctx = context.Background()
 			autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
 			configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||||
@@ -457,6 +557,9 @@ var _ = Describe("Test AutoScalingController updates", func() {
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "test-asrs",
|
Name: "test-asrs",
|
||||||
Namespace: autoscalingNS.Name,
|
Namespace: autoscalingNS.Name,
|
||||||
|
Labels: map[string]string{
|
||||||
|
LabelKeyKubernetesVersion: buildVersion,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||||
@@ -490,7 +593,7 @@ var _ = Describe("Test AutoScalingController updates", func() {
 					return "", err
 				}
 
-				if val, ok := ars.Annotations[runnerScaleSetNameKey]; ok {
+				if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok {
 					return val, nil
 				}
|
||||||
|
|
||||||
@@ -502,6 +605,7 @@ var _ = Describe("Test AutoScalingController updates", func() {
|
|||||||
|
|
||||||
update := autoscalingRunnerSet.DeepCopy()
|
update := autoscalingRunnerSet.DeepCopy()
|
||||||
update.Spec.RunnerScaleSetName = "testset_update"
|
update.Spec.RunnerScaleSetName = "testset_update"
|
||||||
|
|
||||||
err = k8sClient.Patch(ctx, update, client.MergeFrom(autoscalingRunnerSet))
|
err = k8sClient.Patch(ctx, update, client.MergeFrom(autoscalingRunnerSet))
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to update AutoScalingRunnerSet")
|
Expect(err).NotTo(HaveOccurred(), "failed to update AutoScalingRunnerSet")
|
||||||
|
|
||||||
@@ -513,7 +617,7 @@ var _ = Describe("Test AutoScalingController updates", func() {
 					return "", err
 				}
 
-				if val, ok := ars.Annotations[runnerScaleSetNameKey]; ok {
+				if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok {
 					return val, nil
 				}
|
||||||
|
|
||||||
@@ -526,7 +630,18 @@ var _ = Describe("Test AutoScalingController updates", func() {
 	})
 })
 
-var _ = Describe("Test AutoscalingController creation failures", func() {
+var _ = Describe("Test AutoscalingController creation failures", Ordered, func() {
+	var originalBuildVersion string
+	buildVersion := "0.1.0"
+
+	BeforeAll(func() {
+		originalBuildVersion = build.Version
+		build.Version = buildVersion
+	})
+
+	AfterAll(func() {
+		build.Version = originalBuildVersion
+	})
 	Context("When autoscaling runner set creation fails on the client", func() {
 		var ctx context.Context
 		var mgr ctrl.Manager
|
||||||
@@ -557,6 +672,9 @@ var _ = Describe("Test AutoscalingController creation failures", func() {
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "test-asrs",
|
Name: "test-asrs",
|
||||||
Namespace: autoscalingNS.Name,
|
Namespace: autoscalingNS.Name,
|
||||||
|
Labels: map[string]string{
|
||||||
|
LabelKeyKubernetesVersion: buildVersion,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||||
@@ -635,7 +753,18 @@ var _ = Describe("Test AutoscalingController creation failures", func() {
 	})
 })
 
-var _ = Describe("Test Client optional configuration", func() {
+var _ = Describe("Test client optional configuration", Ordered, func() {
+	var originalBuildVersion string
+	buildVersion := "0.1.0"
+
+	BeforeAll(func() {
+		originalBuildVersion = build.Version
+		build.Version = buildVersion
+	})
+
+	AfterAll(func() {
+		build.Version = originalBuildVersion
+	})
 	Context("When specifying a proxy", func() {
 		var ctx context.Context
 		var mgr ctrl.Manager
|
||||||
@@ -675,6 +804,9 @@ var _ = Describe("Test Client optional configuration", func() {
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "test-asrs",
|
Name: "test-asrs",
|
||||||
Namespace: autoscalingNS.Name,
|
Namespace: autoscalingNS.Name,
|
||||||
|
Labels: map[string]string{
|
||||||
|
LabelKeyKubernetesVersion: buildVersion,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||||
GitHubConfigUrl: "http://example.com/org/repo",
|
GitHubConfigUrl: "http://example.com/org/repo",
|
||||||
@@ -751,6 +883,9 @@ var _ = Describe("Test Client optional configuration", func() {
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "test-asrs",
|
Name: "test-asrs",
|
||||||
Namespace: autoscalingNS.Name,
|
Namespace: autoscalingNS.Name,
|
||||||
|
Labels: map[string]string{
|
||||||
|
LabelKeyKubernetesVersion: buildVersion,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||||
GitHubConfigUrl: "http://example.com/org/repo",
|
GitHubConfigUrl: "http://example.com/org/repo",
|
||||||
@@ -867,6 +1002,9 @@ var _ = Describe("Test Client optional configuration", func() {
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "test-asrs",
|
Name: "test-asrs",
|
||||||
Namespace: autoscalingNS.Name,
|
Namespace: autoscalingNS.Name,
|
||||||
|
Labels: map[string]string{
|
||||||
|
LabelKeyKubernetesVersion: buildVersion,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||||
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
|
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
|
||||||
@@ -917,6 +1055,9 @@ var _ = Describe("Test Client optional configuration", func() {
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "test-asrs",
|
Name: "test-asrs",
|
||||||
Namespace: autoscalingNS.Name,
|
Namespace: autoscalingNS.Name,
|
||||||
|
Labels: map[string]string{
|
||||||
|
LabelKeyKubernetesVersion: buildVersion,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||||
@@ -967,7 +1108,7 @@ var _ = Describe("Test Client optional configuration", func() {
 				g.Expect(listener.Spec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "listener does not have TLS config")
 			},
 			autoscalingRunnerSetTestTimeout,
-			autoscalingListenerTestInterval,
+			autoscalingRunnerSetTestInterval,
 		).Should(Succeed(), "tls config is incorrect")
 	})
|
||||||
|
|
||||||
@@ -978,6 +1119,9 @@ var _ = Describe("Test Client optional configuration", func() {
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "test-asrs",
|
Name: "test-asrs",
|
||||||
Namespace: autoscalingNS.Name,
|
Namespace: autoscalingNS.Name,
|
||||||
|
Labels: map[string]string{
|
||||||
|
LabelKeyKubernetesVersion: buildVersion,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||||
@@ -1024,8 +1168,459 @@ var _ = Describe("Test Client optional configuration", func() {
 				g.Expect(runnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "EphemeralRunnerSpec does not have TLS config")
 			},
 			autoscalingRunnerSetTestTimeout,
-			autoscalingListenerTestInterval,
+			autoscalingRunnerSetTestInterval,
 		).Should(Succeed())
 	})
 	})
 })
|
||||||
|
|
||||||
|
var _ = Describe("Test external permissions cleanup", Ordered, func() {
|
||||||
|
var originalBuildVersion string
|
||||||
|
buildVersion := "0.1.0"
|
||||||
|
|
||||||
|
BeforeAll(func() {
|
||||||
|
originalBuildVersion = build.Version
|
||||||
|
build.Version = buildVersion
|
||||||
|
})
|
||||||
|
|
||||||
|
AfterAll(func() {
|
||||||
|
build.Version = originalBuildVersion
|
||||||
|
})
|
||||||
|
|
||||||
|
It("Should clean up kubernetes mode permissions", func() {
|
||||||
|
ctx := context.Background()
|
||||||
|
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
|
||||||
|
|
||||||
|
configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||||
|
|
||||||
|
controller := &AutoscalingRunnerSetReconciler{
|
||||||
|
Client: mgr.GetClient(),
|
||||||
|
Scheme: mgr.GetScheme(),
|
||||||
|
Log: logf.Log,
|
||||||
|
ControllerNamespace: autoscalingNS.Name,
|
||||||
|
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||||
|
ActionsClient: fake.NewMultiClient(),
|
||||||
|
}
|
||||||
|
err := controller.SetupWithManager(mgr)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||||
|
|
||||||
|
startManagers(GinkgoT(), mgr)
|
||||||
|
|
||||||
|
min := 1
|
||||||
|
max := 10
|
||||||
|
autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "test-asrs",
|
||||||
|
Namespace: autoscalingNS.Name,
|
||||||
|
Labels: map[string]string{
|
||||||
|
"app.kubernetes.io/name": "gha-runner-scale-set",
|
||||||
|
LabelKeyKubernetesVersion: buildVersion,
|
||||||
|
},
|
||||||
|
Annotations: map[string]string{
|
||||||
|
AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding",
|
||||||
|
AnnotationKeyKubernetesModeRoleName: "kube-mode-role",
|
||||||
|
AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||||
|
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||||
|
GitHubConfigSecret: configSecret.Name,
|
||||||
|
MaxRunners: &max,
|
||||||
|
MinRunners: &min,
|
||||||
|
RunnerGroup: "testgroup",
|
||||||
|
Template: corev1.PodTemplateSpec{
|
||||||
|
Spec: corev1.PodSpec{
|
||||||
|
Containers: []corev1.Container{
|
||||||
|
{
|
||||||
|
Name: "runner",
|
||||||
|
Image: "ghcr.io/actions/runner",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
role := &rbacv1.Role{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName],
|
||||||
|
Namespace: autoscalingRunnerSet.Namespace,
|
||||||
|
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err = k8sClient.Create(ctx, role)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role")
|
||||||
|
|
||||||
|
serviceAccount := &corev1.ServiceAccount{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName],
|
||||||
|
Namespace: autoscalingRunnerSet.Namespace,
|
||||||
|
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err = k8sClient.Create(ctx, serviceAccount)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode service account")
|
||||||
|
|
||||||
|
roleBinding := &rbacv1.RoleBinding{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName],
|
||||||
|
Namespace: autoscalingRunnerSet.Namespace,
|
||||||
|
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||||
|
},
|
||||||
|
Subjects: []rbacv1.Subject{
|
||||||
|
{
|
||||||
|
Kind: "ServiceAccount",
|
||||||
|
Name: serviceAccount.Name,
|
||||||
|
Namespace: serviceAccount.Namespace,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
RoleRef: rbacv1.RoleRef{
|
||||||
|
APIGroup: rbacv1.GroupName,
|
||||||
|
// Kind is the type of resource being referenced
|
||||||
|
Kind: "Role",
|
||||||
|
Name: role.Name,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err = k8sClient.Create(ctx, roleBinding)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role binding")
|
||||||
|
|
||||||
|
err = k8sClient.Create(ctx, autoscalingRunnerSet)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (string, error) {
|
||||||
|
created := new(v1alpha1.AutoscalingRunnerSet)
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if len(created.Finalizers) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
return created.Finalizers[0], nil
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")
|
||||||
|
|
||||||
|
err = k8sClient.Delete(ctx, autoscalingRunnerSet)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set")
|
||||||
|
|
||||||
|
err = k8sClient.Delete(ctx, roleBinding)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role binding")
|
||||||
|
|
||||||
|
err = k8sClient.Delete(ctx, role)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role")
|
||||||
|
|
||||||
|
err = k8sClient.Delete(ctx, serviceAccount)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode service account")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() bool {
|
||||||
|
r := new(rbacv1.RoleBinding)
|
||||||
|
err := k8sClient.Get(ctx, types.NamespacedName{
|
||||||
|
Name: roleBinding.Name,
|
||||||
|
Namespace: roleBinding.Namespace,
|
||||||
|
}, r)
|
||||||
|
|
||||||
|
return errors.IsNotFound(err)
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeTrue(), "Expected role binding to be cleaned up")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() bool {
|
||||||
|
r := new(rbacv1.Role)
|
||||||
|
err := k8sClient.Get(ctx, types.NamespacedName{
|
||||||
|
Name: role.Name,
|
||||||
|
Namespace: role.Namespace,
|
||||||
|
}, r)
|
||||||
|
|
||||||
|
return errors.IsNotFound(err)
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeTrue(), "Expected role to be cleaned up")
|
||||||
|
})
|
||||||
|
|
||||||
|
It("Should clean up manager permissions and no-permission service account", func() {
|
||||||
|
ctx := context.Background()
|
||||||
|
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
|
||||||
|
|
||||||
|
controller := &AutoscalingRunnerSetReconciler{
|
||||||
|
Client: mgr.GetClient(),
|
||||||
|
Scheme: mgr.GetScheme(),
|
||||||
|
Log: logf.Log,
|
||||||
|
ControllerNamespace: autoscalingNS.Name,
|
||||||
|
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||||
|
ActionsClient: fake.NewMultiClient(),
|
||||||
|
}
|
||||||
|
err := controller.SetupWithManager(mgr)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||||
|
|
||||||
|
startManagers(GinkgoT(), mgr)
|
||||||
|
|
||||||
|
min := 1
|
||||||
|
max := 10
|
||||||
|
autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "test-asrs",
|
||||||
|
Namespace: autoscalingNS.Name,
|
||||||
|
Labels: map[string]string{
|
||||||
|
"app.kubernetes.io/name": "gha-runner-scale-set",
|
||||||
|
LabelKeyKubernetesVersion: buildVersion,
|
||||||
|
},
|
||||||
|
Annotations: map[string]string{
|
||||||
|
AnnotationKeyManagerRoleName: "manager-role",
|
||||||
|
AnnotationKeyManagerRoleBindingName: "manager-role-binding",
|
||||||
|
AnnotationKeyGitHubSecretName: "gh-secret-name",
|
||||||
|
AnnotationKeyNoPermissionServiceAccountName: "no-permission-sa",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||||
|
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||||
|
MaxRunners: &max,
|
||||||
|
MinRunners: &min,
|
||||||
|
RunnerGroup: "testgroup",
|
||||||
|
Template: corev1.PodTemplateSpec{
|
||||||
|
Spec: corev1.PodSpec{
|
||||||
|
Containers: []corev1.Container{
|
||||||
|
{
|
||||||
|
Name: "runner",
|
||||||
|
Image: "ghcr.io/actions/runner",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
secret := &corev1.Secret{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName],
|
||||||
|
Namespace: autoscalingRunnerSet.Namespace,
|
||||||
|
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||||
|
},
|
||||||
|
Data: map[string][]byte{
|
||||||
|
"github_token": []byte(defaultGitHubToken),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err = k8sClient.Create(context.Background(), secret)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create github secret")
|
||||||
|
|
||||||
|
autoscalingRunnerSet.Spec.GitHubConfigSecret = secret.Name
|
||||||
|
|
||||||
|
role := &rbacv1.Role{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName],
|
||||||
|
Namespace: autoscalingRunnerSet.Namespace,
|
||||||
|
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err = k8sClient.Create(ctx, role)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create manager role")
|
||||||
|
|
||||||
|
roleBinding := &rbacv1.RoleBinding{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName],
|
||||||
|
Namespace: autoscalingRunnerSet.Namespace,
|
||||||
|
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||||
|
},
|
||||||
|
RoleRef: rbacv1.RoleRef{
|
||||||
|
APIGroup: rbacv1.GroupName,
|
||||||
|
Kind: "Role",
|
||||||
|
Name: role.Name,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err = k8sClient.Create(ctx, roleBinding)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create manager role binding")
|
||||||
|
|
||||||
|
noPermissionServiceAccount := &corev1.ServiceAccount{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName],
|
||||||
|
Namespace: autoscalingRunnerSet.Namespace,
|
||||||
|
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err = k8sClient.Create(ctx, noPermissionServiceAccount)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create no permission service account")
|
||||||
|
|
||||||
|
err = k8sClient.Create(ctx, autoscalingRunnerSet)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (string, error) {
|
||||||
|
created := new(v1alpha1.AutoscalingRunnerSet)
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if len(created.Finalizers) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
return created.Finalizers[0], nil
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")
|
||||||
|
|
||||||
|
err = k8sClient.Delete(ctx, autoscalingRunnerSet)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set")
|
||||||
|
|
||||||
|
err = k8sClient.Delete(ctx, noPermissionServiceAccount)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to delete no permission service account")
|
||||||
|
|
||||||
|
err = k8sClient.Delete(ctx, secret)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to delete GitHub secret")
|
||||||
|
|
||||||
|
err = k8sClient.Delete(ctx, roleBinding)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to delete manager role binding")
|
||||||
|
|
||||||
|
err = k8sClient.Delete(ctx, role)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to delete manager role")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() bool {
|
||||||
|
r := new(corev1.ServiceAccount)
|
||||||
|
err := k8sClient.Get(
|
||||||
|
ctx,
|
||||||
|
types.NamespacedName{
|
||||||
|
Name: noPermissionServiceAccount.Name,
|
||||||
|
Namespace: noPermissionServiceAccount.Namespace,
|
||||||
|
},
|
||||||
|
r,
|
||||||
|
)
|
||||||
|
return errors.IsNotFound(err)
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeTrue(), "Expected no permission service account to be cleaned up")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() bool {
|
||||||
|
r := new(corev1.Secret)
|
||||||
|
err := k8sClient.Get(ctx, types.NamespacedName{
|
||||||
|
Name: secret.Name,
|
||||||
|
Namespace: secret.Namespace,
|
||||||
|
}, r)
|
||||||
|
|
||||||
|
return errors.IsNotFound(err)
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeTrue(), "Expected role binding to be cleaned up")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() bool {
|
||||||
|
r := new(rbacv1.RoleBinding)
|
||||||
|
err := k8sClient.Get(ctx, types.NamespacedName{
|
||||||
|
Name: roleBinding.Name,
|
||||||
|
Namespace: roleBinding.Namespace,
|
||||||
|
}, r)
|
||||||
|
|
||||||
|
return errors.IsNotFound(err)
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeTrue(), "Expected role binding to be cleaned up")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() bool {
|
||||||
|
r := new(rbacv1.Role)
|
||||||
|
err := k8sClient.Get(
|
||||||
|
ctx,
|
||||||
|
types.NamespacedName{
|
||||||
|
Name: role.Name,
|
||||||
|
Namespace: role.Namespace,
|
||||||
|
},
|
||||||
|
r,
|
||||||
|
)
|
||||||
|
|
||||||
|
return errors.IsNotFound(err)
|
||||||
|
},
|
||||||
|
autoscalingRunnerSetTestTimeout,
|
||||||
|
autoscalingRunnerSetTestInterval,
|
||||||
|
).Should(BeTrue(), "Expected role to be cleaned up")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
var _ = Describe("Test resource version and build version mismatch", func() {
|
||||||
|
It("Should delete and recreate the autoscaling runner set to match the build version", func() {
|
||||||
|
ctx := context.Background()
|
||||||
|
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
|
||||||
|
|
||||||
|
configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||||
|
|
||||||
|
controller := &AutoscalingRunnerSetReconciler{
|
||||||
|
Client: mgr.GetClient(),
|
||||||
|
Scheme: mgr.GetScheme(),
|
||||||
|
Log: logf.Log,
|
||||||
|
ControllerNamespace: autoscalingNS.Name,
|
||||||
|
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||||
|
ActionsClient: fake.NewMultiClient(),
|
||||||
|
}
|
||||||
|
err := controller.SetupWithManager(mgr)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||||
|
|
||||||
|
originalVersion := build.Version
|
||||||
|
defer func() {
|
||||||
|
build.Version = originalVersion
|
||||||
|
}()
|
||||||
|
build.Version = "0.2.0"
|
||||||
|
|
||||||
|
min := 1
|
||||||
|
max := 10
|
||||||
|
autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "test-asrs",
|
||||||
|
Namespace: autoscalingNS.Name,
|
||||||
|
Labels: map[string]string{
|
||||||
|
"app.kubernetes.io/name": "gha-runner-scale-set",
|
||||||
|
"app.kubernetes.io/version": "0.1.0",
|
||||||
|
},
|
||||||
|
Annotations: map[string]string{
|
||||||
|
AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding",
|
||||||
|
AnnotationKeyKubernetesModeRoleName: "kube-mode-role",
|
||||||
|
AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||||
|
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||||
|
GitHubConfigSecret: configSecret.Name,
|
||||||
|
MaxRunners: &max,
|
||||||
|
MinRunners: &min,
|
||||||
|
RunnerGroup: "testgroup",
|
||||||
|
Template: corev1.PodTemplateSpec{
|
||||||
|
Spec: corev1.PodSpec{
|
||||||
|
Containers: []corev1.Container{
|
||||||
|
{
|
||||||
|
Name: "runner",
|
||||||
|
Image: "ghcr.io/actions/runner",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// create autoscaling runner set before starting a manager
|
||||||
|
err = k8sClient.Create(ctx, autoscalingRunnerSet)
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
|
||||||
|
startManagers(GinkgoT(), mgr)
|
||||||
|
|
||||||
|
Eventually(func() bool {
|
||||||
|
ars := new(v1alpha1.AutoscalingRunnerSet)
|
||||||
|
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Name}, ars)
|
||||||
|
return errors.IsNotFound(err)
|
||||||
|
}).Should(BeTrue())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
 package actionsgithubcom
 
+import corev1 "k8s.io/api/core/v1"
+
 const (
 	LabelKeyRunnerTemplateHash = "runner-template-hash"
 	LabelKeyPodTemplateHash    = "pod-template-hash"
@@ -16,3 +18,47 @@ const (
 	EnvVarHTTPSProxy = "https_proxy"
 	EnvVarNoProxy    = "no_proxy"
 )
+
+// Labels applied to resources
+const (
+	// Kubernetes labels
+	LabelKeyKubernetesPartOf    = "app.kubernetes.io/part-of"
+	LabelKeyKubernetesComponent = "app.kubernetes.io/component"
+	LabelKeyKubernetesVersion   = "app.kubernetes.io/version"
+
+	// Github labels
+	LabelKeyGitHubScaleSetName      = "actions.github.com/scale-set-name"
+	LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
+	LabelKeyGitHubEnterprise        = "actions.github.com/enterprise"
+	LabelKeyGitHubOrganization      = "actions.github.com/organization"
+	LabelKeyGitHubRepository        = "actions.github.com/repository"
+)
+
+// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
+const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
+
+const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
+
+// Labels applied to listener roles
+const (
+	labelKeyListenerName      = "auto-scaling-listener-name"
+	labelKeyListenerNamespace = "auto-scaling-listener-namespace"
+)
+
+// Annotations applied for later cleanup of resources
+const (
+	AnnotationKeyManagerRoleBindingName           = "actions.github.com/cleanup-manager-role-binding"
+	AnnotationKeyManagerRoleName                  = "actions.github.com/cleanup-manager-role-name"
+	AnnotationKeyKubernetesModeRoleName           = "actions.github.com/cleanup-kubernetes-mode-role-name"
+	AnnotationKeyKubernetesModeRoleBindingName    = "actions.github.com/cleanup-kubernetes-mode-role-binding-name"
+	AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name"
+	AnnotationKeyGitHubSecretName                 = "actions.github.com/cleanup-github-secret-name"
+	AnnotationKeyNoPermissionServiceAccountName   = "actions.github.com/cleanup-no-permission-service-account-name"
+)
+
+// DefaultScaleSetListenerImagePullPolicy is the default pull policy applied
+// to the listener when ImagePullPolicy is not specified
+const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent
+
+// ownerKey is field selector matching the owner name of a particular resource
+const resourceOwnerKey = ".metadata.controller"
|
||||||
|
@@ -107,7 +107,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
     }
     if !done {
       log.Info("Waiting for ephemeral runner owned resources to be deleted")
-      return ctrl.Result{}, nil
+      return ctrl.Result{Requeue: true}, nil
     }
 
     done, err = r.cleanupContainerHooksResources(ctx, ephemeralRunner, log)
@@ -643,7 +643,7 @@ func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1
   }
 
   log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
-  return ctrl.Result{}, nil
+  return ctrl.Result{Requeue: true}, nil
 }
 
 // updateRunStatusFromPod is responsible for updating non-exiting statuses.
@@ -792,7 +792,6 @@ func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
   return ctrl.NewControllerManagedBy(mgr).
     For(&v1alpha1.EphemeralRunner{}).
     Owns(&corev1.Pod{}).
-    Owns(&corev1.Secret{}).
     WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
     Named("ephemeral-runner-controller").
     Complete(r)
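Both Reconcile hunks above swap ctrl.Result{} for ctrl.Result{Requeue: true}. In controller-runtime, returning an empty Result with a nil error parks the request until another watch event arrives, while Requeue: true puts the request straight back on the work queue (subject to its rate limiting), so the waiting-for-deletion path and the freshly created JIT secret are re-checked without depending on an external event. A simplified, dependency-free mirror of the Result semantics (the struct below is illustrative; the real type is reconcile.Result from sigs.k8s.io/controller-runtime):

// Simplified mirror of controller-runtime's reconcile.Result, for illustration only.
package main

import (
  "fmt"
  "time"
)

type Result struct {
  Requeue      bool          // re-enqueue the request right away, subject to workqueue rate limiting
  RequeueAfter time.Duration // re-enqueue after a fixed delay
}

func main() {
  waitForOwnedResources := Result{Requeue: true}                  // what the hunks above now return
  waitForExternalSystem := Result{RequeueAfter: 30 * time.Second} // delay-based alternative
  done := Result{}                                                // rely on the next watch event

  fmt.Println(waitForOwnedResources, waitForExternalSystem, done)
}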
@@ -40,7 +40,6 @@ import (
 )
 
 const (
-  ephemeralRunnerSetReconcilerOwnerKey = ".metadata.controller"
   ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer"
 )
 
@@ -56,6 +55,7 @@ type EphemeralRunnerSetReconciler struct {
 
 //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/finalizers,verbs=update;patch
 //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get
 
@@ -146,7 +146,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
     ctx,
     ephemeralRunnerList,
     client.InNamespace(req.Namespace),
-    client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: req.Name},
+    client.MatchingFields{resourceOwnerKey: req.Name},
   )
   if err != nil {
     log.Error(err, "Unable to list child ephemeral runners")
@@ -200,11 +200,18 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
     }
   }
 
+  desiredStatus := v1alpha1.EphemeralRunnerSetStatus{
+    CurrentReplicas:         total,
+    PendingEphemeralRunners: len(pendingEphemeralRunners),
+    RunningEphemeralRunners: len(runningEphemeralRunners),
+    FailedEphemeralRunners:  len(failedEphemeralRunners),
+  }
+
   // Update the status if needed.
-  if ephemeralRunnerSet.Status.CurrentReplicas != total {
+  if ephemeralRunnerSet.Status != desiredStatus {
     log.Info("Updating status with current runners count", "count", total)
     if err := patchSubResource(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
-      obj.Status.CurrentReplicas = total
+      obj.Status = desiredStatus
     }); err != nil {
       log.Error(err, "Failed to update status with current runners count")
       return ctrl.Result{}, err
@@ -235,7 +242,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e
 
 func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
   ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
-  err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name})
+  err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
   if err != nil {
     return false, fmt.Errorf("failed to list child ephemeral runners: %v", err)
   }
@@ -350,9 +357,8 @@ func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ep
       Name:      proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet),
       Namespace: ephemeralRunnerSet.Namespace,
       Labels: map[string]string{
-        // TODO: figure out autoScalingRunnerSet name and set it as a label for this secret
-        // "auto-scaling-runner-set-namespace": ephemeralRunnerSet.Namespace,
-        // "auto-scaling-runner-set-name": ephemeralRunnerSet.Name,
+        LabelKeyGitHubScaleSetName:      ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName],
+        LabelKeyGitHubScaleSetNamespace: ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace],
       },
     },
     Data: proxySecretData,
@@ -515,7 +521,7 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte
 // SetupWithManager sets up the controller with the Manager.
 func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
   // Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups.
-  if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, ephemeralRunnerSetReconcilerOwnerKey, func(rawObj client.Object) []string {
+  if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string {
     groupVersion := v1alpha1.GroupVersion.String()
 
     // grab the job object, extract the owner...
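The status hunk above now assembles a desiredStatus value and compares and patches the whole struct instead of the single CurrentReplicas field, so the pending, running, and failed counters stay in sync in one place. That works because the status type consists of comparable fields, letting a plain != detect any difference. A minimal sketch (field names follow the diff; the int types and the standalone program are assumptions):

// Minimal sketch: comparing a whole status struct (all comparable fields) instead of one counter.
package main

import "fmt"

type EphemeralRunnerSetStatus struct {
  CurrentReplicas         int
  PendingEphemeralRunners int
  RunningEphemeralRunners int
  FailedEphemeralRunners  int
}

func main() {
  current := EphemeralRunnerSetStatus{CurrentReplicas: 3}
  desired := EphemeralRunnerSetStatus{CurrentReplicas: 3, PendingEphemeralRunners: 1, RunningEphemeralRunners: 1, FailedEphemeralRunners: 1}

  if current != desired { // one comparison covers every counter
    current = desired // in the controller this is done through a status patch
  }
  fmt.Printf("%+v\n", current)
}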
@@ -559,6 +559,181 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
         ephemeralRunnerSetTestTimeout,
         ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "0 EphemeralRunner should be created")
     })
+
+    It("Should update status on Ephemeral Runner state changes", func() {
+      created := new(actionsv1alpha1.EphemeralRunnerSet)
+      Eventually(
+        func() error {
+          return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created)
+        },
+        ephemeralRunnerSetTestTimeout,
+        ephemeralRunnerSetTestInterval,
+      ).Should(Succeed(), "EphemeralRunnerSet should be created")
+
+      // Scale up the EphemeralRunnerSet
+      updated := created.DeepCopy()
+      updated.Spec.Replicas = 3
+      err := k8sClient.Update(ctx, updated)
+      Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet replica count")
+
+      runnerList := new(actionsv1alpha1.EphemeralRunnerList)
+      Eventually(
+        func() (bool, error) {
+          err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
+          if err != nil {
+            return false, err
+          }
+
+          if len(runnerList.Items) != 3 {
+            return false, err
+          }
+
+          var pendingOriginal *v1alpha1.EphemeralRunner
+          var runningOriginal *v1alpha1.EphemeralRunner
+          var failedOriginal *v1alpha1.EphemeralRunner
+          var empty []*v1alpha1.EphemeralRunner
+          for _, runner := range runnerList.Items {
+            switch runner.Status.RunnerId {
+            case 101:
+              pendingOriginal = runner.DeepCopy()
+            case 102:
+              runningOriginal = runner.DeepCopy()
+            case 103:
+              failedOriginal = runner.DeepCopy()
+            default:
+              empty = append(empty, runner.DeepCopy())
+            }
+          }
+
+          refetch := false
+          if pendingOriginal == nil { // if NO pending
+            refetch = true
+            pendingOriginal = empty[0]
+            empty = empty[1:]
+
+            pending := pendingOriginal.DeepCopy()
+            pending.Status.RunnerId = 101
+            pending.Status.Phase = corev1.PodPending
+
+            err = k8sClient.Status().Patch(ctx, pending, client.MergeFrom(pendingOriginal))
+            if err != nil {
+              return false, err
+            }
+          }
+
+          if runningOriginal == nil { // if NO running
+            refetch = true
+            runningOriginal = empty[0]
+            empty = empty[1:]
+            running := runningOriginal.DeepCopy()
+            running.Status.RunnerId = 102
+            running.Status.Phase = corev1.PodRunning
+
+            err = k8sClient.Status().Patch(ctx, running, client.MergeFrom(runningOriginal))
+            if err != nil {
+              return false, err
+            }
+          }
+
+          if failedOriginal == nil { // if NO failed
+            refetch = true
+            failedOriginal = empty[0]
+
+            failed := pendingOriginal.DeepCopy()
+            failed.Status.RunnerId = 103
+            failed.Status.Phase = corev1.PodFailed
+
+            err = k8sClient.Status().Patch(ctx, failed, client.MergeFrom(failedOriginal))
+            if err != nil {
+              return false, err
+            }
+          }
+
+          return !refetch, nil
+        },
+        ephemeralRunnerSetTestTimeout,
+        ephemeralRunnerSetTestInterval,
+      ).Should(BeTrue(), "Failed to eventually update to one pending, one running and one failed")
+
+      desiredStatus := v1alpha1.EphemeralRunnerSetStatus{
+        CurrentReplicas:         3,
+        PendingEphemeralRunners: 1,
+        RunningEphemeralRunners: 1,
+        FailedEphemeralRunners:  1,
+      }
+      Eventually(
+        func() (v1alpha1.EphemeralRunnerSetStatus, error) {
+          updated := new(v1alpha1.EphemeralRunnerSet)
+          err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated)
+          if err != nil {
+            return v1alpha1.EphemeralRunnerSetStatus{}, err
+          }
+          return updated.Status, nil
+        },
+        ephemeralRunnerSetTestTimeout,
+        ephemeralRunnerSetTestInterval,
+      ).Should(BeEquivalentTo(desiredStatus), "Status is not eventually updated to the desired one")
+
+      updated = new(v1alpha1.EphemeralRunnerSet)
+      err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated)
+      Expect(err).NotTo(HaveOccurred(), "Failed to fetch ephemeral runner set")
+
+      updatedOriginal := updated.DeepCopy()
+      updated.Spec.Replicas = 0
+
+      err = k8sClient.Patch(ctx, updated, client.MergeFrom(updatedOriginal))
+      Expect(err).NotTo(HaveOccurred(), "Failed to patch ephemeral runner set with 0 replicas")
+
+      Eventually(
+        func() (int, error) {
+          runnerList = new(actionsv1alpha1.EphemeralRunnerList)
+          err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
+          if err != nil {
+            return -1, err
+          }
+          return len(runnerList.Items), nil
+        },
+        ephemeralRunnerSetTestTimeout,
+        ephemeralRunnerSetTestInterval,
+      ).Should(BeEquivalentTo(1), "Failed to eventually scale down")
+
+      desiredStatus = v1alpha1.EphemeralRunnerSetStatus{
+        CurrentReplicas:         1,
+        PendingEphemeralRunners: 0,
+        RunningEphemeralRunners: 0,
+        FailedEphemeralRunners:  1,
+      }
+
+      Eventually(
+        func() (v1alpha1.EphemeralRunnerSetStatus, error) {
+          updated := new(v1alpha1.EphemeralRunnerSet)
+          err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated)
+          if err != nil {
+            return v1alpha1.EphemeralRunnerSetStatus{}, err
+          }
+          return updated.Status, nil
+        },
+        ephemeralRunnerSetTestTimeout,
+        ephemeralRunnerSetTestInterval,
+      ).Should(BeEquivalentTo(desiredStatus), "Status is not eventually updated to the desired one")
+
+      err = k8sClient.Delete(ctx, &runnerList.Items[0])
+      Expect(err).To(BeNil(), "Failed to delete failed ephemeral runner")
+
+      desiredStatus = v1alpha1.EphemeralRunnerSetStatus{} // empty
+      Eventually(
+        func() (v1alpha1.EphemeralRunnerSetStatus, error) {
+          updated := new(v1alpha1.EphemeralRunnerSet)
+          err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated)
+          if err != nil {
+            return v1alpha1.EphemeralRunnerSetStatus{}, err
+          }
+          return updated.Status, nil
+        },
+        ephemeralRunnerSetTestTimeout,
+        ephemeralRunnerSetTestInterval,
+      ).Should(BeEquivalentTo(desiredStatus), "Status is not eventually updated to the desired one")
+    })
   })
 })
 
@@ -821,12 +996,13 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
     err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0]))
     Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status")
 
-    updatedRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
-    err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, updatedRunnerSet)
+    runnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
+    err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, runnerSet)
     Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
 
+    updatedRunnerSet := runnerSet.DeepCopy()
     updatedRunnerSet.Spec.Replicas = 0
-    err = k8sClient.Update(ctx, updatedRunnerSet)
+    err = k8sClient.Patch(ctx, updatedRunnerSet, client.MergeFrom(runnerSet))
     Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
 
     Eventually(
@@ -965,12 +1141,13 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
     err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0]))
     Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status")
 
-    updatedRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
-    err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, updatedRunnerSet)
+    currentRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
+    err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, currentRunnerSet)
     Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
 
+    updatedRunnerSet := currentRunnerSet.DeepCopy()
     updatedRunnerSet.Spec.Replicas = 0
-    err = k8sClient.Update(ctx, updatedRunnerSet)
+    err = k8sClient.Patch(ctx, updatedRunnerSet, client.MergeFrom(currentRunnerSet))
     Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
 
     // wait for server to be called
@@ -8,6 +8,7 @@ import (
 
   "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
   "github.com/actions/actions-runner-controller/build"
+  "github.com/actions/actions-runner-controller/github/actions"
   "github.com/actions/actions-runner-controller/hash"
   corev1 "k8s.io/api/core/v1"
   rbacv1 "k8s.io/api/rbac/v1"
@@ -19,14 +20,90 @@ const (
   jitTokenKey = "jitToken"
 )
 
-// labels applied to resources
-const (
-  LabelKeyAutoScaleRunnerSetName      = "auto-scaling-runner-set-name"
-  LabelKeyAutoScaleRunnerSetNamespace = "auto-scaling-runner-set-namespace"
-)
+var commonLabelKeys = [...]string{
+  LabelKeyKubernetesPartOf,
+  LabelKeyKubernetesComponent,
+  LabelKeyKubernetesVersion,
+  LabelKeyGitHubScaleSetName,
+  LabelKeyGitHubScaleSetNamespace,
+  LabelKeyGitHubEnterprise,
+  LabelKeyGitHubOrganization,
+  LabelKeyGitHubRepository,
+}
+
+const labelValueKubernetesPartOf = "gha-runner-scale-set"
+
+// scaleSetListenerImagePullPolicy is applied to all listeners
+var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy
+
+func SetListenerImagePullPolicy(pullPolicy string) bool {
+  switch p := corev1.PullPolicy(pullPolicy); p {
+  case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent:
+    scaleSetListenerImagePullPolicy = p
+    return true
+  default:
+    return false
+  }
+}
 
 type resourceBuilder struct{}
 
+func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
+  runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
+  if err != nil {
+    return nil, err
+  }
+
+  effectiveMinRunners := 0
+  effectiveMaxRunners := math.MaxInt32
+  if autoscalingRunnerSet.Spec.MaxRunners != nil {
+    effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
+  }
+  if autoscalingRunnerSet.Spec.MinRunners != nil {
+    effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
+  }
+
+  githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl)
+  if err != nil {
+    return nil, fmt.Errorf("failed to parse github config from url: %v", err)
+  }
+
+  autoscalingListener := &v1alpha1.AutoscalingListener{
+    ObjectMeta: metav1.ObjectMeta{
+      Name:      scaleSetListenerName(autoscalingRunnerSet),
+      Namespace: namespace,
+      Labels: map[string]string{
+        LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
+        LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
+        LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
+        LabelKeyKubernetesComponent:     "runner-scale-set-listener",
+        LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
+        LabelKeyGitHubEnterprise:        githubConfig.Enterprise,
+        LabelKeyGitHubOrganization:      githubConfig.Organization,
+        LabelKeyGitHubRepository:        githubConfig.Repository,
+        labelKeyRunnerSpecHash:          autoscalingRunnerSet.ListenerSpecHash(),
+      },
+    },
+    Spec: v1alpha1.AutoscalingListenerSpec{
+      GitHubConfigUrl:               autoscalingRunnerSet.Spec.GitHubConfigUrl,
+      GitHubConfigSecret:            autoscalingRunnerSet.Spec.GitHubConfigSecret,
+      RunnerScaleSetId:              runnerScaleSetId,
+      AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
+      AutoscalingRunnerSetName:      autoscalingRunnerSet.Name,
+      EphemeralRunnerSetName:        ephemeralRunnerSet.Name,
+      MinRunners:                    effectiveMinRunners,
+      MaxRunners:                    effectiveMaxRunners,
+      Image:                         image,
+      ImagePullPolicy:               scaleSetListenerImagePullPolicy,
+      ImagePullSecrets:              imagePullSecrets,
+      Proxy:                         autoscalingRunnerSet.Spec.Proxy,
+      GitHubServerTLS:               autoscalingRunnerSet.Spec.GitHubServerTLS,
+    },
+  }
+
+  return autoscalingListener, nil
+}
+
 func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
   listenerEnv := []corev1.EnvVar{
     {
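The new exported SetListenerImagePullPolicy helper above accepts a pull-policy string, keeps the package-level default when the value is not one of Always, Never, or IfNotPresent, and reports whether it was applied; newAutoScalingListener then copies the effective policy into the listener spec. A hypothetical caller might validate configuration at startup like this (the stand-in function, the environment variable name, and main are assumptions; only the accept/reject contract comes from the diff):

// Hypothetical usage sketch; SetListenerImagePullPolicy is re-declared here as a
// self-contained stand-in with the same contract as the function in the diff above.
package main

import (
  "fmt"
  "os"
)

func SetListenerImagePullPolicy(p string) bool {
  switch p {
  case "Always", "Never", "IfNotPresent":
    return true // a valid Kubernetes pull policy is accepted
  default:
    return false // anything else is rejected and the default stays in effect
  }
}

func main() {
  policy := os.Getenv("LISTENER_IMAGE_PULL_POLICY") // assumed configuration source
  if policy != "" && !SetListenerImagePullPolicy(policy) {
    fmt.Fprintf(os.Stderr, "invalid image pull policy %q\n", policy)
    os.Exit(1)
  }
}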
@@ -119,7 +196,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
       Name:  autoscalingListenerContainerName,
       Image: autoscalingListener.Spec.Image,
       Env:   listenerEnv,
-      ImagePullPolicy: corev1.PullIfNotPresent,
+      ImagePullPolicy: autoscalingListener.Spec.ImagePullPolicy,
       Command: []string{
         "/github-runnerscaleset-listener",
       },
@@ -129,6 +206,11 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
     RestartPolicy: corev1.RestartPolicyNever,
   }
 
+  labels := make(map[string]string, len(autoscalingListener.Labels))
+  for key, val := range autoscalingListener.Labels {
+    labels[key] = val
+  }
+
   newRunnerScaleSetListenerPod := &corev1.Pod{
     TypeMeta: metav1.TypeMeta{
       Kind: "Pod",
@@ -137,10 +219,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
     ObjectMeta: metav1.ObjectMeta{
       Name:      autoscalingListener.Name,
       Namespace: autoscalingListener.Namespace,
-      Labels: map[string]string{
-        LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-        LabelKeyAutoScaleRunnerSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
-      },
+      Labels:    labels,
     },
     Spec: podSpec,
   }
@@ -148,47 +227,14 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
   return newRunnerScaleSetListenerPod
 }
 
-func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
-  runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
-  if err != nil {
-    return nil, err
-  }
-  runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
-
-  newLabels := map[string]string{}
-  newLabels[LabelKeyRunnerSpecHash] = runnerSpecHash
-
-  newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
-    TypeMeta: metav1.TypeMeta{},
-    ObjectMeta: metav1.ObjectMeta{
-      GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
-      Namespace:    autoscalingRunnerSet.ObjectMeta.Namespace,
-      Labels:       newLabels,
-    },
-    Spec: v1alpha1.EphemeralRunnerSetSpec{
-      Replicas: 0,
-      EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
-        RunnerScaleSetId:   runnerScaleSetId,
-        GitHubConfigUrl:    autoscalingRunnerSet.Spec.GitHubConfigUrl,
-        GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
-        Proxy:              autoscalingRunnerSet.Spec.Proxy,
-        GitHubServerTLS:    autoscalingRunnerSet.Spec.GitHubServerTLS,
-        PodTemplateSpec:    autoscalingRunnerSet.Spec.Template,
-      },
-    },
-  }
-
-  return newEphemeralRunnerSet, nil
-}
-
 func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
   return &corev1.ServiceAccount{
     ObjectMeta: metav1.ObjectMeta{
       Name:      scaleSetListenerServiceAccountName(autoscalingListener),
       Namespace: autoscalingListener.Namespace,
       Labels: map[string]string{
-        LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-        LabelKeyAutoScaleRunnerSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
+        LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+        LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
       },
     },
   }
@@ -202,10 +248,10 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
       Name:      scaleSetListenerRoleName(autoscalingListener),
       Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
       Labels: map[string]string{
-        LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-        LabelKeyAutoScaleRunnerSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
-        "auto-scaling-listener-namespace":   autoscalingListener.Namespace,
-        "auto-scaling-listener-name":        autoscalingListener.Name,
+        LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+        LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
+        labelKeyListenerNamespace:       autoscalingListener.Namespace,
+        labelKeyListenerName:            autoscalingListener.Name,
         "role-policy-rules-hash": rulesHash,
       },
     },
@@ -236,10 +282,10 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
       Name:      scaleSetListenerRoleName(autoscalingListener),
       Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
       Labels: map[string]string{
-        LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-        LabelKeyAutoScaleRunnerSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
-        "auto-scaling-listener-namespace":   autoscalingListener.Namespace,
-        "auto-scaling-listener-name":        autoscalingListener.Name,
+        LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+        LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
+        labelKeyListenerNamespace:       autoscalingListener.Namespace,
+        labelKeyListenerName:            autoscalingListener.Name,
         "role-binding-role-ref-hash": roleRefHash,
         "role-binding-subject-hash":  subjectHash,
       },
@@ -259,8 +305,8 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
       Name:      scaleSetListenerSecretMirrorName(autoscalingListener),
       Namespace: autoscalingListener.Namespace,
       Labels: map[string]string{
-        LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-        LabelKeyAutoScaleRunnerSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
+        LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+        LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
         "secret-data-hash": dataHash,
       },
     },
@@ -270,56 +316,79 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
   return newListenerSecret
 }
 
-func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
-  runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
+func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
+  runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
   if err != nil {
     return nil, err
   }
+  runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
 
-  effectiveMinRunners := 0
-  effectiveMaxRunners := math.MaxInt32
-  if autoscalingRunnerSet.Spec.MaxRunners != nil {
-    effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
-  }
-  if autoscalingRunnerSet.Spec.MinRunners != nil {
-    effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
-  }
+  newLabels := map[string]string{
+    labelKeyRunnerSpecHash:          runnerSpecHash,
+    LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
+    LabelKeyKubernetesComponent:     "runner-set",
+    LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
+    LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
+    LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
+  }
 
-  autoscalingListener := &v1alpha1.AutoscalingListener{
+  if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
+    return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
+  }
+
+  newAnnotations := map[string]string{
+    AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
+  }
+
+  newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
+    TypeMeta: metav1.TypeMeta{},
     ObjectMeta: metav1.ObjectMeta{
-      Name:      scaleSetListenerName(autoscalingRunnerSet),
-      Namespace: namespace,
-      Labels: map[string]string{
-        LabelKeyAutoScaleRunnerSetNamespace: autoscalingRunnerSet.Namespace,
-        LabelKeyAutoScaleRunnerSetName:      autoscalingRunnerSet.Name,
-        LabelKeyRunnerSpecHash:              autoscalingRunnerSet.ListenerSpecHash(),
+      GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
+      Namespace:    autoscalingRunnerSet.ObjectMeta.Namespace,
+      Labels:       newLabels,
+      Annotations:  newAnnotations,
       },
-    },
-    Spec: v1alpha1.AutoscalingListenerSpec{
-      GitHubConfigUrl:               autoscalingRunnerSet.Spec.GitHubConfigUrl,
-      GitHubConfigSecret:            autoscalingRunnerSet.Spec.GitHubConfigSecret,
-      RunnerScaleSetId:              runnerScaleSetId,
-      AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
-      AutoscalingRunnerSetName:      autoscalingRunnerSet.Name,
-      EphemeralRunnerSetName:        ephemeralRunnerSet.Name,
-      MinRunners:                    effectiveMinRunners,
-      MaxRunners:                    effectiveMaxRunners,
-      Image:                         image,
-      ImagePullSecrets:              imagePullSecrets,
-      Proxy:                         autoscalingRunnerSet.Spec.Proxy,
-      GitHubServerTLS:               autoscalingRunnerSet.Spec.GitHubServerTLS,
+    Spec: v1alpha1.EphemeralRunnerSetSpec{
+      Replicas: 0,
+      EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
+        RunnerScaleSetId:   runnerScaleSetId,
+        GitHubConfigUrl:    autoscalingRunnerSet.Spec.GitHubConfigUrl,
+        GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
+        Proxy:              autoscalingRunnerSet.Spec.Proxy,
+        GitHubServerTLS:    autoscalingRunnerSet.Spec.GitHubServerTLS,
+        PodTemplateSpec:    autoscalingRunnerSet.Spec.Template,
+      },
     },
   }
 
-  return autoscalingListener, nil
+  return newEphemeralRunnerSet, nil
 }
 
 func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
+  labels := make(map[string]string)
+  for _, key := range commonLabelKeys {
+    switch key {
+    case LabelKeyKubernetesComponent:
+      labels[key] = "runner"
+    default:
+      v, ok := ephemeralRunnerSet.Labels[key]
+      if !ok {
+        continue
+      }
+      labels[key] = v
+    }
+  }
+  annotations := make(map[string]string)
+  for key, val := range ephemeralRunnerSet.Annotations {
+    annotations[key] = val
+  }
   return &v1alpha1.EphemeralRunner{
     TypeMeta: metav1.TypeMeta{},
     ObjectMeta: metav1.ObjectMeta{
       GenerateName: ephemeralRunnerSet.Name + "-runner-",
       Namespace:    ephemeralRunnerSet.Namespace,
+      Labels:       labels,
+      Annotations:  annotations,
     },
     Spec: ephemeralRunnerSet.Spec.EphemeralRunnerSpec,
   }
@@ -337,6 +406,7 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
   for k, v := range runner.Spec.PodTemplateSpec.Labels {
     labels[k] = v
   }
+  labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
 
   for k, v := range runner.ObjectMeta.Annotations {
     annotations[k] = v
@@ -352,8 +422,6 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
     runner.Status.RunnerJITConfig,
   )
 
-  labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
-
   objectMeta := metav1.ObjectMeta{
     Name:      runner.ObjectMeta.Name,
     Namespace: runner.ObjectMeta.Namespace,
@@ -469,3 +537,22 @@ func rulesForListenerRole(resourceNames []string) []rbacv1.PolicyRule {
     },
   }
 }
+
+func applyGitHubURLLabels(url string, labels map[string]string) error {
+  githubConfig, err := actions.ParseGitHubConfigFromURL(url)
+  if err != nil {
+    return fmt.Errorf("failed to parse github config from url: %v", err)
+  }
+
+  if len(githubConfig.Enterprise) > 0 {
+    labels[LabelKeyGitHubEnterprise] = githubConfig.Enterprise
+  }
+  if len(githubConfig.Organization) > 0 {
+    labels[LabelKeyGitHubOrganization] = githubConfig.Organization
+  }
+  if len(githubConfig.Repository) > 0 {
+    labels[LabelKeyGitHubRepository] = githubConfig.Repository
+  }
+
+  return nil
+}
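Taken together, these hunks centralise labelling: newEphemeralRunnerSet stamps the scale-set name/namespace, part-of, component, and version labels plus the GitHub URL labels from applyGitHubURLLabels, and newEphemeralRunner copies the commonLabelKeys subset down to each runner while overriding the component value. A self-contained sketch of that propagation (the key strings match the constants in the diff; the parent map, helper name, and main are made up for illustration):

// Self-contained sketch of the label-propagation idea: copy a fixed set of keys from the
// parent's labels onto the child, overriding the component label.
package main

import "fmt"

var commonLabelKeys = [...]string{
  "app.kubernetes.io/part-of",
  "app.kubernetes.io/component",
  "app.kubernetes.io/version",
  "actions.github.com/scale-set-name",
  "actions.github.com/scale-set-namespace",
  "actions.github.com/enterprise",
  "actions.github.com/organization",
  "actions.github.com/repository",
}

func runnerLabels(parent map[string]string) map[string]string {
  labels := make(map[string]string)
  for _, key := range commonLabelKeys {
    if key == "app.kubernetes.io/component" {
      labels[key] = "runner" // children get their own component value
      continue
    }
    if v, ok := parent[key]; ok {
      labels[key] = v // everything else is inherited only if the parent has it
    }
  }
  return labels
}

func main() {
  parent := map[string]string{
    "app.kubernetes.io/part-of":         "gha-runner-scale-set",
    "app.kubernetes.io/component":       "runner-set",
    "actions.github.com/scale-set-name": "my-scale-set",
    "actions.github.com/organization":   "my-org",
  }
  fmt.Println(runnerLabels(parent))
}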
Some files were not shown because too many files have changed in this diff.