mirror of
https://github.com/actions/actions-runner-controller.git
synced 2025-12-10 11:41:27 +00:00
Compare commits
256 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
52fc819339 | ||
|
|
215b245881 | ||
|
|
a3df23b07c | ||
|
|
f5c69654e7 | ||
|
|
abc0b678d3 | ||
|
|
963ab2a748 | ||
|
|
8a41a596b6 | ||
|
|
e10c437f46 | ||
|
|
a0a3916c80 | ||
|
|
1c360d7e26 | ||
|
|
20bb860a37 | ||
|
|
6a75bc0880 | ||
|
|
78271000c0 | ||
|
|
a36b0e58b0 | ||
|
|
336e11a4e9 | ||
|
|
dcb64f0b9e | ||
|
|
0dadfc4d37 | ||
|
|
dc58f6ba13 | ||
|
|
06cbd632b8 | ||
|
|
9f33ae1507 | ||
|
|
63a6b5a7f0 | ||
|
|
fddc5bf1c8 | ||
|
|
d90ce2bed5 | ||
|
|
cd996e7c27 | ||
|
|
297442975e | ||
|
|
5271f316e6 | ||
|
|
9845a934f4 | ||
|
|
e0a7e142e0 | ||
|
|
f9a11a8b0b | ||
|
|
fde1893494 | ||
|
|
6fe8008640 | ||
|
|
2fee26ddce | ||
|
|
685f7162a4 | ||
|
|
d134dee14b | ||
|
|
c33ce998f4 | ||
|
|
78a93566af | ||
|
|
81dea9b3dc | ||
|
|
7ca3df3605 | ||
|
|
2343cd2d7b | ||
|
|
cf18cb3fb0 | ||
|
|
ae8b27a9a3 | ||
|
|
58ee5e8c4e | ||
|
|
fade63a663 | ||
|
|
ac4056f85b | ||
|
|
462d044604 | ||
|
|
94934819c4 | ||
|
|
aac811f210 | ||
|
|
e7ec736738 | ||
|
|
90ea691e72 | ||
|
|
32a653c0ca | ||
|
|
c7b2dd1764 | ||
|
|
80af7fc125 | ||
|
|
34909f0cf1 | ||
|
|
8afef51c8b | ||
|
|
032443fcfd | ||
|
|
91c8991835 | ||
|
|
c5ebe750dc | ||
|
|
34fdbf1231 | ||
|
|
44e9b7d8eb | ||
|
|
7ab516fdab | ||
|
|
e571df52b5 | ||
|
|
706ec17bf4 | ||
|
|
30355f742b | ||
|
|
8a5fb6ccb7 | ||
|
|
e930ba6e98 | ||
|
|
5ba3805a3f | ||
|
|
f798cddca1 | ||
|
|
367ee46122 | ||
|
|
f4a318fca6 | ||
|
|
4ee21cb24b | ||
|
|
102c9e1afa | ||
|
|
73e676f951 | ||
|
|
41ebb43c65 | ||
|
|
aa50b62c01 | ||
|
|
942f773fef | ||
|
|
21722a5de8 | ||
|
|
a2d4b95b79 | ||
|
|
04fb9f4fa1 | ||
|
|
8304b80955 | ||
|
|
9bd4025e9c | ||
|
|
94c089c407 | ||
|
|
9859bbc7f2 | ||
|
|
c1e2c4ef9d | ||
|
|
2ee15dbca3 | ||
|
|
a4cf626410 | ||
|
|
58f4b6ff2d | ||
|
|
22fbd10bd3 | ||
|
|
52b97139b6 | ||
|
|
3e0bc3f7be | ||
|
|
ba1ac0990b | ||
|
|
76fe43e8e0 | ||
|
|
8869ad28bb | ||
|
|
b86af190f7 | ||
|
|
1a491cbfe5 | ||
|
|
087f20fd5d | ||
|
|
a880114e57 | ||
|
|
e80bc21fa5 | ||
|
|
56754094ea | ||
|
|
8fa4520376 | ||
|
|
a804bf8b00 | ||
|
|
5dea6db412 | ||
|
|
2a0b770a63 | ||
|
|
a7ef871248 | ||
|
|
e45e4c53f1 | ||
|
|
a608abd124 | ||
|
|
02d9add322 | ||
|
|
f5ac134787 | ||
|
|
42abad5def | ||
|
|
514b7da742 | ||
|
|
c8e3bb5ec3 | ||
|
|
878c9b8b49 | ||
|
|
4536707af6 | ||
|
|
13802c5a6d | ||
|
|
362fa5d52e | ||
|
|
65184f1ed8 | ||
|
|
c23e31123c | ||
|
|
56e1c62ac2 | ||
|
|
64cedff2b4 | ||
|
|
37f93b794e | ||
|
|
dc833e57a0 | ||
|
|
5228aded87 | ||
|
|
f49d08e4bc | ||
|
|
064039afc0 | ||
|
|
e5d8d65396 | ||
|
|
c465ace8fb | ||
|
|
34f3878829 | ||
|
|
44c3931d8e | ||
|
|
08acb1b831 | ||
|
|
40811ebe0e | ||
|
|
3417c5a3a8 | ||
|
|
172faa883c | ||
|
|
9e6c7d019f | ||
|
|
9fbcafa703 | ||
|
|
2bf83d0d7f | ||
|
|
19d30dea5f | ||
|
|
6c66c1633f | ||
|
|
e55708588b | ||
|
|
261d4371b5 | ||
|
|
bd9f32e354 | ||
|
|
babbfc77d5 | ||
|
|
322df79617 | ||
|
|
1c7c6639ed | ||
|
|
bcaac39a2e | ||
|
|
af625dd1cb | ||
|
|
44969659df | ||
|
|
a5f98dea75 | ||
|
|
1d24d3b00d | ||
|
|
9994d3aa60 | ||
|
|
a2ea12e93c | ||
|
|
d7b589bed5 | ||
|
|
4f293c6f79 | ||
|
|
c569304271 | ||
|
|
068f987238 | ||
|
|
a462ecbe79 | ||
|
|
c5d6842d5f | ||
|
|
947bc8ab5b | ||
|
|
9d5c6e85c5 | ||
|
|
2420a40c02 | ||
|
|
b3e7c723d2 | ||
|
|
2e36db52c3 | ||
|
|
5d41609bea | ||
|
|
e289fe43d4 | ||
|
|
91fddca3f7 | ||
|
|
befe4cee0a | ||
|
|
548acdf05c | ||
|
|
41f2ca3ed9 | ||
|
|
00996ec799 | ||
|
|
893833fdd5 | ||
|
|
7f3eef8761 | ||
|
|
40c905f25d | ||
|
|
2984de912c | ||
|
|
1df06a69d7 | ||
|
|
be47190d4c | ||
|
|
e8d8c6f357 | ||
|
|
0c091f59b6 | ||
|
|
a4751b74e0 | ||
|
|
adad3d5530 | ||
|
|
70156e3fea | ||
|
|
69abd51f30 | ||
|
|
73e35b1dc6 | ||
|
|
c4178d5633 | ||
|
|
edf924106b | ||
|
|
34ebbf74d1 | ||
|
|
a9af82ec78 | ||
|
|
b5e9e14244 | ||
|
|
910269aa11 | ||
|
|
149cf47c83 | ||
|
|
ec3afef00d | ||
|
|
7d0918b6d5 | ||
|
|
678eafcd67 | ||
|
|
b6515fe25c | ||
|
|
1c7b7f467d | ||
|
|
73e22a1756 | ||
|
|
9b44f0051c | ||
|
|
6b4250ca90 | ||
|
|
ced88228fc | ||
|
|
44c06c21ce | ||
|
|
4103fe35df | ||
|
|
a44fe04bef | ||
|
|
274d0c874e | ||
|
|
256e08eb45 | ||
|
|
f677fd5872 | ||
|
|
d9627141dc | ||
|
|
3886f285f8 | ||
|
|
dab900462b | ||
|
|
dd8ec1a055 | ||
|
|
8e52a6d2cf | ||
|
|
9990243520 | ||
|
|
08919814b1 | ||
|
|
facae69e0b | ||
|
|
8f62e35f6b | ||
|
|
55951c2bdb | ||
|
|
c4297d25bb | ||
|
|
0774f0680c | ||
|
|
92ab11b4d2 | ||
|
|
7414dc6568 | ||
|
|
34efb9d585 | ||
|
|
fbad56197f | ||
|
|
a5cef7e47b | ||
|
|
1f4fe4681e | ||
|
|
067686c684 | ||
|
|
df12e00c9e | ||
|
|
cc26593a9b | ||
|
|
835eac7835 | ||
|
|
01e9dd31a9 | ||
|
|
803818162c | ||
|
|
219ba5b477 | ||
|
|
b09e3a2dc9 | ||
|
|
7ea60e497c | ||
|
|
c8918f5a7b | ||
|
|
d57d17f161 | ||
|
|
6e69c75637 | ||
|
|
882bfab569 | ||
|
|
3327f620fb | ||
|
|
282f2dd09c | ||
|
|
d67f80863e | ||
|
|
4932412cd6 | ||
|
|
6da1cde09c | ||
|
|
f9bae708c2 | ||
|
|
05a3908ba6 | ||
|
|
606ed1b28e | ||
|
|
de244a17be | ||
|
|
5be307ec62 | ||
|
|
ee71ff14bd | ||
|
|
9e93c7ee54 | ||
|
|
bb61bb1342 | ||
|
|
23fdca4786 | ||
|
|
211bacaf1e | ||
|
|
0324658a3f | ||
|
|
c4d3cff3df | ||
|
|
294bd75cf1 | ||
|
|
068a427c52 | ||
|
|
622eaa34f8 | ||
|
|
619667fc3b | ||
|
|
3e88ae2d38 | ||
|
|
360957cfbc |
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*.png filter=lfs diff=lfs merge=lfs -text
|
||||
202
.github/actions/execute-assert-arc-e2e/action.yaml
vendored
Normal file
202
.github/actions/execute-assert-arc-e2e/action.yaml
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
name: 'Execute and Assert ARC E2E Test Action'
|
||||
description: 'Queue E2E test workflow and assert workflow run result to be succeed'
|
||||
|
||||
inputs:
|
||||
auth-token:
|
||||
description: 'GitHub access token to queue workflow run'
|
||||
required: true
|
||||
repo-owner:
|
||||
description: "The repository owner name that has the test workflow file, ex: actions"
|
||||
required: true
|
||||
repo-name:
|
||||
description: "The repository name that has the test workflow file, ex: test"
|
||||
required: true
|
||||
workflow-file:
|
||||
description: 'The file name of the workflow yaml, ex: test.yml'
|
||||
required: true
|
||||
arc-name:
|
||||
description: 'The name of the configured gha-runner-scale-set'
|
||||
required: true
|
||||
arc-namespace:
|
||||
description: 'The namespace of the configured gha-runner-scale-set'
|
||||
required: true
|
||||
arc-controller-namespace:
|
||||
description: 'The namespace of the configured gha-runner-scale-set-controller'
|
||||
required: true
|
||||
wait-to-finish:
|
||||
description: 'Wait for the workflow run to finish'
|
||||
required: true
|
||||
default: "true"
|
||||
wait-to-running:
|
||||
description: 'Wait for the workflow run to start running'
|
||||
required: true
|
||||
default: "false"
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Queue test workflow
|
||||
shell: bash
|
||||
id: queue_workflow
|
||||
run: |
|
||||
queue_time=`date +%FT%TZ`
|
||||
echo "queue_time=$queue_time" >> $GITHUB_OUTPUT
|
||||
curl -X POST https://api.github.com/repos/${{inputs.repo-owner}}/${{inputs.repo-name}}/actions/workflows/${{inputs.workflow-file}}/dispatches \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
-H "Authorization: token ${{inputs.auth-token}}" \
|
||||
-d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'
|
||||
|
||||
- name: Fetch workflow run & job ids
|
||||
uses: actions/github-script@v6
|
||||
id: query_workflow
|
||||
with:
|
||||
script: |
|
||||
// Try to find the workflow run triggered by the previous step using the workflow_dispatch event.
|
||||
// - Find recently create workflow runs in the test repository
|
||||
// - For each workflow run, list its workflow job and see if the job's labels contain `inputs.arc-name`
|
||||
// - Since the inputs.arc-name should be unique per e2e workflow run, once we find the job with the label, we find the workflow that we just triggered.
|
||||
function sleep(ms) {
|
||||
return new Promise(resolve => setTimeout(resolve, ms))
|
||||
}
|
||||
const owner = '${{inputs.repo-owner}}'
|
||||
const repo = '${{inputs.repo-name}}'
|
||||
const workflow_id = '${{inputs.workflow-file}}'
|
||||
let workflow_run_id = 0
|
||||
let workflow_job_id = 0
|
||||
let workflow_run_html_url = ""
|
||||
let count = 0
|
||||
while (count++<12) {
|
||||
await sleep(10 * 1000);
|
||||
let listRunResponse = await github.rest.actions.listWorkflowRuns({
|
||||
owner: owner,
|
||||
repo: repo,
|
||||
workflow_id: workflow_id,
|
||||
created: '>${{steps.queue_workflow.outputs.queue_time}}'
|
||||
})
|
||||
if (listRunResponse.data.total_count > 0) {
|
||||
console.log(`Found some new workflow runs for ${workflow_id}`)
|
||||
for (let i = 0; i<listRunResponse.data.total_count; i++) {
|
||||
let workflowRun = listRunResponse.data.workflow_runs[i]
|
||||
console.log(`Check if workflow run ${workflowRun.id} is triggered by us.`)
|
||||
let listJobResponse = await github.rest.actions.listJobsForWorkflowRun({
|
||||
owner: owner,
|
||||
repo: repo,
|
||||
run_id: workflowRun.id
|
||||
})
|
||||
console.log(`Workflow run ${workflowRun.id} has ${listJobResponse.data.total_count} jobs.`)
|
||||
if (listJobResponse.data.total_count > 0) {
|
||||
for (let j = 0; j<listJobResponse.data.total_count; j++) {
|
||||
let workflowJob = listJobResponse.data.jobs[j]
|
||||
console.log(`Check if workflow job ${workflowJob.id} is triggered by us.`)
|
||||
console.log(JSON.stringify(workflowJob.labels));
|
||||
if (workflowJob.labels.includes('${{inputs.arc-name}}')) {
|
||||
console.log(`Workflow job ${workflowJob.id} (Run id: ${workflowJob.run_id}) is triggered by us.`)
|
||||
workflow_run_id = workflowJob.run_id
|
||||
workflow_job_id = workflowJob.id
|
||||
workflow_run_html_url = workflowRun.html_url
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (workflow_job_id > 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (workflow_job_id > 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (workflow_job_id == 0) {
|
||||
core.setFailed(`Can't find workflow run and workflow job triggered to 'runs-on ${{inputs.arc-name}}'`)
|
||||
} else {
|
||||
core.setOutput('workflow_run', workflow_run_id);
|
||||
core.setOutput('workflow_job', workflow_job_id);
|
||||
core.setOutput('workflow_run_url', workflow_run_html_url);
|
||||
}
|
||||
|
||||
- name: Generate summary about the triggered workflow run
|
||||
shell: bash
|
||||
run: |
|
||||
cat <<-EOF > $GITHUB_STEP_SUMMARY
|
||||
| **Triggered workflow run** |
|
||||
|:--------------------------:|
|
||||
| ${{steps.query_workflow.outputs.workflow_run_url}} |
|
||||
EOF
|
||||
|
||||
- name: Wait for workflow to start running
|
||||
if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
script: |
|
||||
function sleep(ms) {
|
||||
return new Promise(resolve => setTimeout(resolve, ms))
|
||||
}
|
||||
const owner = '${{inputs.repo-owner}}'
|
||||
const repo = '${{inputs.repo-name}}'
|
||||
const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
|
||||
const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
|
||||
let count = 0
|
||||
while (count++<10) {
|
||||
await sleep(30 * 1000);
|
||||
let getRunResponse = await github.rest.actions.getWorkflowRun({
|
||||
owner: owner,
|
||||
repo: repo,
|
||||
run_id: workflow_run_id
|
||||
})
|
||||
console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
|
||||
if (getRunResponse.data.status == 'in_progress') {
|
||||
console.log(`Workflow run is in progress.`)
|
||||
return
|
||||
}
|
||||
}
|
||||
core.setFailed(`The triggered workflow run didn't start properly using ${{inputs.arc-name}}`)
|
||||
|
||||
- name: Wait for workflow to finish successfully
|
||||
if: inputs.wait-to-finish == 'true'
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
script: |
|
||||
// Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'
|
||||
function sleep(ms) {
|
||||
return new Promise(resolve => setTimeout(resolve, ms))
|
||||
}
|
||||
const owner = '${{inputs.repo-owner}}'
|
||||
const repo = '${{inputs.repo-name}}'
|
||||
const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
|
||||
const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
|
||||
let count = 0
|
||||
while (count++<10) {
|
||||
await sleep(30 * 1000);
|
||||
let getRunResponse = await github.rest.actions.getWorkflowRun({
|
||||
owner: owner,
|
||||
repo: repo,
|
||||
run_id: workflow_run_id
|
||||
})
|
||||
console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
|
||||
if (getRunResponse.data.status == 'completed') {
|
||||
if ( getRunResponse.data.conclusion == 'success') {
|
||||
console.log(`Workflow run finished properly.`)
|
||||
return
|
||||
} else {
|
||||
core.setFailed(`The triggered workflow run finish with result ${getRunResponse.data.conclusion}`)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)
|
||||
|
||||
- name: cleanup
|
||||
if: inputs.wait-to-finish == 'true'
|
||||
shell: bash
|
||||
run: |
|
||||
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
|
||||
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
|
||||
|
||||
- name: Gather logs and cleanup
|
||||
shell: bash
|
||||
if: always()
|
||||
run: |
|
||||
kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}}
|
||||
63
.github/actions/setup-arc-e2e/action.yaml
vendored
Normal file
63
.github/actions/setup-arc-e2e/action.yaml
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
name: 'Setup ARC E2E Test Action'
|
||||
description: 'Build controller image, create kind cluster, load the image, and exchange ARC configure token.'
|
||||
|
||||
inputs:
|
||||
app-id:
|
||||
description: 'GitHub App Id for exchange access token'
|
||||
required: true
|
||||
app-pk:
|
||||
description: "GitHub App private key for exchange access token"
|
||||
required: true
|
||||
image-name:
|
||||
description: "Local docker image name for building"
|
||||
required: true
|
||||
image-tag:
|
||||
description: "Tag of ARC Docker image for building"
|
||||
required: true
|
||||
target-org:
|
||||
description: "The test organization for ARC e2e test"
|
||||
required: true
|
||||
|
||||
outputs:
|
||||
token:
|
||||
description: 'Token to use for configure ARC'
|
||||
value: ${{steps.config-token.outputs.token}}
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
with:
|
||||
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
|
||||
# BuildKit v0.11 which has a bug causing intermittent
|
||||
# failures pushing images to GHCR
|
||||
version: v0.9.1
|
||||
driver-opts: image=moby/buildkit:v0.10.6
|
||||
|
||||
- name: Build controller image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64
|
||||
load: true
|
||||
build-args: |
|
||||
DOCKER_IMAGE_NAME=${{inputs.image-name}}
|
||||
VERSION=${{inputs.image-tag}}
|
||||
tags: |
|
||||
${{inputs.image-name}}:${{inputs.image-tag}}
|
||||
no-cache: true
|
||||
|
||||
- name: Create minikube cluster and load image
|
||||
shell: bash
|
||||
run: |
|
||||
minikube start
|
||||
minikube image load ${{inputs.image-name}}:${{inputs.image-tag}}
|
||||
|
||||
- name: Get configure token
|
||||
id: config-token
|
||||
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
|
||||
with:
|
||||
application_id: ${{ inputs.app-id }}
|
||||
application_private_key: ${{ inputs.app-pk }}
|
||||
organization: ${{ inputs.target-org}}
|
||||
43
.github/renovate.json5
vendored
43
.github/renovate.json5
vendored
@@ -1,43 +0,0 @@
|
||||
{
|
||||
"extends": ["config:base"],
|
||||
"labels": ["dependencies"],
|
||||
"packageRules": [
|
||||
{
|
||||
// automatically merge an update of runner
|
||||
"matchPackageNames": ["actions/runner"],
|
||||
"extractVersion": "^v(?<version>.*)$",
|
||||
"automerge": true
|
||||
}
|
||||
],
|
||||
"regexManagers": [
|
||||
{
|
||||
// use https://github.com/actions/runner/releases
|
||||
"fileMatch": [
|
||||
".github/workflows/runners.yaml"
|
||||
],
|
||||
"matchStrings": ["RUNNER_VERSION: +(?<currentValue>.*?)\\n"],
|
||||
"depNameTemplate": "actions/runner",
|
||||
"datasourceTemplate": "github-releases"
|
||||
},
|
||||
{
|
||||
"fileMatch": [
|
||||
"runner/Makefile",
|
||||
"Makefile"
|
||||
],
|
||||
"matchStrings": ["RUNNER_VERSION \\?= +(?<currentValue>.*?)\\n"],
|
||||
"depNameTemplate": "actions/runner",
|
||||
"datasourceTemplate": "github-releases"
|
||||
},
|
||||
{
|
||||
"fileMatch": [
|
||||
"runner/actions-runner.ubuntu-20.04.dockerfile",
|
||||
"runner/actions-runner.ubuntu-22.04.dockerfile",
|
||||
"runner/actions-runner-dind.ubuntu-20.04.dockerfile",
|
||||
"runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile"
|
||||
],
|
||||
"matchStrings": ["RUNNER_VERSION=+(?<currentValue>.*?)\\n"],
|
||||
"depNameTemplate": "actions/runner",
|
||||
"datasourceTemplate": "github-releases"
|
||||
}
|
||||
]
|
||||
}
|
||||
212
.github/workflows/arc-publish-chart.yaml
vendored
Normal file
212
.github/workflows/arc-publish-chart.yaml
vendored
Normal file
@@ -0,0 +1,212 @@
|
||||
name: Publish ARC Helm Charts
|
||||
|
||||
# Revert to https://github.com/actions-runner-controller/releases#releases
|
||||
# for details on why we use this approach
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- 'charts/**'
|
||||
- '.github/workflows/arc-publish-chart.yaml'
|
||||
- '!charts/actions-runner-controller/docs/**'
|
||||
- '!charts/gha-runner-scale-set-controller/**'
|
||||
- '!charts/gha-runner-scale-set/**'
|
||||
- '!**.md'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
force:
|
||||
description: 'Force publish even if the chart version is not bumped'
|
||||
type: boolean
|
||||
required: true
|
||||
default: false
|
||||
|
||||
env:
|
||||
KUBE_SCORE_VERSION: 1.10.0
|
||||
HELM_VERSION: v3.8.0
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
lint-chart:
|
||||
name: Lint Chart
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v3.4
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
- name: Set up kube-score
|
||||
run: |
|
||||
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
|
||||
chmod 755 kube-score
|
||||
|
||||
- name: Kube-score generated manifests
|
||||
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
|
||||
|
||||
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@v2.3.1
|
||||
|
||||
- name: Run chart-testing (list-changed)
|
||||
id: list-changed
|
||||
run: |
|
||||
changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
|
||||
if [[ -n "$changed" ]]; then
|
||||
echo "::set-output name=changed::true"
|
||||
fi
|
||||
|
||||
- name: Run chart-testing (lint)
|
||||
run: |
|
||||
ct lint --config charts/.ci/ct-config.yaml
|
||||
|
||||
- name: Create kind cluster
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
uses: helm/kind-action@v1.4.0
|
||||
|
||||
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
||||
- name: Install cert-manager
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
run: |
|
||||
helm repo add jetstack https://charts.jetstack.io --force-update
|
||||
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
|
||||
|
||||
- name: Run chart-testing (install)
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
run: ct install --config charts/.ci/ct-config.yaml
|
||||
|
||||
# WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
|
||||
- name: Check if Chart Publish is Needed
|
||||
id: publish-chart-step
|
||||
run: |
|
||||
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
|
||||
NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
|
||||
RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
|
||||
LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
|
||||
|
||||
echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
|
||||
echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
|
||||
|
||||
# Always publish if force is true
|
||||
if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
|
||||
echo "publish=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "publish=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Job summary
|
||||
run: |
|
||||
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
publish-chart:
|
||||
if: needs.lint-chart.outputs.publish-chart == 'true'
|
||||
needs: lint-chart
|
||||
name: Publish Chart
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write # for helm/chart-releaser-action to push chart release and create a release
|
||||
env:
|
||||
CHART_TARGET_ORG: actions-runner-controller
|
||||
CHART_TARGET_REPO: actions-runner-controller.github.io
|
||||
CHART_TARGET_BRANCH: master
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "$GITHUB_ACTOR"
|
||||
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
|
||||
|
||||
- name: Get Token
|
||||
id: get_workflow_token
|
||||
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
|
||||
with:
|
||||
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
|
||||
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
|
||||
organization: ${{ env.CHART_TARGET_ORG }}
|
||||
|
||||
- name: Install chart-releaser
|
||||
uses: helm/chart-releaser-action@v1.4.1
|
||||
with:
|
||||
install_only: true
|
||||
install_dir: ${{ github.workspace }}/bin
|
||||
|
||||
- name: Package and upload release assets
|
||||
run: |
|
||||
cr package \
|
||||
${{ github.workspace }}/charts/actions-runner-controller/ \
|
||||
--package-path .cr-release-packages
|
||||
|
||||
cr upload \
|
||||
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
|
||||
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
|
||||
--package-path .cr-release-packages \
|
||||
--token ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Generate updated index.yaml
|
||||
run: |
|
||||
cr index \
|
||||
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
|
||||
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
|
||||
--index-path ${{ github.workspace }}/index.yaml \
|
||||
--token ${{ secrets.GITHUB_TOKEN }} \
|
||||
--push \
|
||||
--pages-branch 'gh-pages' \
|
||||
--pages-index-path 'index.yaml'
|
||||
|
||||
# Chart Release was never intended to publish to a different repo
|
||||
# this workaround is intended to move the index.yaml to the target repo
|
||||
# where the github pages are hosted
|
||||
- name: Checkout target repository
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
|
||||
path: ${{ env.CHART_TARGET_REPO }}
|
||||
ref: ${{ env.CHART_TARGET_BRANCH }}
|
||||
token: ${{ steps.get_workflow_token.outputs.token }}
|
||||
|
||||
- name: Copy index.yaml
|
||||
run: |
|
||||
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
|
||||
|
||||
- name: Commit and push to target repository
|
||||
run: |
|
||||
git config user.name "$GITHUB_ACTOR"
|
||||
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
|
||||
git add .
|
||||
git commit -m "Update index.yaml"
|
||||
git push
|
||||
working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
|
||||
|
||||
- name: Job summary
|
||||
run: |
|
||||
echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
|
||||
@@ -1,4 +1,4 @@
|
||||
name: Publish ARC
|
||||
name: Publish ARC Image
|
||||
|
||||
# Revert to https://github.com/actions-runner-controller/releases#releases
|
||||
# for details on why we use this approach
|
||||
@@ -25,10 +25,18 @@ env:
|
||||
TARGET_ORG: actions-runner-controller
|
||||
TARGET_REPO: actions-runner-controller
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
release-controller:
|
||||
name: Release
|
||||
runs-on: ubuntu-latest
|
||||
# gha-runner-scale-set has its own release workflow.
|
||||
# We don't want to publish a new actions-runner-controller image
|
||||
# we release gha-runner-scale-set.
|
||||
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
@@ -1,33 +1,42 @@
|
||||
name: Runners
|
||||
name: Release ARC Runner Images
|
||||
|
||||
# Revert to https://github.com/actions-runner-controller/releases#releases
|
||||
# for details on why we use this approach
|
||||
on:
|
||||
# We must do a trigger on a push: instead of a types: closed so GitHub Secrets
|
||||
# We must do a trigger on a push: instead of a types: closed so GitHub Secrets
|
||||
# are available to the workflow run
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
paths:
|
||||
- 'runner/**'
|
||||
- '!runner/Makefile'
|
||||
- '.github/workflows/runners.yaml'
|
||||
- '!**.md'
|
||||
- 'runner/VERSION'
|
||||
- '.github/workflows/arc-release-runners.yaml'
|
||||
|
||||
env:
|
||||
# Safeguard to prevent pushing images to registeries after build
|
||||
# Safeguard to prevent pushing images to registeries after build
|
||||
PUSH_TO_REGISTRIES: true
|
||||
TARGET_ORG: actions-runner-controller
|
||||
TARGET_WORKFLOW: release-runners.yaml
|
||||
RUNNER_VERSION: 2.300.2
|
||||
DOCKER_VERSION: 20.10.21
|
||||
RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0
|
||||
DOCKER_VERSION: 20.10.23
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-runners:
|
||||
name: Trigger Build and Push of Runner Images
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Get runner version
|
||||
id: versions
|
||||
run: |
|
||||
runner_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
|
||||
container_hooks_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
|
||||
echo runner_version=$runner_current_version >> $GITHUB_OUTPUT
|
||||
echo container_hooks_version=$container_hooks_current_version >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Get Token
|
||||
id: get_workflow_token
|
||||
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
|
||||
@@ -37,6 +46,9 @@ jobs:
|
||||
organization: ${{ env.TARGET_ORG }}
|
||||
|
||||
- name: Trigger Build And Push Runner Images To Registries
|
||||
env:
|
||||
RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
|
||||
CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
|
||||
run: |
|
||||
# Authenticate
|
||||
gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
|
||||
@@ -45,18 +57,21 @@ jobs:
|
||||
gh workflow run ${{ env.TARGET_WORKFLOW }} -R ${{ env.TARGET_ORG }}/releases \
|
||||
-f runner_version=${{ env.RUNNER_VERSION }} \
|
||||
-f docker_version=${{ env.DOCKER_VERSION }} \
|
||||
-f runner_container_hooks_version=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }} \
|
||||
-f runner_container_hooks_version=${{ env.CONTAINER_HOOKS_VERSION }} \
|
||||
-f sha='${{ github.sha }}' \
|
||||
-f push_to_registries=${{ env.PUSH_TO_REGISTRIES }}
|
||||
|
||||
- name: Job summary
|
||||
env:
|
||||
RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
|
||||
CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
|
||||
run: |
|
||||
echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- runner_version: ${{ env.RUNNER_VERSION }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- docker_version: ${{ env.DOCKER_VERSION }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- runner_container_hooks_version: ${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- runner_container_hooks_version: ${{ env.CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- push_to_registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
149
.github/workflows/arc-update-runners-scheduled.yaml
vendored
Normal file
149
.github/workflows/arc-update-runners-scheduled.yaml
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
# This workflows polls releases from actions/runner and in case of a new one it
|
||||
# updates files containing runner version and opens a pull request.
|
||||
name: Runner Updates Check (Scheduled Job)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# run daily
|
||||
- cron: "0 9 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
# check_versions compares our current version and the latest available runner
|
||||
# version and sets them as outputs.
|
||||
check_versions:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
outputs:
|
||||
runner_current_version: ${{ steps.runner_versions.outputs.runner_current_version }}
|
||||
runner_latest_version: ${{ steps.runner_versions.outputs.runner_latest_version }}
|
||||
container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
|
||||
container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Get runner current and latest versions
|
||||
id: runner_versions
|
||||
run: |
|
||||
CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
|
||||
echo "Current version: $CURRENT_VERSION"
|
||||
echo runner_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
|
||||
|
||||
LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1)
|
||||
echo "Latest version: $LATEST_VERSION"
|
||||
echo runner_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Get container-hooks current and latest versions
|
||||
id: container_hooks_versions
|
||||
run: |
|
||||
CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
|
||||
echo "Current version: $CURRENT_VERSION"
|
||||
echo container_hooks_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
|
||||
|
||||
LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner-container-hooks | grep -oP '(?<=v)[0-9.]+' | head -1)
|
||||
echo "Latest version: $LATEST_VERSION"
|
||||
echo container_hooks_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
|
||||
|
||||
# check_pr checks if a PR for the same update already exists. It only runs if
|
||||
# runner latest version != our current version. If no existing PR is found,
|
||||
# it sets a PR name as output.
|
||||
check_pr:
|
||||
runs-on: ubuntu-latest
|
||||
needs: check_versions
|
||||
if: needs.check_versions.outputs.runner_current_version != needs.check_versions.outputs.runner_latest_version || needs.check_versions.outputs.container_hooks_current_version != needs.check_versions.outputs.container_hooks_latest_version
|
||||
outputs:
|
||||
pr_name: ${{ steps.pr_name.outputs.pr_name }}
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
steps:
|
||||
- name: debug
|
||||
run:
|
||||
echo "RUNNER_CURRENT_VERSION=${{ needs.check_versions.outputs.runner_current_version }}"
|
||||
echo "RUNNER_LATEST_VERSION=${{ needs.check_versions.outputs.runner_latest_version }}"
|
||||
echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
|
||||
echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: PR Name
|
||||
id: pr_name
|
||||
env:
|
||||
RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
|
||||
RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
|
||||
CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
|
||||
CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
|
||||
# Generate a PR name with the following title:
|
||||
# Updates: runner to v2.304.0 and container-hooks to v0.3.1
|
||||
run: |
|
||||
RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
|
||||
CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"
|
||||
|
||||
PR_NAME="Updates:"
|
||||
if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
|
||||
then
|
||||
PR_NAME="$PR_NAME $RUNNER_MESSAGE"
|
||||
fi
|
||||
if [ "$CONTAINER_HOOKS_CURRENT_VERSION" != "$CONTAINER_HOOKS_LATEST_VERSION" ]
|
||||
then
|
||||
PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
|
||||
fi
|
||||
|
||||
result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
|
||||
if [ -z "$result" ]
|
||||
then
|
||||
echo "No existing PRs found, setting output with pr_name=$PR_NAME"
|
||||
echo pr_name=$PR_NAME >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "Found a PR with title '$PR_NAME' already existing: ${{ github.server_url }}/${{ github.repository }}/pull/$result"
|
||||
fi
|
||||
|
||||
# update_version updates runner version in the files listed below, commits
|
||||
# the changes and opens a pull request as `github-actions` bot.
|
||||
update_version:
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- check_versions
|
||||
- check_pr
|
||||
if: needs.check_pr.outputs.pr_name
|
||||
permissions:
|
||||
pull-requests: write
|
||||
contents: write
|
||||
actions: write
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
|
||||
RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
|
||||
CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
|
||||
CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
|
||||
PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: New branch
|
||||
run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
|
||||
|
||||
- name: Update files
|
||||
run: |
|
||||
sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/VERSION
|
||||
sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/Makefile
|
||||
sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" Makefile
|
||||
sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" test/e2e/e2e_test.go
|
||||
|
||||
sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/VERSION
|
||||
sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/Makefile
|
||||
sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" Makefile
|
||||
sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" test/e2e/e2e_test.go
|
||||
|
||||
- name: Commit changes
|
||||
run: |
|
||||
# from https://github.com/orgs/community/discussions/26560
|
||||
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||
git config user.name "github-actions[bot]"
|
||||
git add .
|
||||
git commit -m "$PR_NAME"
|
||||
git push -u origin HEAD
|
||||
|
||||
- name: Create pull request
|
||||
run: gh pr create -f -l "runners update"
|
||||
@@ -1,12 +1,24 @@
|
||||
name: Validate Helm Chart
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- 'charts/**'
|
||||
- '.github/workflows/arc-validate-chart.yaml'
|
||||
- '!charts/actions-runner-controller/docs/**'
|
||||
- '!**.md'
|
||||
- '!charts/gha-runner-scale-set-controller/**'
|
||||
- '!charts/gha-runner-scale-set/**'
|
||||
push:
|
||||
paths:
|
||||
- 'charts/**'
|
||||
- '.github/workflows/validate-chart.yaml'
|
||||
- '.github/workflows/arc-validate-chart.yaml'
|
||||
- '!charts/actions-runner-controller/docs/**'
|
||||
- '!**.md'
|
||||
- '!charts/gha-runner-scale-set-controller/**'
|
||||
- '!charts/gha-runner-scale-set/**'
|
||||
workflow_dispatch:
|
||||
env:
|
||||
KUBE_SCORE_VERSION: 1.10.0
|
||||
@@ -15,6 +27,13 @@ env:
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
# This will make sure we only apply the concurrency limits on pull requests
|
||||
# but not pushes to master branch by making the concurrency group name unique
|
||||
# for pushes
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
validate-chart:
|
||||
name: Lint Chart
|
||||
@@ -26,7 +45,8 @@ jobs:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v3.4
|
||||
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
|
||||
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
@@ -52,7 +72,7 @@ jobs:
|
||||
python-version: '3.7'
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@v2.3.1
|
||||
uses: helm/chart-testing-action@v2.4.0
|
||||
|
||||
- name: Run chart-testing (list-changed)
|
||||
id: list-changed
|
||||
@@ -78,5 +98,6 @@ jobs:
|
||||
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
|
||||
|
||||
- name: Run chart-testing (install)
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
run: |
|
||||
ct install --config charts/.ci/ct-config.yaml
|
||||
@@ -1,4 +1,4 @@
|
||||
name: Validate Runners
|
||||
name: Validate ARC Runners
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
@@ -12,6 +12,13 @@ on:
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
# This will make sure we only apply the concurrency limits on pull requests
|
||||
# but not pushes to master branch by making the concurrency group name unique
|
||||
# for pushes
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
shellcheck:
|
||||
name: runner / shellcheck
|
||||
882
.github/workflows/gha-e2e-tests.yaml
vendored
Normal file
882
.github/workflows/gha-e2e-tests.yaml
vendored
Normal file
@@ -0,0 +1,882 @@
|
||||
name: (gha) E2E Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
TARGET_ORG: actions-runner-controller
|
||||
TARGET_REPO: arc_e2e_test_dummy
|
||||
IMAGE_NAME: "arc-test-image"
|
||||
IMAGE_VERSION: "0.5.0"
|
||||
|
||||
concurrency:
|
||||
# This will make sure we only apply the concurrency limits on pull requests
|
||||
# but not pushes to master branch by making the concurrency group name unique
|
||||
# for pushes
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
default-setup:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
- uses: ./.github/actions/setup-arc-e2e
|
||||
id: setup
|
||||
with:
|
||||
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
|
||||
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
|
||||
image-name: ${{env.IMAGE_NAME}}
|
||||
image-tag: ${{env.IMAGE_VERSION}}
|
||||
target-org: ${{env.TARGET_ORG}}
|
||||
|
||||
- name: Install gha-runner-scale-set-controller
|
||||
id: install_arc_controller
|
||||
run: |
|
||||
helm install arc \
|
||||
--namespace "arc-systems" \
|
||||
--create-namespace \
|
||||
--set image.repository=${{ env.IMAGE_NAME }} \
|
||||
--set image.tag=${{ env.IMAGE_VERSION }} \
|
||||
./charts/gha-runner-scale-set-controller \
|
||||
--debug
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||
kubectl get pod -n arc-systems
|
||||
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||
|
||||
- name: Install gha-runner-scale-set
|
||||
id: install_arc
|
||||
run: |
|
||||
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
|
||||
helm install "$ARC_NAME" \
|
||||
--namespace "arc-runners" \
|
||||
--create-namespace \
|
||||
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
|
||||
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||
./charts/gha-runner-scale-set \
|
||||
--debug
|
||||
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
|
||||
kubectl get pod -n arc-systems
|
||||
|
||||
- name: Test ARC E2E
|
||||
uses: ./.github/actions/execute-assert-arc-e2e
|
||||
timeout-minutes: 10
|
||||
with:
|
||||
auth-token: ${{ steps.setup.outputs.token }}
|
||||
repo-owner: ${{ env.TARGET_ORG }}
|
||||
repo-name: ${{env.TARGET_REPO}}
|
||||
workflow-file: ${{env.WORKFLOW_FILE}}
|
||||
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
||||
arc-namespace: "arc-runners"
|
||||
arc-controller-namespace: "arc-systems"
|
||||
|
||||
single-namespace-setup:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
- uses: ./.github/actions/setup-arc-e2e
|
||||
id: setup
|
||||
with:
|
||||
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
|
||||
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
|
||||
image-name: ${{env.IMAGE_NAME}}
|
||||
image-tag: ${{env.IMAGE_VERSION}}
|
||||
target-org: ${{env.TARGET_ORG}}
|
||||
|
||||
- name: Install gha-runner-scale-set-controller
|
||||
id: install_arc_controller
|
||||
run: |
|
||||
kubectl create namespace arc-runners
|
||||
helm install arc \
|
||||
--namespace "arc-systems" \
|
||||
--create-namespace \
|
||||
--set image.repository=${{ env.IMAGE_NAME }} \
|
||||
--set image.tag=${{ env.IMAGE_VERSION }} \
|
||||
--set flags.watchSingleNamespace=arc-runners \
|
||||
./charts/gha-runner-scale-set-controller \
|
||||
--debug
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||
kubectl get pod -n arc-systems
|
||||
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||
|
||||
- name: Install gha-runner-scale-set
|
||||
id: install_arc
|
||||
run: |
|
||||
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
|
||||
helm install "$ARC_NAME" \
|
||||
--namespace "arc-runners" \
|
||||
--create-namespace \
|
||||
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
|
||||
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||
./charts/gha-runner-scale-set \
|
||||
--debug
|
||||
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
|
||||
kubectl get pod -n arc-systems
|
||||
|
||||
- name: Test ARC E2E
|
||||
uses: ./.github/actions/execute-assert-arc-e2e
|
||||
timeout-minutes: 10
|
||||
with:
|
||||
auth-token: ${{ steps.setup.outputs.token }}
|
||||
repo-owner: ${{ env.TARGET_ORG }}
|
||||
repo-name: ${{env.TARGET_REPO}}
|
||||
workflow-file: ${{env.WORKFLOW_FILE}}
|
||||
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
||||
arc-namespace: "arc-runners"
|
||||
arc-controller-namespace: "arc-systems"
|
||||
|
||||
dind-mode-setup:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
|
||||
env:
|
||||
WORKFLOW_FILE: arc-test-dind-workflow.yaml
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
- uses: ./.github/actions/setup-arc-e2e
|
||||
id: setup
|
||||
with:
|
||||
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
|
||||
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
|
||||
image-name: ${{env.IMAGE_NAME}}
|
||||
image-tag: ${{env.IMAGE_VERSION}}
|
||||
target-org: ${{env.TARGET_ORG}}
|
||||
|
||||
- name: Install gha-runner-scale-set-controller
|
||||
id: install_arc_controller
|
||||
run: |
|
||||
helm install arc \
|
||||
--namespace "arc-systems" \
|
||||
--create-namespace \
|
||||
--set image.repository=${{ env.IMAGE_NAME }} \
|
||||
--set image.tag=${{ env.IMAGE_VERSION }} \
|
||||
./charts/gha-runner-scale-set-controller \
|
||||
--debug
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||
kubectl get pod -n arc-systems
|
||||
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||
|
||||
- name: Install gha-runner-scale-set
|
||||
id: install_arc
|
||||
run: |
|
||||
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
|
||||
helm install "$ARC_NAME" \
|
||||
--namespace "arc-runners" \
|
||||
--create-namespace \
|
||||
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
|
||||
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||
--set containerMode.type="dind" \
|
||||
./charts/gha-runner-scale-set \
|
||||
--debug
|
||||
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
|
||||
kubectl get pod -n arc-systems
|
||||
|
||||
- name: Test ARC E2E
|
||||
uses: ./.github/actions/execute-assert-arc-e2e
|
||||
timeout-minutes: 10
|
||||
with:
|
||||
auth-token: ${{ steps.setup.outputs.token }}
|
||||
repo-owner: ${{ env.TARGET_ORG }}
|
||||
repo-name: ${{env.TARGET_REPO}}
|
||||
workflow-file: ${{env.WORKFLOW_FILE}}
|
||||
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
||||
arc-namespace: "arc-runners"
|
||||
arc-controller-namespace: "arc-systems"
|
||||
|
||||
kubernetes-mode-setup:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
- uses: ./.github/actions/setup-arc-e2e
|
||||
id: setup
|
||||
with:
|
||||
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
|
||||
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
|
||||
image-name: ${{env.IMAGE_NAME}}
|
||||
image-tag: ${{env.IMAGE_VERSION}}
|
||||
target-org: ${{env.TARGET_ORG}}
|
||||
|
||||
- name: Install gha-runner-scale-set-controller
|
||||
id: install_arc_controller
|
||||
run: |
|
||||
echo "Install openebs/dynamic-localpv-provisioner"
|
||||
helm repo add openebs https://openebs.github.io/charts
|
||||
helm repo update
|
||||
helm install openebs openebs/openebs -n openebs --create-namespace
|
||||
|
||||
helm install arc \
|
||||
--namespace "arc-systems" \
|
||||
--create-namespace \
|
||||
--set image.repository=${{ env.IMAGE_NAME }} \
|
||||
--set image.tag=${{ env.IMAGE_VERSION }} \
|
||||
./charts/gha-runner-scale-set-controller \
|
||||
--debug
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||
kubectl get pod -n arc-systems
|
||||
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n openebs -l name=openebs-localpv-provisioner
|
||||
|
||||
- name: Install gha-runner-scale-set
|
||||
id: install_arc
|
||||
run: |
|
||||
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
|
||||
helm install "$ARC_NAME" \
|
||||
--namespace "arc-runners" \
|
||||
--create-namespace \
|
||||
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
|
||||
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||
--set containerMode.type="kubernetes" \
|
||||
--set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \
|
||||
--set containerMode.kubernetesModeWorkVolumeClaim.storageClassName="openebs-hostpath" \
|
||||
--set containerMode.kubernetesModeWorkVolumeClaim.resources.requests.storage="1Gi" \
|
||||
./charts/gha-runner-scale-set \
|
||||
--debug
|
||||
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
|
||||
kubectl get pod -n arc-systems
|
||||
|
||||
- name: Test ARC E2E
|
||||
uses: ./.github/actions/execute-assert-arc-e2e
|
||||
timeout-minutes: 10
|
||||
with:
|
||||
auth-token: ${{ steps.setup.outputs.token }}
|
||||
repo-owner: ${{ env.TARGET_ORG }}
|
||||
repo-name: ${{env.TARGET_REPO}}
|
||||
workflow-file: ${{env.WORKFLOW_FILE}}
|
||||
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
||||
arc-namespace: "arc-runners"
|
||||
arc-controller-namespace: "arc-systems"
|
||||
|
||||
auth-proxy-setup:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
- uses: ./.github/actions/setup-arc-e2e
|
||||
id: setup
|
||||
with:
|
||||
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
|
||||
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
|
||||
image-name: ${{env.IMAGE_NAME}}
|
||||
image-tag: ${{env.IMAGE_VERSION}}
|
||||
target-org: ${{env.TARGET_ORG}}
|
||||
|
||||
- name: Install gha-runner-scale-set-controller
|
||||
id: install_arc_controller
|
||||
run: |
|
||||
helm install arc \
|
||||
--namespace "arc-systems" \
|
||||
--create-namespace \
|
||||
--set image.repository=${{ env.IMAGE_NAME }} \
|
||||
--set image.tag=${{ env.IMAGE_VERSION }} \
|
||||
./charts/gha-runner-scale-set-controller \
|
||||
--debug
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||
kubectl get pod -n arc-systems
|
||||
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||
|
||||
- name: Install gha-runner-scale-set
|
||||
id: install_arc
|
||||
run: |
|
||||
docker run -d \
|
||||
--name squid \
|
||||
--publish 3128:3128 \
|
||||
huangtingluo/squid-proxy:latest
|
||||
kubectl create namespace arc-runners
|
||||
kubectl create secret generic proxy-auth \
|
||||
--namespace=arc-runners \
|
||||
--from-literal=username=github \
|
||||
--from-literal=password='actions'
|
||||
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
|
||||
helm install "$ARC_NAME" \
|
||||
--namespace "arc-runners" \
|
||||
--create-namespace \
|
||||
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
|
||||
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||
--set proxy.https.url="http://host.minikube.internal:3128" \
|
||||
--set proxy.https.credentialSecretRef="proxy-auth" \
|
||||
--set "proxy.noProxy[0]=10.96.0.1:443" \
|
||||
./charts/gha-runner-scale-set \
|
||||
--debug
|
||||
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
|
||||
kubectl get pod -n arc-systems
|
||||
|
||||
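Before installing the scale set, the authenticated Squid proxy can be sanity-checked from the runner host. This is a sketch and assumes the `huangtingluo/squid-proxy` image enforces the same `github`/`actions` basic-auth credentials that the `proxy-auth` secret mirrors:

```bash
# Sanity check (sketch): confirm the proxy on localhost:3128 accepts the credentials
# stored in the proxy-auth secret. Assumes the squid image enforces basic auth.
curl -fsS -x "http://github:actions@127.0.0.1:3128" https://api.github.com/ -o /dev/null \
  && echo "proxy reachable with credentials"
```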
- name: Test ARC E2E
|
||||
uses: ./.github/actions/execute-assert-arc-e2e
|
||||
timeout-minutes: 10
|
||||
with:
|
||||
auth-token: ${{ steps.setup.outputs.token }}
|
||||
repo-owner: ${{ env.TARGET_ORG }}
|
||||
repo-name: ${{env.TARGET_REPO}}
|
||||
workflow-file: ${{env.WORKFLOW_FILE}}
|
||||
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
||||
arc-namespace: "arc-runners"
|
||||
arc-controller-namespace: "arc-systems"
|
||||
|
||||
anonymous-proxy-setup:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
- uses: ./.github/actions/setup-arc-e2e
|
||||
id: setup
|
||||
with:
|
||||
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
|
||||
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
|
||||
image-name: ${{env.IMAGE_NAME}}
|
||||
image-tag: ${{env.IMAGE_VERSION}}
|
||||
target-org: ${{env.TARGET_ORG}}
|
||||
|
||||
- name: Install gha-runner-scale-set-controller
|
||||
id: install_arc_controller
|
||||
run: |
|
||||
helm install arc \
|
||||
--namespace "arc-systems" \
|
||||
--create-namespace \
|
||||
--set image.repository=${{ env.IMAGE_NAME }} \
|
||||
--set image.tag=${{ env.IMAGE_VERSION }} \
|
||||
./charts/gha-runner-scale-set-controller \
|
||||
--debug
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||
kubectl get pod -n arc-systems
|
||||
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||
|
||||
- name: Install gha-runner-scale-set
|
||||
id: install_arc
|
||||
run: |
|
||||
docker run -d \
|
||||
--name squid \
|
||||
--publish 3128:3128 \
|
||||
ubuntu/squid:latest
|
||||
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
|
||||
helm install "$ARC_NAME" \
|
||||
--namespace "arc-runners" \
|
||||
--create-namespace \
|
||||
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
|
||||
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||
--set proxy.https.url="http://host.minikube.internal:3128" \
|
||||
--set "proxy.noProxy[0]=10.96.0.1:443" \
|
||||
./charts/gha-runner-scale-set \
|
||||
--debug
|
||||
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
|
||||
kubectl get pod -n arc-systems
|
||||
|
||||
- name: Test ARC E2E
|
||||
uses: ./.github/actions/execute-assert-arc-e2e
|
||||
timeout-minutes: 10
|
||||
with:
|
||||
auth-token: ${{ steps.setup.outputs.token }}
|
||||
repo-owner: ${{ env.TARGET_ORG }}
|
||||
repo-name: ${{env.TARGET_REPO}}
|
||||
workflow-file: ${{env.WORKFLOW_FILE}}
|
||||
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
||||
arc-namespace: "arc-runners"
|
||||
arc-controller-namespace: "arc-systems"
|
||||
|
||||
self-signed-ca-setup:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
- uses: ./.github/actions/setup-arc-e2e
|
||||
id: setup
|
||||
with:
|
||||
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
|
||||
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
|
||||
image-name: ${{env.IMAGE_NAME}}
|
||||
image-tag: ${{env.IMAGE_VERSION}}
|
||||
target-org: ${{env.TARGET_ORG}}
|
||||
|
||||
- name: Install gha-runner-scale-set-controller
|
||||
id: install_arc_controller
|
||||
run: |
|
||||
helm install arc \
|
||||
--namespace "arc-systems" \
|
||||
--create-namespace \
|
||||
--set image.repository=${{ env.IMAGE_NAME }} \
|
||||
--set image.tag=${{ env.IMAGE_VERSION }} \
|
||||
./charts/gha-runner-scale-set-controller \
|
||||
--debug
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||
kubectl get pod -n arc-systems
|
||||
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||
|
||||
- name: Install gha-runner-scale-set
|
||||
id: install_arc
|
||||
run: |
|
||||
docker run -d \
|
||||
--rm \
|
||||
--name mitmproxy \
|
||||
--publish 8080:8080 \
|
||||
-v ${{ github.workspace }}/mitmproxy:/home/mitmproxy/.mitmproxy \
|
||||
mitmproxy/mitmproxy:latest \
|
||||
mitmdump
|
||||
count=0
|
||||
while true; do
|
||||
if [ -f "${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem" ]; then
|
||||
echo "CA cert generated"
|
||||
cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for mitmproxy generate its CA cert"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
|
||||
sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
|
||||
kubectl create namespace arc-runners
|
||||
kubectl -n arc-runners create configmap ca-cert --from-file="${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt"
|
||||
kubectl -n arc-runners get configmap ca-cert -o yaml
|
||||
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
|
||||
helm install "$ARC_NAME" \
|
||||
--namespace "arc-runners" \
|
||||
--create-namespace \
|
||||
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
|
||||
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||
--set proxy.https.url="http://host.minikube.internal:8080" \
|
||||
--set "proxy.noProxy[0]=10.96.0.1:443" \
|
||||
--set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert" \
|
||||
--set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.crt" \
|
||||
--set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
|
||||
./charts/gha-runner-scale-set \
|
||||
--debug
|
||||
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
|
||||
kubectl get pod -n arc-systems
|
||||
|
||||
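The chart mounts the mitmproxy CA into the runner at `/usr/local/share/ca-certificates/`, which on a Debian/Ubuntu based runner image is the directory scanned by `update-ca-certificates`. Trusting the certificate inside a runner container would therefore look roughly like this (a sketch, not part of the workflow):

```bash
# Inside a Debian/Ubuntu-based runner container (sketch):
# the mounted mitmproxy-ca-cert.crt under /usr/local/share/ca-certificates/
# is added to the system trust store with:
sudo update-ca-certificates
# after which HTTPS traffic intercepted by mitmproxy should verify successfully.
```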
- name: Test ARC E2E
|
||||
uses: ./.github/actions/execute-assert-arc-e2e
|
||||
timeout-minutes: 10
|
||||
with:
|
||||
auth-token: ${{ steps.setup.outputs.token }}
|
||||
repo-owner: ${{ env.TARGET_ORG }}
|
||||
repo-name: ${{env.TARGET_REPO}}
|
||||
workflow-file: ${{env.WORKFLOW_FILE}}
|
||||
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
||||
arc-namespace: "arc-runners"
|
||||
arc-controller-namespace: "arc-systems"
|
||||
|
||||
update-strategy-tests:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
- uses: ./.github/actions/setup-arc-e2e
|
||||
id: setup
|
||||
with:
|
||||
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
|
||||
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
|
||||
image-name: ${{env.IMAGE_NAME}}
|
||||
image-tag: ${{env.IMAGE_VERSION}}
|
||||
target-org: ${{env.TARGET_ORG}}
|
||||
|
||||
- name: Install gha-runner-scale-set-controller
|
||||
id: install_arc_controller
|
||||
run: |
|
||||
helm install arc \
|
||||
--namespace "arc-systems" \
|
||||
--create-namespace \
|
||||
--set image.repository=${{ env.IMAGE_NAME }} \
|
||||
--set image.tag=${{ env.IMAGE_VERSION }} \
|
||||
--set flags.updateStrategy="eventual" \
|
||||
./charts/gha-runner-scale-set-controller \
|
||||
--debug
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||
kubectl get pod -n arc-systems
|
||||
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||
|
||||
- name: Install gha-runner-scale-set
|
||||
id: install_arc
|
||||
run: |
|
||||
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
|
||||
helm install "$ARC_NAME" \
|
||||
--namespace "arc-runners" \
|
||||
--create-namespace \
|
||||
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
|
||||
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||
./charts/gha-runner-scale-set \
|
||||
--debug
|
||||
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
|
||||
count=0
|
||||
while true; do
|
||||
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
|
||||
if [ -n "$POD_NAME" ]; then
|
||||
echo "Pod found: $POD_NAME"
|
||||
break
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
|
||||
kubectl get pod -n arc-systems
|
||||
|
||||
- name: Trigger long running jobs and wait for runners to pick them up
|
||||
uses: ./.github/actions/execute-assert-arc-e2e
|
||||
timeout-minutes: 10
|
||||
with:
|
||||
auth-token: ${{ steps.setup.outputs.token }}
|
||||
repo-owner: ${{ env.TARGET_ORG }}
|
||||
repo-name: ${{env.TARGET_REPO}}
|
||||
workflow-file: ${{env.WORKFLOW_FILE}}
|
||||
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
||||
arc-namespace: "arc-runners"
|
||||
arc-controller-namespace: "arc-systems"
|
||||
wait-to-running: "true"
|
||||
wait-to-finish: "false"
|
||||
|
||||
- name: Upgrade the gha-runner-scale-set
|
||||
shell: bash
|
||||
run: |
|
||||
helm upgrade --install "${{ steps.install_arc.outputs.ARC_NAME }}" \
|
||||
--namespace "arc-runners" \
|
||||
--create-namespace \
|
||||
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{ env.TARGET_REPO }}" \
|
||||
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||
--set template.spec.containers[0].name="runner" \
|
||||
--set template.spec.containers[0].image="ghcr.io/actions/actions-runner:latest" \
|
||||
--set template.spec.containers[0].command={"/home/runner/run.sh"} \
|
||||
--set template.spec.containers[0].env[0].name="TEST" \
|
||||
--set template.spec.containers[0].env[0].value="E2E TESTS" \
|
||||
./charts/gha-runner-scale-set \
|
||||
--debug
|
||||
|
||||
- name: Assert that the listener is deleted while jobs are running
|
||||
shell: bash
|
||||
run: |
|
||||
count=0
|
||||
while true; do
|
||||
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name=${{ steps.install_arc.outputs.ARC_NAME }} -n arc-systems --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n arc-runners --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||
RESOURCES="$(kubectl get pods -A)"
|
||||
|
||||
if [ "$LISTENER_COUNT" -eq 0 ]; then
|
||||
echo "Listener has been deleted"
|
||||
echo "$RESOURCES"
|
||||
exit 0
|
||||
fi
|
||||
if [ "$count" -ge 60 ]; then
|
||||
echo "Timeout waiting for listener to be deleted"
|
||||
echo "$RESOURCES"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Waiting for listener to be deleted"
|
||||
echo "Listener count: $LISTENER_COUNT target: 0 | Runners count: $RUNNERS_COUNT target: 3"
|
||||
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
|
||||
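The same running-pod counts can be obtained without jq; a sketch, where `$ARC_NAME` stands for the release name installed above:

```bash
# Equivalent counts without jq (sketch).
LISTENER_COUNT="$(kubectl get pods -n arc-systems \
  -l actions.github.com/scale-set-name="$ARC_NAME" \
  --field-selector=status.phase=Running --no-headers 2>/dev/null | wc -l)"
RUNNERS_COUNT="$(kubectl get pods -n arc-runners \
  -l app.kubernetes.io/component=runner \
  --field-selector=status.phase=Running --no-headers 2>/dev/null | wc -l)"
```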
- name: Assert that the listener goes back up after the jobs are done
|
||||
shell: bash
|
||||
run: |
|
||||
count=0
|
||||
while true; do
|
||||
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name=${{ steps.install_arc.outputs.ARC_NAME }} -n arc-systems --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n arc-runners --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||
RESOURCES="$(kubectl get pods -A)"
|
||||
|
||||
if [ "$LISTENER_COUNT" -eq 1 ]; then
|
||||
echo "Listener is up!"
|
||||
echo "$RESOURCES"
|
||||
exit 0
|
||||
fi
|
||||
if [ "$count" -ge 120 ]; then
|
||||
echo "Timeout waiting for listener to be recreated"
|
||||
echo "$RESOURCES"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Waiting for listener to be recreated"
|
||||
echo "Listener count: $LISTENER_COUNT target: 1 | Runners count: $RUNNERS_COUNT target: 0"
|
||||
|
||||
sleep 1
|
||||
count=$((count+1))
|
||||
done
|
||||
|
||||
- name: Gather logs and cleanup
|
||||
shell: bash
|
||||
if: always()
|
||||
run: |
|
||||
helm uninstall "${{ steps.install_arc.outputs.ARC_NAME }}" --namespace "arc-runners" --debug
|
||||
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}"
|
||||
kubectl logs deployment/arc-gha-rs-controller -n "arc-systems"
|
||||
212
.github/workflows/gha-publish-chart.yaml
vendored
Normal file
@@ -0,0 +1,212 @@
|
||||
name: (gha) Publish Helm Charts
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
ref:
|
||||
description: 'The branch, tag or SHA to cut a release from'
|
||||
required: false
|
||||
type: string
|
||||
default: ''
|
||||
release_tag_name:
|
||||
description: 'The name to tag the controller image with'
|
||||
required: true
|
||||
type: string
|
||||
default: 'canary'
|
||||
push_to_registries:
|
||||
description: 'Push images to registries'
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
publish_gha_runner_scale_set_controller_chart:
|
||||
description: 'Publish new helm chart for gha-runner-scale-set-controller'
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
publish_gha_runner_scale_set_chart:
|
||||
description: 'Publish new helm chart for gha-runner-scale-set'
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
env:
|
||||
HELM_VERSION: v3.8.0
|
||||
|
||||
permissions:
|
||||
packages: write
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-push-image:
|
||||
name: Build and push controller image
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
# If inputs.ref is empty, it'll resolve to the default branch
|
||||
ref: ${{ inputs.ref }}
|
||||
|
||||
- name: Check chart versions
|
||||
# Binary version and chart versions need to match.
|
||||
# In case of an upgrade, the controller will try to clean up
|
||||
# resources with older versions that should have been cleaned up
|
||||
# during the upgrade process
|
||||
run: ./hack/check-gh-chart-versions.sh ${{ inputs.release_tag_name }}
|
||||
|
||||
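What such a consistency check might look like is sketched below; the real logic lives in `hack/check-gh-chart-versions.sh` and may differ, and the field names and quoting here are assumptions:

```bash
# Hypothetical sketch only; the actual check is hack/check-gh-chart-versions.sh.
TAG="$1"  # e.g. the release_tag_name input
APP_VERSION="$(grep '^appVersion:' charts/gha-runner-scale-set-controller/Chart.yaml \
  | awk '{print $2}' | tr -d '"')"
if [ "$APP_VERSION" != "$TAG" ]; then
  echo "Chart appVersion ($APP_VERSION) does not match release tag ($TAG)" >&2
  exit 1
fi
```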
- name: Resolve parameters
|
||||
id: resolve_parameters
|
||||
run: |
|
||||
resolvedRef="${{ inputs.ref }}"
|
||||
if [ -z "$resolvedRef" ]
|
||||
then
|
||||
resolvedRef="${{ github.ref }}"
|
||||
fi
|
||||
echo "resolved_ref=$resolvedRef" >> $GITHUB_OUTPUT
|
||||
echo "INFO: Resolving short SHA for $resolvedRef"
|
||||
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
|
||||
echo "INFO: Normalizing repository name (lowercase)"
|
||||
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
with:
|
||||
# Pinning Buildx to v0.9.1 and BuildKit to v0.10.6;
# BuildKit v0.11 has a bug causing intermittent
# failures when pushing images to GHCR
|
||||
version: v0.9.1
|
||||
driver-opts: image=moby/buildkit:v0.10.6
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build & push controller image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
build-args: VERSION=${{ inputs.release_tag_name }}
|
||||
push: ${{ inputs.push_to_registries }}
|
||||
tags: |
|
||||
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}
|
||||
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
- name: Job summary
|
||||
run: |
|
||||
echo "The [gha-publish-chart.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/gha-publish-chart.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Release tag: ${{ inputs.release_tag_name }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
publish-helm-chart-gha-runner-scale-set-controller:
|
||||
if: ${{ inputs.publish_gha_runner_scale_set_controller_chart == true }}
|
||||
needs: build-push-image
|
||||
name: Publish Helm chart for gha-runner-scale-set-controller
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
# If inputs.ref is empty, it'll resolve to the default branch
|
||||
ref: ${{ inputs.ref }}
|
||||
|
||||
- name: Resolve parameters
|
||||
id: resolve_parameters
|
||||
run: |
|
||||
resolvedRef="${{ inputs.ref }}"
|
||||
if [ -z "$resolvedRef" ]
|
||||
then
|
||||
resolvedRef="${{ github.ref }}"
|
||||
fi
|
||||
echo "INFO: Resolving short SHA for $resolvedRef"
|
||||
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
|
||||
echo "INFO: Normalizing repository name (lowercase)"
|
||||
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Helm
|
||||
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
|
||||
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
- name: Publish new helm chart for gha-runner-scale-set-controller
|
||||
run: |
|
||||
echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
|
||||
GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set-controller/Chart.yaml | grep version: | cut -d " " -f 2)
|
||||
echo "GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}" >> $GITHUB_ENV
|
||||
helm package charts/gha-runner-scale-set-controller/ --version="${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}"
|
||||
helm push gha-runner-scale-set-controller-"${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts
|
||||
|
||||
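The `cat | grep | cut` pipeline above is adequate for a simple Chart.yaml; a more robust equivalent, assuming `yq` (v4) is available on the runner, would be:

```bash
# Alternative version lookup (assumes yq v4 is installed on the runner).
GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG="$(yq '.version' charts/gha-runner-scale-set-controller/Chart.yaml)"
```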
- name: Job summary
|
||||
run: |
|
||||
echo "New helm chart for gha-runner-scale-set-controller published successfully!" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- gha-runner-scale-set-controller Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
publish-helm-chart-gha-runner-scale-set:
|
||||
if: ${{ inputs.publish_gha_runner_scale_set_chart == true }}
|
||||
needs: build-push-image
|
||||
name: Publish Helm chart for gha-runner-scale-set
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
# If inputs.ref is empty, it'll resolve to the default branch
|
||||
ref: ${{ inputs.ref }}
|
||||
|
||||
- name: Resolve parameters
|
||||
id: resolve_parameters
|
||||
run: |
|
||||
resolvedRef="${{ inputs.ref }}"
|
||||
if [ -z "$resolvedRef" ]
|
||||
then
|
||||
resolvedRef="${{ github.ref }}"
|
||||
fi
|
||||
echo "INFO: Resolving short SHA for $resolvedRef"
|
||||
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
|
||||
echo "INFO: Normalizing repository name (lowercase)"
|
||||
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Helm
|
||||
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
|
||||
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
- name: Publish new helm chart for gha-runner-scale-set
|
||||
run: |
|
||||
echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
|
||||
|
||||
GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set/Chart.yaml | grep version: | cut -d " " -f 2)
|
||||
echo "GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}" >> $GITHUB_ENV
|
||||
helm package charts/gha-runner-scale-set/ --version="${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}"
|
||||
helm push gha-runner-scale-set-"${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts
|
||||
|
||||
- name: Job summary
|
||||
run: |
|
||||
echo "New helm chart for gha-runner-scale-set published successfully!" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- gha-runner-scale-set Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
|
||||
125
.github/workflows/gha-validate-chart.yaml
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
name: (gha) Validate Helm Charts
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- 'charts/**'
|
||||
- '.github/workflows/gha-validate-chart.yaml'
|
||||
- '!charts/actions-runner-controller/**'
|
||||
- '!**.md'
|
||||
push:
|
||||
paths:
|
||||
- 'charts/**'
|
||||
- '.github/workflows/gha-validate-chart.yaml'
|
||||
- '!charts/actions-runner-controller/**'
|
||||
- '!**.md'
|
||||
workflow_dispatch:
|
||||
env:
|
||||
KUBE_SCORE_VERSION: 1.16.1
|
||||
HELM_VERSION: v3.8.0
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
# This will make sure we only apply the concurrency limits on pull requests
|
||||
# but not pushes to master branch by making the concurrency group name unique
|
||||
# for pushes
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
validate-chart:
|
||||
name: Lint Chart
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Helm
|
||||
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
|
||||
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
- name: Set up kube-score
|
||||
run: |
|
||||
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
|
||||
chmod 755 kube-score
|
||||
|
||||
- name: Kube-score generated manifests
|
||||
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
|
||||
--ignore-test pod-networkpolicy
|
||||
--ignore-test deployment-has-poddisruptionbudget
|
||||
--ignore-test deployment-has-host-podantiaffinity
|
||||
--ignore-test container-security-context
|
||||
--ignore-test pod-probes
|
||||
--ignore-test container-image-tag
|
||||
--enable-optional-test container-security-context-privileged
|
||||
--enable-optional-test container-security-context-readonlyrootfilesystem
|
||||
|
||||
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.7'
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@v2.4.0
|
||||
|
||||
- name: Run chart-testing (list-changed)
|
||||
id: list-changed
|
||||
run: |
|
||||
ct version
|
||||
changed=$(ct list-changed --config charts/.ci/ct-config-gha.yaml)
|
||||
if [[ -n "$changed" ]]; then
|
||||
echo "::set-output name=changed::true"
|
||||
fi
|
||||
|
||||
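Note that `::set-output` is deprecated in favour of the `$GITHUB_OUTPUT` file; on current runners the equivalent line would be:

```bash
# Deprecated:  echo "::set-output name=changed::true"
# Current equivalent:
echo "changed=true" >> "$GITHUB_OUTPUT"
```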
- name: Run chart-testing (lint)
|
||||
run: |
|
||||
ct lint --config charts/.ci/ct-config-gha.yaml
|
||||
|
||||
- name: Set up docker buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
with:
|
||||
version: latest
|
||||
|
||||
- name: Build controller image
|
||||
uses: docker/build-push-action@v3
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
with:
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64
|
||||
load: true
|
||||
build-args: |
|
||||
DOCKER_IMAGE_NAME=test-arc
|
||||
VERSION=dev
|
||||
tags: |
|
||||
test-arc:dev
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
- name: Create kind cluster
|
||||
uses: helm/kind-action@v1.4.0
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
with:
|
||||
cluster_name: chart-testing
|
||||
|
||||
- name: Load image into cluster
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
run: |
|
||||
export DOCKER_IMAGE_NAME=test-arc
|
||||
export VERSION=dev
|
||||
export IMG_RESULT=load
|
||||
make docker-buildx
|
||||
kind load docker-image test-arc:dev --name chart-testing
|
||||
|
||||
- name: Run chart-testing (install)
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
run: |
|
||||
ct install --config charts/.ci/ct-config-gha.yaml
|
||||
133
.github/workflows/global-publish-canary.yaml
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
name: Publish Canary Images
|
||||
|
||||
# Refer to https://github.com/actions-runner-controller/releases#releases
|
||||
# for details on why we use this approach
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
- '.github/actions/**'
|
||||
- '.github/ISSUE_TEMPLATE/**'
|
||||
- '.github/workflows/e2e-test-dispatch-workflow.yaml'
|
||||
- '.github/workflows/gha-e2e-tests.yaml'
|
||||
- '.github/workflows/arc-publish.yaml'
|
||||
- '.github/workflows/arc-publish-chart.yaml'
|
||||
- '.github/workflows/gha-publish-chart.yaml'
|
||||
- '.github/workflows/arc-release-runners.yaml'
|
||||
- '.github/workflows/global-run-codeql.yaml'
|
||||
- '.github/workflows/global-run-first-interaction.yaml'
|
||||
- '.github/workflows/global-run-stale.yaml'
|
||||
- '.github/workflows/arc-update-runners-scheduled.yaml'
|
||||
- '.github/workflows/validate-arc.yaml'
|
||||
- '.github/workflows/arc-validate-chart.yaml'
|
||||
- '.github/workflows/gha-validate-chart.yaml'
|
||||
- '.github/workflows/arc-validate-runners.yaml'
|
||||
- '.github/dependabot.yml'
|
||||
- '.github/RELEASE_NOTE_TEMPLATE.md'
|
||||
- 'runner/**'
|
||||
- '.gitignore'
|
||||
- 'PROJECT'
|
||||
- 'LICENSE'
|
||||
- 'Makefile'
|
||||
|
||||
# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
# Safeguard to prevent pushing images to registries after build
|
||||
PUSH_TO_REGISTRIES: true
|
||||
|
||||
jobs:
|
||||
legacy-canary-build:
|
||||
name: Build and Publish Legacy Canary Image
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
TARGET_ORG: actions-runner-controller
|
||||
TARGET_REPO: actions-runner-controller
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get Token
|
||||
id: get_workflow_token
|
||||
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
|
||||
with:
|
||||
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
|
||||
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
|
||||
organization: ${{ env.TARGET_ORG }}
|
||||
|
||||
- name: Trigger Build And Push Images To Registries
|
||||
run: |
|
||||
# Authenticate
|
||||
gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
|
||||
|
||||
# Trigger the workflow run
|
||||
jq -n '{"event_type": "canary", "client_payload": {"sha": "${{ github.sha }}", "push_to_registries": ${{ env.PUSH_TO_REGISTRIES }}}}' \
|
||||
| gh api -X POST /repos/actions-runner-controller/releases/dispatches --input -
|
||||
|
||||
- name: Job summary
|
||||
run: |
|
||||
echo "The [publish-canary](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-canary.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
canary-build:
|
||||
name: Build and Publish gha-runner-scale-set-controller Canary Image
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
# Normalization is needed because upper case characters are not allowed in the repository name
|
||||
# and the short sha is needed for image tagging
|
||||
- name: Resolve parameters
|
||||
id: resolve_parameters
|
||||
run: |
|
||||
echo "INFO: Resolving short sha"
|
||||
echo "short_sha=$(git rev-parse --short ${{ github.ref }})" >> $GITHUB_OUTPUT
|
||||
echo "INFO: Normalizing repository name (lowercase)"
|
||||
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
with:
|
||||
version: latest
|
||||
|
||||
# Unstable builds - run at your own risk
|
||||
- name: Build and Push
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
context: .
|
||||
file: ./Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
build-args: VERSION=canary-"${{ github.ref }}"
|
||||
push: ${{ env.PUSH_TO_REGISTRIES }}
|
||||
tags: |
|
||||
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary
|
||||
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary-${{ steps.resolve_parameters.outputs.short_sha }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
@@ -10,6 +10,13 @@ on:
|
||||
schedule:
|
||||
- cron: '30 1 * * 0'
|
||||
|
||||
concurrency:
|
||||
# This will make sure we only apply the concurrency limits on pull requests
|
||||
# but not pushes to master branch by making the concurrency group name unique
|
||||
# for pushes
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze
|
||||
@@ -1,4 +1,4 @@
|
||||
name: first-interaction
|
||||
name: First Interaction
|
||||
|
||||
on:
|
||||
issues:
|
||||
88
.github/workflows/go.yaml
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
name: Go
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- '.github/workflows/go.yaml'
|
||||
- '**.go'
|
||||
- 'go.mod'
|
||||
- 'go.sum'
|
||||
pull_request:
|
||||
paths:
|
||||
- '.github/workflows/go.yaml'
|
||||
- '**.go'
|
||||
- 'go.mod'
|
||||
- 'go.sum'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
# This will make sure we only apply the concurrency limits on pull requests
|
||||
# but not pushes to master branch by making the concurrency group name unique
|
||||
# for pushes
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
fmt:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache: false
|
||||
- name: fmt
|
||||
run: go fmt ./...
|
||||
- name: Check diff
|
||||
run: git diff --exit-code
|
||||
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache: false
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
only-new-issues: true
|
||||
version: v1.51.1
|
||||
|
||||
generate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache: false
|
||||
- name: Generate
|
||||
run: make generate
|
||||
- name: Check diff
|
||||
run: git diff --exit-code
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
- run: make manifests
|
||||
- name: Check diff
|
||||
run: git diff --exit-code
|
||||
- name: Install kubebuilder
|
||||
run: |
|
||||
curl -D headers.txt -fsL "https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.26.1-linux-amd64.tar.gz" -o kubebuilder-tools
|
||||
echo "$(grep -i etag headers.txt -m 1 | cut -d'"' -f2) kubebuilder-tools" > sum
|
||||
md5sum -c sum
|
||||
tar -zvxf kubebuilder-tools
|
||||
sudo mv kubebuilder /usr/local/
|
||||
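The checksum step works because, for a simple (non-composite) object, the ETag returned by Google Cloud Storage is the hex MD5 of the file, so it can be fed straight to `md5sum -c`. A condensed restatement of the same idea:

```bash
# GCS returns the object's MD5 as the ETag for non-composite objects,
# so the downloaded tarball can be verified like this (same idea as above):
ETAG="$(grep -i etag headers.txt -m 1 | cut -d'"' -f2)"
echo "$ETAG  kubebuilder-tools" | md5sum -c -
```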
- name: Run go tests
|
||||
run: |
|
||||
go test -short `go list ./... | grep -v ./test_e2e_arc`
|
||||
23
.github/workflows/golangci-lint.yaml
vendored
@@ -1,23 +0,0 @@
|
||||
name: golangci-lint
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
jobs:
|
||||
golangci:
|
||||
name: lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.19
|
||||
- uses: actions/checkout@v3
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
only-new-issues: true
|
||||
version: v1.49.0
|
||||
70
.github/workflows/publish-canary.yaml
vendored
@@ -1,70 +0,0 @@
|
||||
name: Publish Canary Image
|
||||
|
||||
# Revert to https://github.com/actions-runner-controller/releases#releases
|
||||
# for details on why we use this approach
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
- '.github/ISSUE_TEMPLATE/**'
|
||||
- '.github/workflows/validate-chart.yaml'
|
||||
- '.github/workflows/publish-chart.yaml'
|
||||
- '.github/workflows/publish-arc.yaml'
|
||||
- '.github/workflows/runners.yaml'
|
||||
- '.github/workflows/validate-entrypoint.yaml'
|
||||
- '.github/renovate.*'
|
||||
- 'runner/**'
|
||||
- '.gitignore'
|
||||
- 'PROJECT'
|
||||
- 'LICENSE'
|
||||
- 'Makefile'
|
||||
|
||||
env:
|
||||
# Safeguard to prevent pushing images to registeries after build
|
||||
PUSH_TO_REGISTRIES: true
|
||||
TARGET_ORG: actions-runner-controller
|
||||
TARGET_REPO: actions-runner-controller
|
||||
|
||||
# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
canary-build:
|
||||
name: Build and Publish Canary Image
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get Token
|
||||
id: get_workflow_token
|
||||
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
|
||||
with:
|
||||
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
|
||||
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
|
||||
organization: ${{ env.TARGET_ORG }}
|
||||
|
||||
- name: Trigger Build And Push Images To Registries
|
||||
run: |
|
||||
# Authenticate
|
||||
gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
|
||||
|
||||
# Trigger the workflow run
|
||||
jq -n '{"event_type": "canary", "client_payload": {"sha": "${{ github.sha }}", "push_to_registries": ${{ env.PUSH_TO_REGISTRIES }}}}' \
|
||||
| gh api -X POST /repos/actions-runner-controller/releases/dispatches --input -
|
||||
|
||||
- name: Job summary
|
||||
run: |
|
||||
echo "The [publish-canary](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-canary.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY
|
||||
203
.github/workflows/publish-chart.yaml
vendored
@@ -1,203 +0,0 @@
|
||||
name: Publish Helm Chart
|
||||
|
||||
# Revert to https://github.com/actions-runner-controller/releases#releases
|
||||
# for details on why we use this approach
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- 'charts/**'
|
||||
- '.github/workflows/publish-chart.yaml'
|
||||
- '!charts/actions-runner-controller/docs/**'
|
||||
- '!**.md'
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
KUBE_SCORE_VERSION: 1.10.0
|
||||
HELM_VERSION: v3.8.0
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
lint-chart:
|
||||
name: Lint Chart
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v3.4
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
- name: Set up kube-score
|
||||
run: |
|
||||
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
|
||||
chmod 755 kube-score
|
||||
|
||||
- name: Kube-score generated manifests
|
||||
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
|
||||
--ignore-test pod-networkpolicy
|
||||
--ignore-test deployment-has-poddisruptionbudget
|
||||
--ignore-test deployment-has-host-podantiaffinity
|
||||
--ignore-test container-security-context
|
||||
--ignore-test pod-probes
|
||||
--ignore-test container-image-tag
|
||||
--enable-optional-test container-security-context-privileged
|
||||
--enable-optional-test container-security-context-readonlyrootfilesystem
|
||||
|
||||
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.7'
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@v2.3.1
|
||||
|
||||
- name: Run chart-testing (list-changed)
|
||||
id: list-changed
|
||||
run: |
|
||||
changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
|
||||
if [[ -n "$changed" ]]; then
|
||||
echo "::set-output name=changed::true"
|
||||
fi
|
||||
|
||||
- name: Run chart-testing (lint)
|
||||
run: |
|
||||
ct lint --config charts/.ci/ct-config.yaml
|
||||
|
||||
- name: Create kind cluster
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
uses: helm/kind-action@v1.4.0
|
||||
|
||||
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
||||
- name: Install cert-manager
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
run: |
|
||||
helm repo add jetstack https://charts.jetstack.io --force-update
|
||||
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
|
||||
|
||||
- name: Run chart-testing (install)
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
run: ct install --config charts/.ci/ct-config.yaml
|
||||
|
||||
# WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
|
||||
- name: Check if Chart Publish is Needed
|
||||
id: publish-chart-step
|
||||
run: |
|
||||
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
|
||||
NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
|
||||
RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
|
||||
LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
|
||||
echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
|
||||
echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
|
||||
if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION ]]; then
|
||||
echo "publish=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "publish=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Job summary
|
||||
run: |
|
||||
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
publish-chart:
|
||||
if: needs.lint-chart.outputs.publish-chart == 'true'
|
||||
needs: lint-chart
|
||||
name: Publish Chart
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write # for helm/chart-releaser-action to push chart release and create a release
|
||||
env:
|
||||
CHART_TARGET_ORG: actions-runner-controller
|
||||
CHART_TARGET_REPO: actions-runner-controller.github.io
|
||||
CHART_TARGET_BRANCH: main
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "$GITHUB_ACTOR"
|
||||
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
|
||||
|
||||
- name: Get Token
|
||||
id: get_workflow_token
|
||||
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
|
||||
with:
|
||||
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
|
||||
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
|
||||
organization: ${{ env.CHART_TARGET_ORG }}
|
||||
|
||||
- name: Install chart-releaser
|
||||
uses: helm/chart-releaser-action@v1.4.1
|
||||
with:
|
||||
install_only: true
|
||||
install_dir: ${{ github.workspace }}/bin
|
||||
|
||||
- name: Package and upload release assets
|
||||
run: |
|
||||
cr package \
|
||||
${{ github.workspace }}/charts/actions-runner-controller/ \
|
||||
--package-path .cr-release-packages
|
||||
|
||||
cr upload \
|
||||
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
|
||||
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
|
||||
--package-path .cr-release-packages \
|
||||
--token ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Generate updated index.yaml
|
||||
run: |
|
||||
cr index \
|
||||
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
|
||||
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
|
||||
--index-path ${{ github.workspace }}/index.yaml \
|
||||
--pages-branch 'gh-pages' \
|
||||
--pages-index-path 'index.yaml'
|
||||
|
||||
# Chart Release was never intended to publish to a different repo
|
||||
# this workaround is intended to move the index.yaml to the target repo
|
||||
# where the github pages are hosted
|
||||
- name: Checkout pages repository
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
|
||||
path: ${{ env.CHART_TARGET_REPO }}
|
||||
ref: ${{ env.CHART_TARGET_BRANCH }}
|
||||
token: ${{ steps.get_workflow_token.outputs.token }}
|
||||
|
||||
- name: Copy index.yaml
|
||||
run: |
|
||||
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
|
||||
|
||||
- name: Commit and push
|
||||
run: |
|
||||
git config user.name "$GITHUB_ACTOR"
|
||||
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
|
||||
git add .
|
||||
git commit -m "Update index.yaml"
|
||||
git push
|
||||
working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
|
||||
|
||||
- name: Job summary
|
||||
run: |
|
||||
echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
|
||||
60
.github/workflows/validate-arc.yaml
vendored
@@ -1,60 +0,0 @@
|
||||
name: Validate ARC
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
- '.github/ISSUE_TEMPLATE/**'
|
||||
- '.github/workflows/publish-canary.yaml'
|
||||
- '.github/workflows/validate-chart.yaml'
|
||||
- '.github/workflows/publish-chart.yaml'
|
||||
- '.github/workflows/runners.yaml'
|
||||
- '.github/workflows/publish-arc.yaml'
|
||||
- '.github/workflows/validate-entrypoint.yaml'
|
||||
- '.github/renovate.*'
|
||||
- 'runner/**'
|
||||
- '.gitignore'
|
||||
- 'PROJECT'
|
||||
- 'LICENSE'
|
||||
- 'Makefile'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test-controller:
|
||||
name: Test ARC
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set-up Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18.2'
|
||||
check-latest: false
|
||||
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
|
||||
- name: Install kubebuilder
|
||||
run: |
|
||||
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
|
||||
tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
|
||||
sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
make test
|
||||
|
||||
- name: Verify manifests are up-to-date
|
||||
run: |
|
||||
make manifests
|
||||
git diff --exit-code
|
||||
2
.gitignore
vendored
@@ -29,8 +29,10 @@ bin
|
||||
.env
|
||||
.test.env
|
||||
*.pem
|
||||
!github/actions/testdata/*.pem
|
||||
|
||||
# OS
|
||||
.DS_STORE
|
||||
|
||||
/test-assets
|
||||
/.tools
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
# actions-runner-controller maintainers
|
||||
* @mumoshu @toast-gear @actions/actions-runtime
|
||||
* @mumoshu @toast-gear @actions/actions-runtime @nikola-jokic
|
||||
|
||||
164
CONTRIBUTING.md
@@ -15,6 +15,13 @@
|
||||
- [Opening the Pull Request](#opening-the-pull-request)
|
||||
- [Helm Version Changes](#helm-version-changes)
|
||||
- [Testing Controller Built from a Pull Request](#testing-controller-built-from-a-pull-request)
|
||||
- [Release process](#release-process)
|
||||
- [Workflow structure](#workflow-structure)
|
||||
- [Releasing legacy actions-runner-controller image and helm charts](#releasing-legacy-actions-runner-controller-image-and-helm-charts)
|
||||
- [Release actions-runner-controller runner images](#release-actions-runner-controller-runner-images)
|
||||
- [Release gha-runner-scale-set-controller image and helm charts](#release-gha-runner-scale-set-controller-image-and-helm-charts)
|
||||
- [Release actions/runner image](#release-actionsrunner-image)
|
||||
- [Canary releases](#canary-releases)
|
||||
|
||||
## Welcome
|
||||
|
||||
@@ -25,14 +32,13 @@ reviewed and merged.
|
||||
|
||||
## Before contributing code
|
||||
|
||||
We welcome code patches, but to make sure things are well coordinated you should discuss any significant change before starting the work.
|
||||
The maintainers ask that you signal your intention to contribute to the project using the issue tracker.
|
||||
If there is an existing issue that you want to work on, please let us know so we can get it assigned to you.
|
||||
If you noticed a bug or want to add a new feature, there are issue templates you can fill out.
|
||||
We welcome code patches, but to make sure things are well coordinated you should discuss any significant change before starting the work. The maintainers ask that you signal your intention to contribute to the project using the issue tracker. If there is an existing issue that you want to work on, please let us know so we can get it assigned to you. If you noticed a bug or want to add a new feature, there are issue templates you can fill out.
|
||||
|
||||
When filing a feature request, the maintainers will review the change and give you a decision on whether we are willing to accept the feature into the project.
|
||||
|
||||
For significantly large and/or complex features, we may request that you write up an architectural decision record ([ADR](https://github.blog/2020-08-13-why-write-adrs/)) detailing the change.
|
||||
Please use the [template](/adrs/0000-TEMPLATE.md) as guidance.
|
||||
|
||||
Please use the [template](/docs/adrs/yyyy-mm-dd-TEMPLATE) as guidance.
|
||||
|
||||
<!--
|
||||
TODO: Add a pre-requisite section describing what developers should
|
||||
@@ -45,6 +51,7 @@ Depending on what you are patching depends on how you should go about it.
|
||||
Below are some guides on how to test patches locally as well as develop the controller and runners.
|
||||
|
||||
When submitting a PR for a change, please provide evidence that your change works, as we still need to work on improving the CI of the project.
|
||||
|
||||
Some resources are provided to help achieve this; see this guide for details.
|
||||
|
||||
### Developing the Controller
|
||||
@@ -130,7 +137,7 @@ GINKGO_FOCUS='[It] should create a new Runner resource from the specified templa
|
||||
>
|
||||
> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to somewhere under `$HOME`.
|
||||
> Otherwise `kind load docker-image` fails while running `docker save`.
|
||||
> See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap for more information.
|
||||
> See <https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap> for more information.
|
||||
|
||||
To test your local changes against both PAT and App based authentication please run the `acceptance` make target with the authentication configuration details provided:
|
||||
|
||||
@@ -186,7 +193,7 @@ Before shipping your PR, please check the following items to make sure CI passes
|
||||
- Run `go mod tidy` if you made changes to dependencies.
|
||||
- Format the code using `gofmt`
|
||||
- Run the `golangci-lint` tool locally.
|
||||
- We recommend you use `make lint` to run the tool using a Docker container matching the CI version.
|
||||
- We recommend you use `make lint` to run the tool using a Docker container matching the CI version.
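Taken together, the items above can be run locally before opening the PR. A minimal sketch, assuming a standard Go toolchain and the repository's Makefile targets:

```bash
# Hedged sketch of a local pre-PR check; adjust to your environment.
go mod tidy        # sync go.mod/go.sum if you touched dependencies
gofmt -w .         # format the code
make lint          # golangci-lint via a Docker container matching the CI version
make test          # unit tests, generated manifests, shellcheck
```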
|
||||
|
||||
### Opening the Pull Request
|
||||
|
||||
@@ -217,3 +224,146 @@ Please also note that you need to replace `$DOCKER_USER` with your own DockerHub
|
||||
Only the maintainers can release a new version of actions-runner-controller, publish a new version of the helm charts, and runner images.
|
||||
|
||||
All release workflows have been moved to [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases) since the packages are owned by the former organization.
|
||||
|
||||
### Workflow structure
|
||||
|
||||
Following the migration of actions-runner-controller into GitHub actions, all the workflows had to be modified to accommodate the move to a new organization. The following table describes the workflows, their purpose and dependencies.
|
||||
|
||||
| Filename | Workflow name | Purpose |
|
||||
|-----------------------------------|--------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| gha-e2e-tests.yaml | (gha) E2E Tests | Tests the Autoscaling Runner Set mode end to end. Coverage is restricted to this mode. Legacy modes are not tested. |
|
||||
| go.yaml | Format, Lint, Unit Tests | Formats, lints and runs unit tests for the entire codebase. |
|
||||
| arc-publish.yaml | Publish ARC Image | Uploads release/actions-runner-controller.yaml as an artifact to the newly created release and triggers the [build and publication of the controller image](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-arc.yaml) |
|
||||
| global-publish-canary.yaml | Publish Canary Images | Builds and publishes canary controller container images for both new and legacy modes. |
|
||||
| arc-publish-chart.yaml | Publish ARC Helm Charts | Packages and publishes charts/actions-runner-controller (via GitHub Pages) |
|
||||
| gha-publish-chart.yaml | (gha) Publish Helm Charts | Packages and publishes charts/gha-runner-scale-set-controller and charts/gha-runner-scale-set charts (OCI to GHCR) |
|
||||
| arc-release-runners.yaml | Release ARC Runner Images | Triggers [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) which will build and push new runner images used with the legacy ARC modes. |
|
||||
| global-run-codeql.yaml | Run CodeQL | Run CodeQL on all the codebase |
|
||||
| global-run-first-interaction.yaml | First Interaction | Informs first time contributors what to expect when they open a new issue / PR |
|
||||
| global-run-stale.yaml | Run Stale Bot | Closes issues / PRs without activity |
|
||||
| arc-update-runners-scheduled.yaml | Runner Updates Check (Scheduled Job) | Polls [actions/runner](https://github.com/actions/runner) and [actions/runner-container-hooks](https://github.com/actions/runner-container-hooks) for new releases. If found, a PR is created to publish new runner images |
|
||||
| arc-validate-chart.yaml | Validate Helm Chart | Run helm chart validators for charts/actions-runner-controller |
|
||||
| gha-validate-chart.yaml | (gha) Validate Helm Charts | Run helm chart validators for charts/gha-runner-scale-set-controller and charts/gha-runner-scale-set charts |
|
||||
| arc-validate-runners.yaml | Validate ARC Runners | Run validators for runners |
|
||||
|
||||
There are 7 components that we release regularly:
|
||||
|
||||
1. legacy [actions-runner-controller controller image](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller)
|
||||
2. legacy [actions-runner-controller helm charts](https://actions-runner-controller.github.io/actions-runner-controller/)
|
||||
3. legacy actions-runner-controller runner images
|
||||
1. [ubuntu-20.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner)
|
||||
2. [ubuntu-22.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner)
|
||||
3. [dind-ubuntu-20.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind)
|
||||
4. [dind-ubuntu-22.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind)
|
||||
5. [dind-rootless-ubuntu-20.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind-rootless)
|
||||
6. [dind-rootless-ubuntu-22.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind-rootless)
|
||||
4. [gha-runner-scale-set-controller image](https://github.com/actions/actions-runner-controller/pkgs/container/gha-runner-scale-set-controller)
|
||||
5. [gha-runner-scale-set-controller helm charts](https://github.com/actions/actions-runner-controller/pkgs/container/actions-runner-controller-charts%2Fgha-runner-scale-set-controller)
|
||||
6. [gha-runner-scale-set runner helm charts](https://github.com/actions/actions-runner-controller/pkgs/container/actions-runner-controller-charts%2Fgha-runner-scale-set)
|
||||
7. [actions/runner image](https://github.com/actions/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner)
|
||||
|
||||
#### Releasing legacy actions-runner-controller image and helm charts
|
||||
|
||||
1. Start by making sure the master branch is stable and all CI jobs are passing
|
||||
2. Create a new release in <https://github.com/actions/actions-runner-controller/releases> (Draft a new release)
|
||||
3. Bump up the `version` and `appVersion` in charts/actions-runner-controller/Chart.yaml - make sure the `version` matches the release version you just created. (Example: <https://github.com/actions/actions-runner-controller/pull/2577>)
|
||||
4. When the workflows finish execution, you will see:
|
||||
1. A new controller image published to: <https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller>
|
||||
2. Helm charts published to: <https://github.com/actions-runner-controller/actions-runner-controller.github.io/tree/master/actions-runner-controller> (the index.yaml file is updated)
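For step 3 above, a minimal sketch of bumping the chart versions with `yq` (the same tool the Makefile already uses); `0.27.x` is only a placeholder, use the version of the release you just drafted:

```bash
# Placeholder version; must match the release created in step 2.
export RELEASE_VERSION=0.27.x
yq '.version = strenv(RELEASE_VERSION)' --inplace charts/actions-runner-controller/Chart.yaml
yq '.appVersion = strenv(RELEASE_VERSION)' --inplace charts/actions-runner-controller/Chart.yaml
git diff charts/actions-runner-controller/Chart.yaml  # review before opening the PR
```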
|
||||
|
||||
When a new release is created, the [Publish ARC Image](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/arc-publish.yaml) workflow is triggered.
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
subgraph repository: actions/actions-runner-controller
|
||||
event_a{{"release: published"}} -- triggers --> workflow_a["arc-publish.yaml"]
|
||||
event_b{{"workflow_dispatch"}} -- triggers --> workflow_a["arc-publish.yaml"]
|
||||
workflow_a["arc-publish.yaml"] -- uploads --> package["actions-runner-controller.tar.gz"]
|
||||
end
|
||||
subgraph repository: actions-runner-controller/releases
|
||||
workflow_a["arc-publish.yaml"] -- triggers --> event_d{{"repository_dispatch"}} --> workflow_b["publish-arc.yaml"]
|
||||
workflow_b["publish-arc.yaml"] -- push --> A["GHCR: \nactions-runner-controller/actions-runner-controller:*"]
|
||||
workflow_b["publish-arc.yaml"] -- push --> B["DockerHub: \nsummerwind/actions-runner-controller:*"]
|
||||
end
|
||||
```
|
||||
|
||||
#### Release actions-runner-controller runner images
|
||||
|
||||
**Manual steps:**
|
||||
|
||||
1. Navigate to the [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases) repository
|
||||
2. Trigger [the release-runners.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml) workflow.
|
||||
1. The list of input parameters for this workflow is defined in the table below (always inspect the workflow file for the latest version); a sample dispatch command follows the table.
|
||||
|
||||
<!-- Table of Parameters -->
|
||||
| Parameter | Description | Default |
|
||||
|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
|
||||
| `runner_version` | The version of the [actions/runner](https://github.com/actions/runner) to use | `2.300.2` |
|
||||
| `docker_version` | The version of docker to use | `20.10.12` |
|
||||
| `runner_container_hooks_version` | The version of [actions/runner-container-hooks](https://github.com/actions/runner-container-hooks) to use | `0.2.0` |
|
||||
| `sha` | The commit sha from [actions/actions-runner-controller](https://github.com/actions/actions-runner-controller) to be used to build the runner images. This will be provided to `actions/checkout` & used to tag the container images | Empty string. |
|
||||
| `push_to_registries` | Whether to push the images to the registries. Use false to test the build | false |
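The sample dispatch command referenced above is a hypothetical sketch using the GitHub CLI, with inputs taken from the table; verify the names against the current workflow file before use:

```bash
# Hypothetical dispatch; requires permissions on actions-runner-controller/releases.
gh workflow run release-runners.yaml \
  --repo actions-runner-controller/releases \
  -f runner_version=2.300.2 \
  -f docker_version=20.10.12 \
  -f runner_container_hooks_version=0.2.0 \
  -f push_to_registries=false  # leave false to test the build without publishing
```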
|
||||
|
||||
**Automated steps:**
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
workflow["release-runners.yaml"] -- workflow_dispatch* --> workflow_b["release-runners.yaml"]
|
||||
subgraph repository: actions/actions-runner-controller
|
||||
runner_updates_check["arc-update-runners-scheduled.yaml"] -- "polls (daily)" --> runner_releases["actions/runner/releases"]
|
||||
runner_updates_check -- creates --> runner_update_pr["PR: update /runner/VERSION"]
|
||||
runner_update_pr --> runner_update_pr_merge{{"merge"}}
|
||||
runner_update_pr_merge -- triggers --> workflow["release-runners.yaml"]
|
||||
end
|
||||
subgraph repository: actions-runner-controller/releases
|
||||
workflow_b["release-runners.yaml"] -- push --> A["GHCR: \n actions-runner-controller/actions-runner:* \n actions-runner-controller/actions-runner-dind:* \n actions-runner-controller/actions-runner-dind-rootless:*"]
|
||||
workflow_b["release-runners.yaml"] -- push --> B["DockerHub: \n summerwind/actions-runner:* \n summerwind/actions-runner-dind:* \n summerwind/actions-runner-dind-rootless:*"]
|
||||
event_b{{"workflow_dispatch"}} -- triggers --> workflow_b["release-runners.yaml"]
|
||||
end
|
||||
```
|
||||
|
||||
#### Release gha-runner-scale-set-controller image and helm charts
|
||||
|
||||
1. Make sure the master branch is stable and all CI jobs are passing
|
||||
1. Prepare a release PR (example: <https://github.com/actions/actions-runner-controller/pull/2467>)
|
||||
1. Bump up the version of the chart in: charts/gha-runner-scale-set-controller/Chart.yaml
|
||||
2. Bump up the version of the chart in: charts/gha-runner-scale-set/Chart.yaml
|
||||
1. Make sure that the `version` and `appVersion` of both charts are always the same. These versions cannot diverge.
|
||||
3. Update the quickstart guide to reflect the latest versions: docs/preview/gha-runner-scale-set-controller/README.md
|
||||
4. Add changelog to the PR as well as the quickstart guide
|
||||
1. Merge the release PR
|
||||
1. Manually trigger the [(gha) Publish Helm Charts](https://github.com/actions/actions-runner-controller/actions/workflows/gha-publish-chart.yaml) workflow
|
||||
1. Manually create a tag and release in [actions/actions-runner-controller](https://github.com/actions/actions-runner-controller/releases) with the format: `gha-runner-scale-set-x.x.x` where the version (x.x.x) matches that of the Helm chart
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|-------------------------------------------------|--------------------------------------------------------------------------------------------------------|----------------|
|
||||
| `ref` | The branch, tag or SHA to cut a release from. | default branch |
|
||||
| `release_tag_name` | The tag of the controller image. This is not a git tag. | canary |
|
||||
| `push_to_registries` | Push images to registries. Use false to test the build process. | false |
|
||||
| `publish_gha_runner_scale_set_controller_chart` | Publish new helm chart for gha-runner-scale-set-controller. This will push the new OCI archive to GHCR | false |
|
||||
| `publish_gha_runner_scale_set_chart` | Publish new helm chart for gha-runner-scale-set. This will push the new OCI archive to GHCR | false |
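A hypothetical sketch of the last two manual steps using the GitHub CLI (the chart version is a placeholder and must match the Chart.yaml versions from the release PR; the workflow inputs mirror the table above):

```bash
# Placeholder chart version from the release PR.
CHART_VERSION=x.x.x

# Trigger the "(gha) Publish Helm Charts" workflow.
gh workflow run gha-publish-chart.yaml \
  --repo actions/actions-runner-controller \
  -f ref=master \
  -f release_tag_name="${CHART_VERSION}" \
  -f push_to_registries=true \
  -f publish_gha_runner_scale_set_controller_chart=true \
  -f publish_gha_runner_scale_set_chart=true

# Create the matching tag and release.
gh release create "gha-runner-scale-set-${CHART_VERSION}" \
  --repo actions/actions-runner-controller \
  --title "gha-runner-scale-set-${CHART_VERSION}" \
  --generate-notes
```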
|
||||
|
||||
#### Release actions/runner image
|
||||
|
||||
A new runner image is built and published to <https://github.com/actions/runner/pkgs/container/actions-runner> whenever a new runner binary has been released. There's nothing to do here.
|
||||
|
||||
#### Canary releases
|
||||
|
||||
We publish canary images for both the legacy actions-runner-controller and the gha-runner-scale-set-controller.
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
subgraph org: actions
|
||||
event_a{{"push: [master]"}} -- triggers --> workflow_a["publish-canary.yaml"]
|
||||
end
|
||||
subgraph org: actions-runner-controller
|
||||
workflow_a["publish-canary.yaml"] -- triggers --> event_d{{"repository_dispatch"}} --> workflow_b["publish-canary.yaml"]
|
||||
workflow_b["publish-canary.yaml"] -- push --> A["GHCR: \nactions-runner-controller/actions-runner-controller:canary"]
|
||||
workflow_b["publish-canary.yaml"] -- push --> B["DockerHub: \nsummerwind/actions-runner-controller:canary"]
|
||||
end
|
||||
```
|
||||
|
||||
1. [actions-runner-controller canary image](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller)
|
||||
2. [gha-runner-scale-set-controller image](https://github.com/actions/actions-runner-controller/pkgs/container/gha-runner-scale-set-controller)
|
||||
|
||||
These canary images are automatically built and released on each push to the master branch.
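If you want to try a canary build, it can be pulled directly from the registries; a sketch (the legacy tag comes from the diagram above, the gha-runner-scale-set-controller tag naming is an assumption, so check the package page for the exact tags):

```bash
# Legacy controller canary (tag per the diagram above).
docker pull ghcr.io/actions-runner-controller/actions-runner-controller:canary

# gha-runner-scale-set-controller canary; tag name assumed, verify on the package page.
docker pull ghcr.io/actions/gha-runner-scale-set-controller:canary
```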
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Build the manager binary
|
||||
FROM --platform=$BUILDPLATFORM golang:1.19.4 as builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.20.7 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
|
||||
@@ -37,8 +37,10 @@ RUN --mount=target=. \
|
||||
--mount=type=cache,mode=0777,target=${GOCACHE} \
|
||||
export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \
|
||||
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}'" -o /out/manager main.go && \
|
||||
go build -trimpath -ldflags="-s -w" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \
|
||||
go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \
|
||||
go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver
|
||||
go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver && \
|
||||
go build -trimpath -ldflags="-s -w" -o /out/sleep ./cmd/sleep
|
||||
|
||||
# Use distroless as minimal base image to package the manager binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
@@ -49,6 +51,8 @@ WORKDIR /
|
||||
COPY --from=builder /out/manager .
|
||||
COPY --from=builder /out/github-webhook-server .
|
||||
COPY --from=builder /out/actions-metrics-server .
|
||||
COPY --from=builder /out/github-runnerscaleset-listener .
|
||||
COPY --from=builder /out/sleep .
|
||||
|
||||
USER 65532:65532
|
||||
|
||||
|
||||
Makefile (101 changes)
@@ -1,11 +1,11 @@
|
||||
ifdef DOCKER_USER
|
||||
NAME ?= ${DOCKER_USER}/actions-runner-controller
|
||||
DOCKER_IMAGE_NAME ?= ${DOCKER_USER}/actions-runner-controller
|
||||
else
|
||||
NAME ?= summerwind/actions-runner-controller
|
||||
DOCKER_IMAGE_NAME ?= summerwind/actions-runner-controller
|
||||
endif
|
||||
DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
|
||||
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
|
||||
VERSION ?= dev
|
||||
RUNNER_VERSION ?= 2.300.2
|
||||
RUNNER_VERSION ?= 2.308.0
|
||||
TARGETPLATFORM ?= $(shell arch)
|
||||
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
|
||||
RUNNER_TAG ?= ${VERSION}
|
||||
@@ -73,7 +73,7 @@ GO_TEST_ARGS ?= -short
|
||||
|
||||
# Run tests
|
||||
test: generate fmt vet manifests shellcheck
|
||||
go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
|
||||
go test $(GO_TEST_ARGS) `go list ./... | grep -v ./test_e2e_arc` -coverprofile cover.out
|
||||
go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers/actions.summerwind.net
|
||||
|
||||
test-with-deps: kube-apiserver etcd kubectl
|
||||
@@ -86,14 +86,21 @@ test-with-deps: kube-apiserver etcd kubectl
|
||||
# Build manager binary
|
||||
manager: generate fmt vet
|
||||
go build -o bin/manager main.go
|
||||
go build -o bin/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener
|
||||
|
||||
# Run against the configured Kubernetes cluster in ~/.kube/config
|
||||
run: generate fmt vet manifests
|
||||
go run ./main.go
|
||||
|
||||
run-scaleset: generate fmt vet
|
||||
CONTROLLER_MANAGER_POD_NAMESPACE=default \
|
||||
CONTROLLER_MANAGER_CONTAINER_IMAGE="${DOCKER_IMAGE_NAME}:${VERSION}" \
|
||||
go run -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=$(VERSION)'" \
|
||||
./main.go --auto-scaling-runner-set-only
|
||||
|
||||
# Install CRDs into a cluster
|
||||
install: manifests
|
||||
kustomize build config/crd | kubectl apply -f -
|
||||
kustomize build config/crd | kubectl apply --server-side -f -
|
||||
|
||||
# Uninstall CRDs from a cluster
|
||||
uninstall: manifests
|
||||
@@ -101,8 +108,8 @@ uninstall: manifests
|
||||
|
||||
# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
|
||||
deploy: manifests
|
||||
cd config/manager && kustomize edit set image controller=${NAME}:${VERSION}
|
||||
kustomize build config/default | kubectl apply -f -
|
||||
cd config/manager && kustomize edit set image controller=${DOCKER_IMAGE_NAME}:${VERSION}
|
||||
kustomize build config/default | kubectl apply --server-side -f -
|
||||
|
||||
# Generate manifests e.g. CRD, RBAC etc.
|
||||
manifests: manifests-gen-crds chart-crds
|
||||
@@ -112,9 +119,75 @@ manifests-gen-crds: controller-gen yq
|
||||
for YAMLFILE in config/crd/bases/actions*.yaml; do \
|
||||
$(YQ) '.spec.preserveUnknownFields = false' --inplace "$$YAMLFILE" ; \
|
||||
done
|
||||
make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-type
|
||||
make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-map-keys
|
||||
|
||||
manifests-gen-crds-fix: DELETE_KEY ?=
|
||||
manifests-gen-crds-fix:
|
||||
#runners
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.dockerdContainerResources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
#runnerreplicasets
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.dockerdContainerResources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
#runnerdeployments
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.dockerdContainerResources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
#runnersets
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.volumeClaimTemplates.items.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
#autoscalingrunnersets
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml
|
||||
#ephemeralrunnersets
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
# ephemeralrunners
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml
|
||||
|
||||
chart-crds:
|
||||
cp config/crd/bases/*.yaml charts/actions-runner-controller/crds/
|
||||
cp config/crd/bases/actions.github.com_autoscalingrunnersets.yaml charts/gha-runner-scale-set-controller/crds/
|
||||
cp config/crd/bases/actions.github.com_autoscalinglisteners.yaml charts/gha-runner-scale-set-controller/crds/
|
||||
cp config/crd/bases/actions.github.com_ephemeralrunnersets.yaml charts/gha-runner-scale-set-controller/crds/
|
||||
cp config/crd/bases/actions.github.com_ephemeralrunners.yaml charts/gha-runner-scale-set-controller/crds/
|
||||
rm charts/actions-runner-controller/crds/actions.github.com_autoscalingrunnersets.yaml
|
||||
rm charts/actions-runner-controller/crds/actions.github.com_autoscalinglisteners.yaml
|
||||
rm charts/actions-runner-controller/crds/actions.github.com_ephemeralrunnersets.yaml
|
||||
rm charts/actions-runner-controller/crds/actions.github.com_ephemeralrunners.yaml
|
||||
|
||||
# Run go fmt against code
|
||||
fmt:
|
||||
@@ -130,7 +203,7 @@ generate: controller-gen
|
||||
|
||||
# Run shellcheck on runner scripts
|
||||
shellcheck: shellcheck-install
|
||||
$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh
|
||||
$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh
|
||||
|
||||
docker-buildx:
|
||||
export DOCKER_CLI_EXPERIMENTAL=enabled ;\
|
||||
@@ -142,18 +215,18 @@ docker-buildx:
|
||||
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
|
||||
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
|
||||
--build-arg VERSION=${VERSION} \
|
||||
-t "${NAME}:${VERSION}" \
|
||||
-t "${DOCKER_IMAGE_NAME}:${VERSION}" \
|
||||
-f Dockerfile \
|
||||
. ${PUSH_ARG}
|
||||
|
||||
# Push the docker image
|
||||
docker-push:
|
||||
docker push ${NAME}:${VERSION}
|
||||
docker push ${DOCKER_IMAGE_NAME}:${VERSION}
|
||||
docker push ${RUNNER_NAME}:${RUNNER_TAG}
|
||||
|
||||
# Generate the release manifest file
|
||||
release: manifests
|
||||
cd config/manager && kustomize edit set image controller=${NAME}:${VERSION}
|
||||
cd config/manager && kustomize edit set image controller=${DOCKER_IMAGE_NAME}:${VERSION}
|
||||
mkdir -p release
|
||||
kustomize build config/default > release/actions-runner-controller.yaml
|
||||
|
||||
@@ -177,7 +250,7 @@ acceptance/kind:
|
||||
# Otherwise `load docker-image` fails while running `docker save`.
|
||||
# See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap
|
||||
acceptance/load:
|
||||
kind load docker-image ${NAME}:${VERSION} --name ${CLUSTER}
|
||||
kind load docker-image ${DOCKER_IMAGE_NAME}:${VERSION} --name ${CLUSTER}
|
||||
kind load docker-image quay.io/brancz/kube-rbac-proxy:$(KUBE_RBAC_PROXY_VERSION) --name ${CLUSTER}
|
||||
kind load docker-image ${RUNNER_NAME}:${RUNNER_TAG} --name ${CLUSTER}
|
||||
kind load docker-image docker:dind --name ${CLUSTER}
|
||||
@@ -207,7 +280,7 @@ acceptance/teardown:
|
||||
kind delete cluster --name ${CLUSTER}
|
||||
|
||||
acceptance/deploy:
|
||||
NAME=${NAME} DOCKER_USER=${DOCKER_USER} VERSION=${VERSION} RUNNER_NAME=${RUNNER_NAME} RUNNER_TAG=${RUNNER_TAG} TEST_REPO=${TEST_REPO} \
|
||||
DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME} DOCKER_USER=${DOCKER_USER} VERSION=${VERSION} RUNNER_NAME=${RUNNER_NAME} RUNNER_TAG=${RUNNER_TAG} TEST_REPO=${TEST_REPO} \
|
||||
TEST_ORG=${TEST_ORG} TEST_ORG_REPO=${TEST_ORG_REPO} SYNC_PERIOD=${SYNC_PERIOD} \
|
||||
USE_RUNNERSET=${USE_RUNNERSET} \
|
||||
TEST_EPHEMERAL=${TEST_EPHEMERAL} \
|
||||
|
||||
PROJECT (12 changes)
@@ -10,4 +10,16 @@ resources:
|
||||
- group: actions
|
||||
kind: RunnerDeployment
|
||||
version: v1alpha1
|
||||
- group: actions
|
||||
kind: AutoscalingRunnerSet
|
||||
version: v1alpha1
|
||||
- group: actions
|
||||
kind: EphemeralRunnerSet
|
||||
version: v1alpha1
|
||||
- group: actions
|
||||
kind: EphemeralRunner
|
||||
version: v1alpha1
|
||||
- group: actions
|
||||
kind: AutoscalingListener
|
||||
version: v1alpha1
|
||||
version: "2"
|
||||
|
||||
README.md (48 changes)
@@ -4,42 +4,40 @@
|
||||
[](https://github.com/jonico/awesome-runners)
|
||||
[](https://artifacthub.io/packages/search?repo=actions-runner-controller)
|
||||
|
||||
## About
|
||||
|
||||
Actions Runner Controller (ARC) is a Kubernetes operator that orchestrates and scales self-hosted runners for GitHub Actions.
|
||||
|
||||
With ARC, you can create runner scale sets that automatically scale based on the number of workflows running in your repository, organization, or enterprise. Because controlled runners can be ephemeral and based on containers, new runner instances can scale up or down rapidly and cleanly. For more information about autoscaling, see ["Autoscaling with self-hosted runners."](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/autoscaling-with-self-hosted-runners)
|
||||
|
||||
You can set up ARC on Kubernetes using Helm, then create and run a workflow that uses runner scale sets. For more information about runner scale sets, see ["Deploying runner scale sets with Actions Runner Controller."](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#runner-scale-set)
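A minimal sketch of that Helm-based setup (the chart location is inferred from the package links later in this document; the release and namespace names are placeholders, and the quickstart guide remains the authoritative reference):

```bash
# Placeholder release/namespace names; see the quickstart guide for the full flow.
helm install arc \
  --namespace arc-systems --create-namespace \
  oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller
```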
|
||||
## People
|
||||
|
||||
`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions), mostly in their spare time.
|
||||
Actions Runner Controller (ARC) is an open-source project currently developed and maintained in collaboration with the GitHub Actions team, external maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions).
|
||||
|
||||
If you think the project is awesome and it's becoming a basis for your important business, consider [sponsoring us](https://github.com/sponsors/actions-runner-controller)!
|
||||
If you think the project is awesome and is adding value to your business, please consider directly sponsoring [community maintainers](https://github.com/sponsors/actions-runner-controller) and individual contributors via GitHub Sponsors.
|
||||
|
||||
In case you are already the employer of one of the contributors, sponsoring via GitHub Sponsors might not be an option. Just support them by other means!
|
||||
|
||||
We don't currently have [any sponsors dedicated to this project yet](https://github.com/sponsors/actions-runner-controller).
|
||||
|
||||
However, [HelloFresh](https://www.hellofreshgroup.com/en/) has recently started sponsoring @mumoshu for this project along with his other works. A part of their sponsorship will enable @mumoshu to add an E2E test to keep ARC even more reliable on AWS. Thank you for your sponsorship!
|
||||
|
||||
[<img src="https://user-images.githubusercontent.com/22009/170898715-07f02941-35ec-418b-8cd4-251b422fa9ac.png" width="219" height="71" />](https://careers.hellofresh.com/)
|
||||
|
||||
## Status
|
||||
|
||||
Even though actions-runner-controller is used in production environments, it is still in its early stage of development, hence versioned 0.x.
|
||||
|
||||
actions-runner-controller complies with Semantic Versioning 2.0.0, in which v0.x means that there could be backward-incompatible changes in every release.
|
||||
|
||||
The documentation is kept in line with master@HEAD, and we do our best to highlight any features that require a specific ARC version or higher; however, this is not always easy given the many moving parts. Additionally, we actively do not retain compatibility with every GitHub Enterprise Server version or every Kubernetes version, so you will need to ensure you stay current within a reasonable timespan.
|
||||
|
||||
## About
|
||||
|
||||
[GitHub Actions](https://github.com/features/actions) is a very useful tool for automating development. GitHub Actions jobs are run in the cloud by default, but you may want to run your jobs in your own environment. A [self-hosted runner](https://github.com/actions/runner) can be used for such use cases, but it requires the provisioning and configuration of a virtual machine instance. Instead, if you already have a Kubernetes cluster, it makes more sense to run the self-hosted runner on top of it.
|
||||
|
||||
**actions-runner-controller** makes that possible. Just create a *Runner* resource on your Kubernetes, and it will run and operate the self-hosted runner for the specified repository. Combined with Kubernetes RBAC, you can also build simple Self-hosted runners as a Service.
|
||||
See [the sponsorship dashboard](https://github.com/sponsors/actions-runner-controller) for the former and the current sponsors.
|
||||
|
||||
## Getting Started
|
||||
To give ARC a try with just a handful of commands, please refer to the [Quickstart guide](/docs/quickstart.md).
|
||||
|
||||
For an overview of ARC, please refer to [About ARC](https://github.com/actions/actions-runner-controller/blob/master/docs/about-arc.md)
|
||||
To give ARC a try with just a handful of commands, please refer to the [Quickstart guide](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller).
|
||||
|
||||
For more information, please refer to detailed documentation below!
|
||||
For an overview of ARC, please refer to [About ARC](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller)
|
||||
|
||||
## Documentation
|
||||
With the introduction of [autoscaling runner scale sets](https://github.com/actions/actions-runner-controller/discussions/2775), the existing [autoscaling modes](./docs/automatically-scaling-runners.md) are now legacy. The legacy modes have certain use cases and will continue to be maintained by the community only.
|
||||
|
||||
For further information on what is supported by GitHub and what's managed by the community, please refer to [this announcement discussion.](https://github.com/actions/actions-runner-controller/discussions/2775)
|
||||
|
||||
### Documentation
|
||||
|
||||
ARC documentation is available on [docs.github.com](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller).
|
||||
|
||||
### Legacy documentation
|
||||
|
||||
The following documentation is for the legacy autoscaling modes that continue to be maintained by the community:
|
||||
|
||||
- [Quickstart guide](/docs/quickstart.md)
|
||||
- [About ARC](/docs/about-arc.md)
|
||||
|
||||
@@ -17,8 +17,8 @@
|
||||
|
||||
A list of tools which are helpful for troubleshooting
|
||||
|
||||
* https://github.com/rewanthtammana/kubectl-fields Kubernetes resources hierarchy parsing tool
|
||||
* https://github.com/stern/stern Multi pod and container log tailing for Kubernetes
|
||||
* [Kubernetes resources hierarchy parsing tool `kubectl-fields`](https://github.com/rewanthtammana/kubectl-fields)
|
||||
* [Multi pod and container log tailing for Kubernetes `stern`](https://github.com/stern/stern)
|
||||
|
||||
## Installation
|
||||
|
||||
@@ -30,7 +30,7 @@ Troubleshooting runbooks that relate to ARC installation problems
|
||||
|
||||
This issue can come up for various reasons like leftovers from previous installations or not being able to access the K8s service's clusterIP associated with the admission webhook server (of ARC).
|
||||
|
||||
```
|
||||
```text
|
||||
Internal error occurred: failed calling webhook "mutate.runnerdeployment.actions.summerwind.dev":
|
||||
Post "https://actions-runner-controller-webhook.actions-runner-system.svc:443/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment?timeout=10s": context deadline exceeded
|
||||
```
|
||||
@@ -39,22 +39,24 @@ Post "https://actions-runner-controller-webhook.actions-runner-system.svc:443/mu
|
||||
|
||||
First we will try the common solution of checking webhook leftovers from previous installations:
|
||||
|
||||
1. ```bash
|
||||
kubectl get validatingwebhookconfiguration -A
|
||||
kubectl get mutatingwebhookconfiguration -A
|
||||
```
|
||||
2. If you see any webhooks related to actions-runner-controller, delete them:
|
||||
1. ```bash
|
||||
kubectl get validatingwebhookconfiguration -A
|
||||
kubectl get mutatingwebhookconfiguration -A
|
||||
```
|
||||
|
||||
2. If you see any webhooks related to actions-runner-controller, delete them:
|
||||
|
||||
```bash
|
||||
kubectl delete mutatingwebhookconfiguration actions-runner-controller-mutating-webhook-configuration
|
||||
kubectl delete validatingwebhookconfiguration actions-runner-controller-validating-webhook-configuration
|
||||
```
|
||||
|
||||
If that didn't work, then your K8s control-plane is probably unable to access the K8s service's clusterIP associated with the admission webhook server:
|
||||
|
||||
1. You're running apiserver as a binary and you didn't make service cluster IPs available to the host network.
|
||||
2. You're running the apiserver in a pod, but your pod network (i.e. the CNI plugin installation and config) is broken, so pods on the K8s control-plane nodes (like kube-apiserver) can't reach ARC's admission webhook server pod(s), which probably run on data-plane nodes.
|
||||
|
||||
|
||||
Another reason could be due to GKEs firewall settings you may run into the following errors when trying to deploy runners on a private GKE cluster:
|
||||
Another reason could be GKE's firewall settings: you may run into the following errors when trying to deploy runners on a private GKE cluster:
|
||||
|
||||
To fix this, you may either:
|
||||
|
||||
@@ -65,7 +67,7 @@ To fix this, you may either:
|
||||
# With helm, you'd set `webhookPort` to the port number of your choice
|
||||
# See https://github.com/actions/actions-runner-controller/pull/1410/files for more information
|
||||
helm upgrade --install --namespace actions-runner-system --create-namespace \
|
||||
--wait actions-runner-controller actions/actions-runner-controller \
|
||||
--wait actions-runner-controller actions-runner-controller/actions-runner-controller \
|
||||
--set webhookPort=10250
|
||||
```
|
||||
|
||||
@@ -93,7 +95,7 @@ To fix this, you may either:
|
||||
**Problem**
|
||||
|
||||
```json
|
||||
2020-11-12T22:17:30.693Z ERROR controller-runtime.controller Reconciler error
|
||||
2020-11-12T22:17:30.693Z ERROR controller-runtime.controller Reconciler error
|
||||
{
|
||||
"controller": "runner",
|
||||
"request": "actions-runner-system/runner-deployment-dk7q8-dk5c9",
|
||||
@@ -104,6 +106,7 @@ To fix this, you may either:
|
||||
**Solution**
|
||||
|
||||
Your base64'ed PAT token has a newline at the end; it needs to be created without a trailing `\n`. Either:
|
||||
|
||||
* `echo -n $TOKEN | base64`
|
||||
* Create the secret as described in the docs using the shell and documented flags
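For example, creating the secret with `kubectl` and `--from-literal` sidesteps the manual base64 step entirely; a sketch using the secret and key names from the legacy ARC docs (adjust to your installation):

```bash
# --from-literal encodes the value for you and adds no trailing newline.
kubectl create secret generic controller-manager \
  -n actions-runner-system \
  --from-literal=github_token="${GITHUB_TOKEN}"
```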
|
||||
|
||||
@@ -111,7 +114,7 @@ Your base64'ed PAT token has a new line at the end, it needs to be created witho
|
||||
|
||||
**Problem**
|
||||
|
||||
```
|
||||
```text
|
||||
Error: UPGRADE FAILED: failed to create resource: Internal error occurred: failed calling webhook "webhook.cert-manager.io": failed to call webhook: Post "https://cert-manager-webhook.cert-manager.svc:443/mutate?timeout=10s": x509: certificate signed by unknown authority
|
||||
```
|
||||
|
||||
@@ -119,7 +122,7 @@ Apparently, it's failing while `helm` is creating one of resources defined in th
|
||||
|
||||
You'd try to tail logs from the `cert-manager-cainjector` and see it's failing with an error like:
|
||||
|
||||
```
|
||||
```text
|
||||
$ kubectl -n cert-manager logs cert-manager-cainjector-7cdbb9c945-g6bt4
|
||||
I0703 03:31:55.159339 1 start.go:91] "starting" version="v1.1.1" revision="3ac7418070e22c87fae4b22603a6b952f797ae96"
|
||||
I0703 03:31:55.615061 1 leaderelection.go:243] attempting to acquire leader lease kube-system/cert-manager-cainjector-leader-election...
|
||||
@@ -137,7 +140,7 @@ Your cluster is based on a new enough Kubernetes of version 1.22 or greater whic
|
||||
|
||||
In many cases, it's not an option to downgrade Kubernetes. So, just upgrade `cert-manager` to a more recent version that does have support for the specific Kubernetes version you're using.
|
||||
|
||||
See https://cert-manager.io/docs/installation/supported-releases/ for the list of available cert-manager versions.
|
||||
See <https://cert-manager.io/docs/installation/supported-releases/> for the list of available cert-manager versions.
|
||||
|
||||
## Operations
|
||||
|
||||
@@ -153,7 +156,7 @@ Sometimes either the runner kind (`kubectl get runners`) or its underlying pod
|
||||
|
||||
Remove the finalizer from the relevant runner kind or pod
|
||||
|
||||
```
|
||||
```text
|
||||
# Get all kind runners and remove the finalizer
|
||||
$ kubectl get runners --no-headers | awk {'print $1'} | xargs kubectl patch runner --type merge -p '{"metadata":{"finalizers":null}}'
|
||||
|
||||
@@ -195,7 +198,7 @@ spec:
|
||||
If you're running your action runners on a service mesh like Istio, you might
|
||||
have problems with runner configuration accompanied by logs like:
|
||||
|
||||
```
|
||||
```text
|
||||
....
|
||||
runner Starting Runner listener with startup type: service
|
||||
runner Started listener process
|
||||
@@ -210,7 +213,7 @@ configuration script tries to communicate with the network.
|
||||
|
||||
More broadly, there are many other circumstances where the runner pod coming up first can cause issues.
|
||||
|
||||
**Solution**<br />
|
||||
**Solution**
|
||||
|
||||
> Added originally to help users with older istio instances.
|
||||
> Newer Istio instances can use Istio's `holdApplicationUntilProxyStarts` attribute ([istio/istio#11130](https://github.com/istio/istio/issues/11130)) to avoid having to delay starting up the runner.
|
||||
@@ -232,7 +235,7 @@ spec:
|
||||
value: "5"
|
||||
```
|
||||
|
||||
## Outgoing network action hangs indefinitely
|
||||
### Outgoing network action hangs indefinitely
|
||||
|
||||
**Problem**
|
||||
|
||||
@@ -278,9 +281,9 @@ spec:
|
||||
```
|
||||
|
||||
You can read the discussion regarding this issue in
|
||||
(#1406)[https://github.com/actions/actions-runner-controller/issues/1046].
|
||||
[#1406](https://github.com/actions/actions-runner-controller/issues/1046).
|
||||
|
||||
## Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns
|
||||
### Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns
|
||||
|
||||
**Problem**
|
||||
|
||||
@@ -292,7 +295,7 @@ You very likely have some dangling workflow jobs stuck in `queued` or `in_progre
|
||||
|
||||
Manually call [the "list workflow runs" API](https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs-for-a-repository), and [remove the dangling workflow job(s)](https://docs.github.com/en/rest/actions/workflow-runs#delete-a-workflow-run).
|
||||
|
||||
## Slow / failure to boot dind sidecar (default runner)
|
||||
### Slow / failure to boot dind sidecar (default runner)
|
||||
|
||||
**Problem**
|
||||
|
||||
@@ -300,4 +303,28 @@ If you noticed that it takes several minutes for sidecar dind container to be cr
|
||||
|
||||
**Solution**
|
||||
|
||||
The solution is to switch to using faster storage, if you are experiencing this issue you are probably using hdd, switch to ssh fixed the problem in my case. Most cloud providers have a list of storage options to use just pick something faster that your current disk, for on prem clusters you will need to invest in some ssds.
|
||||
The solution is to switch to faster storage. If you are experiencing this issue you are probably using HDD storage; switching to SSD storage fixed the problem in my case. Most cloud providers offer a list of storage options, so just pick something faster than your current disk; for on-prem clusters you will need to invest in some SSDs.
|
||||
|
||||
### Dockerd no space left on device
|
||||
|
||||
**Problem**
|
||||
|
||||
If you are running many containers on your runner, you might encounter an issue where the Docker daemon is unable to start new containers and you see the error `no space left on device`.
|
||||
|
||||
**Solution**
|
||||
|
||||
Add a `dockerVarRunVolumeSizeLimit` key in your runner's spec with a higher size limit (the default is 1M). For instance:
|
||||
|
||||
```yaml
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: RunnerDeployment
|
||||
metadata:
|
||||
name: github-runner
|
||||
namespace: github-system
|
||||
spec:
|
||||
replicas: 6
|
||||
template:
|
||||
spec:
|
||||
dockerVarRunVolumeSizeLimit: 50M
|
||||
env: []
|
||||
```
|
||||
@@ -35,7 +35,7 @@ else
|
||||
echo 'Skipped deploying secret "github-webhook-server". Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2
|
||||
fi
|
||||
|
||||
if [ -n "${WEBHOOK_GITHUB_TOKEN}" ]; then
|
||||
if [ -n "${WEBHOOK_GITHUB_TOKEN}" ] && [ -z "${CREATE_SECRETS_USING_HELM}" ]; then
|
||||
kubectl -n actions-runner-system delete secret \
|
||||
actions-metrics-server || :
|
||||
kubectl -n actions-runner-system create secret generic \
|
||||
@@ -61,6 +61,9 @@ if [ "${tool}" == "helm" ]; then
|
||||
flags+=( --set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
|
||||
flags+=( --set actionsMetricsServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
|
||||
fi
|
||||
if [ "${WATCH_NAMESPACE}" != "" ]; then
|
||||
flags+=( --set watchNamespace=${WATCH_NAMESPACE} --set singleNamespace=true)
|
||||
fi
|
||||
if [ "${CHART_VERSION}" != "" ]; then
|
||||
flags+=( --version ${CHART_VERSION})
|
||||
fi
|
||||
@@ -69,6 +72,21 @@ if [ "${tool}" == "helm" ]; then
|
||||
flags+=( --set githubWebhookServer.logFormat=${LOG_FORMAT})
|
||||
flags+=( --set actionsMetricsServer.logFormat=${LOG_FORMAT})
|
||||
fi
|
||||
if [ "${ADMISSION_WEBHOOKS_TIMEOUT}" != "" ]; then
|
||||
flags+=( --set admissionWebHooks.timeoutSeconds=${ADMISSION_WEBHOOKS_TIMEOUT})
|
||||
fi
|
||||
if [ -n "${CREATE_SECRETS_USING_HELM}" ]; then
|
||||
if [ -z "${WEBHOOK_GITHUB_TOKEN}" ]; then
|
||||
echo 'Failed deploying secret "actions-metrics-server" using helm. Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2
|
||||
exit 1
|
||||
fi
|
||||
flags+=( --set actionsMetricsServer.secret.create=true)
|
||||
flags+=( --set actionsMetricsServer.secret.github_token=${WEBHOOK_GITHUB_TOKEN})
|
||||
fi
|
||||
if [ -n "${GITHUB_WEBHOOK_SERVER_ENV_NAME}" ] && [ -n "${GITHUB_WEBHOOK_SERVER_ENV_VALUE}" ]; then
|
||||
flags+=( --set githubWebhookServer.env[0].name=${GITHUB_WEBHOOK_SERVER_ENV_NAME})
|
||||
flags+=( --set githubWebhookServer.env[0].value=${GITHUB_WEBHOOK_SERVER_ENV_VALUE})
|
||||
fi
|
||||
|
||||
set -vx
|
||||
|
||||
@@ -84,6 +102,7 @@ if [ "${tool}" == "helm" ]; then
|
||||
--set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
|
||||
--set actionsMetricsServer.podAnnotations.test-id=${TEST_ID} \
|
||||
${flags[@]} --set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
|
||||
--set image.dindSidecarRepositoryAndTag=${DIND_SIDECAR_REPOSITORY_AND_TAG} \
|
||||
-f ${VALUES_FILE}
|
||||
set +v
|
||||
# To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
|
||||
|
||||
@@ -6,6 +6,10 @@ OP=${OP:-apply}
|
||||
|
||||
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
|
||||
|
||||
# See https://github.com/actions/actions-runner-controller/issues/2123
|
||||
kubectl delete secret generic docker-config || :
|
||||
kubectl create secret generic docker-config --from-file .dockerconfigjson=<(jq -M 'del(.aliases)' $HOME/.docker/config.json) --type=kubernetes.io/dockerconfigjson || :
|
||||
|
||||
cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f -
|
||||
|
||||
if [ -n "${TEST_REPO}" ]; then
|
||||
|
||||
acceptance/testdata/runnerdeploy.envsubst.yaml (vendored, 27 changes)
@@ -95,6 +95,24 @@ spec:
|
||||
# that part is created by dockerd.
|
||||
mountPath: /home/runner/.local
|
||||
readOnly: false
|
||||
# See https://github.com/actions/actions-runner-controller/issues/2123
|
||||
# Be sure to omit the "aliases" field from the config.json.
|
||||
# Otherwise you may encounter nasty errors like:
|
||||
# $ docker build
|
||||
# docker: 'buildx' is not a docker command.
|
||||
# See 'docker --help'
|
||||
# due to the incompatibility between your host docker config.json and the runner environment.
|
||||
# That is, your host docker config.json might contain this:
|
||||
# "aliases": {
|
||||
# "builder": "buildx"
|
||||
# }
|
||||
# And this results in the above error when the runner does not have buildx installed yet.
|
||||
- name: docker-config
|
||||
mountPath: /home/runner/.docker/config.json
|
||||
subPath: config.json
|
||||
readOnly: true
|
||||
- name: docker-config-root
|
||||
mountPath: /home/runner/.docker
|
||||
volumes:
|
||||
- name: rootless-dind-work-dir
|
||||
ephemeral:
|
||||
@@ -105,6 +123,15 @@ spec:
|
||||
resources:
|
||||
requests:
|
||||
storage: 3Gi
|
||||
- name: docker-config
|
||||
# Refer to .dockerconfigjson/.docker/config.json
|
||||
secret:
|
||||
secretName: docker-config
|
||||
items:
|
||||
- key: .dockerconfigjson
|
||||
path: config.json
|
||||
- name: docker-config-root
|
||||
emptyDir: {}
|
||||
|
||||
#
|
||||
# Non-standard working directory
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
# Title
|
||||
|
||||
<!-- ADR titles should typically be imperative sentences. -->
|
||||
|
||||
**Status**: (Proposed|Accepted|Rejected|Superseded|Deprecated)
|
||||
|
||||
## Context
|
||||
|
||||
*What is the issue or background knowledge necessary for future readers
|
||||
to understand why this ADR was written?*
|
||||
|
||||
## Decision
|
||||
|
||||
*What is the change being proposed? / How will it be implemented?*
|
||||
|
||||
## Consequences
|
||||
|
||||
*What becomes easier or more difficult to do because of this change?*
|
||||
@@ -0,0 +1,97 @@
|
||||
/*
|
||||
Copyright 2020 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// AutoscalingListenerSpec defines the desired state of AutoscalingListener
|
||||
type AutoscalingListenerSpec struct {
|
||||
// Required
|
||||
GitHubConfigUrl string `json:"githubConfigUrl,omitempty"`
|
||||
|
||||
// Required
|
||||
GitHubConfigSecret string `json:"githubConfigSecret,omitempty"`
|
||||
|
||||
// Required
|
||||
RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"`
|
||||
|
||||
// Required
|
||||
AutoscalingRunnerSetNamespace string `json:"autoscalingRunnerSetNamespace,omitempty"`
|
||||
|
||||
// Required
|
||||
AutoscalingRunnerSetName string `json:"autoscalingRunnerSetName,omitempty"`
|
||||
|
||||
// Required
|
||||
EphemeralRunnerSetName string `json:"ephemeralRunnerSetName,omitempty"`
|
||||
|
||||
// Required
|
||||
// +kubebuilder:validation:Minimum:=0
|
||||
MaxRunners int `json:"maxRunners,omitempty"`
|
||||
|
||||
// Required
|
||||
// +kubebuilder:validation:Minimum:=0
|
||||
MinRunners int `json:"minRunners,omitempty"`
|
||||
|
||||
// Required
|
||||
Image string `json:"image,omitempty"`
|
||||
|
||||
// Required
|
||||
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
|
||||
|
||||
// Required
|
||||
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
|
||||
|
||||
// +optional
|
||||
Proxy *ProxyConfig `json:"proxy,omitempty"`
|
||||
|
||||
// +optional
|
||||
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
|
||||
}
|
||||
|
||||
// AutoscalingListenerStatus defines the observed state of AutoscalingListener
|
||||
type AutoscalingListenerStatus struct{}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
//+kubebuilder:subresource:status
|
||||
//+kubebuilder:printcolumn:JSONPath=".spec.githubConfigUrl",name=GitHub Configure URL,type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=".spec.autoscalingRunnerSetNamespace",name=AutoscalingRunnerSet Namespace,type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=".spec.autoscalingRunnerSetName",name=AutoscalingRunnerSet Name,type=string
|
||||
|
||||
// AutoscalingListener is the Schema for the autoscalinglisteners API
|
||||
type AutoscalingListener struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec AutoscalingListenerSpec `json:"spec,omitempty"`
|
||||
Status AutoscalingListenerStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
|
||||
// AutoscalingListenerList contains a list of AutoscalingListener
|
||||
type AutoscalingListenerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []AutoscalingListener `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&AutoscalingListener{}, &AutoscalingListenerList{})
|
||||
}
|
||||
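The json tags above define the YAML shape of this CRD. As a rough orientation sketch (names, namespace, and image are illustrative, and this object is normally expected to be created by the controller on behalf of an AutoscalingRunnerSet rather than written by hand), an AutoscalingListener could look like:

```yaml
apiVersion: actions.github.com/v1alpha1
kind: AutoscalingListener
metadata:
  name: example-listener            # illustrative name
  namespace: arc-system             # illustrative namespace
spec:
  githubConfigUrl: https://github.com/my-org        # illustrative URL
  githubConfigSecret: github-config-secret          # illustrative Secret name
  runnerScaleSetId: 1
  autoscalingRunnerSetNamespace: arc-runners
  autoscalingRunnerSetName: example-runner-set
  ephemeralRunnerSetName: example-runner-set-abcde
  minRunners: 0
  maxRunners: 5
  image: ghcr.io/example/listener:latest            # illustrative image
  imagePullPolicy: IfNotPresent
```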
apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go (new file, 289 lines)
@@ -0,0 +1,289 @@
|
||||
/*
|
||||
Copyright 2020 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/actions/actions-runner-controller/hash"
|
||||
"golang.org/x/net/http/httpproxy"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
//+kubebuilder:subresource:status
|
||||
//+kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=integer
|
||||
//+kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=integer
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=integer
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.state",name=State,type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer
|
||||
|
||||
// AutoscalingRunnerSet is the Schema for the autoscalingrunnersets API
|
||||
type AutoscalingRunnerSet struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec AutoscalingRunnerSetSpec `json:"spec,omitempty"`
|
||||
Status AutoscalingRunnerSetStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// AutoscalingRunnerSetSpec defines the desired state of AutoscalingRunnerSet
|
||||
type AutoscalingRunnerSetSpec struct {
|
||||
// Required
|
||||
GitHubConfigUrl string `json:"githubConfigUrl,omitempty"`
|
||||
|
||||
// Required
|
||||
GitHubConfigSecret string `json:"githubConfigSecret,omitempty"`
|
||||
|
||||
// +optional
|
||||
RunnerGroup string `json:"runnerGroup,omitempty"`
|
||||
|
||||
// +optional
|
||||
RunnerScaleSetName string `json:"runnerScaleSetName,omitempty"`
|
||||
|
||||
// +optional
|
||||
Proxy *ProxyConfig `json:"proxy,omitempty"`
|
||||
|
||||
// +optional
|
||||
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
|
||||
|
||||
// Required
|
||||
Template corev1.PodTemplateSpec `json:"template,omitempty"`
|
||||
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum:=0
|
||||
MaxRunners *int `json:"maxRunners,omitempty"`
|
||||
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum:=0
|
||||
MinRunners *int `json:"minRunners,omitempty"`
|
||||
}
|
||||
|
||||
type GitHubServerTLSConfig struct {
|
||||
// Required
|
||||
CertificateFrom *TLSCertificateSource `json:"certificateFrom,omitempty"`
|
||||
}
|
||||
|
||||
func (c *GitHubServerTLSConfig) ToCertPool(keyFetcher func(name, key string) ([]byte, error)) (*x509.CertPool, error) {
|
||||
if c.CertificateFrom == nil {
|
||||
return nil, fmt.Errorf("certificateFrom not specified")
|
||||
}
|
||||
|
||||
if c.CertificateFrom.ConfigMapKeyRef == nil {
|
||||
return nil, fmt.Errorf("configMapKeyRef not specified")
|
||||
}
|
||||
|
||||
cert, err := keyFetcher(c.CertificateFrom.ConfigMapKeyRef.Name, c.CertificateFrom.ConfigMapKeyRef.Key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to fetch key %q in configmap %q: %w",
|
||||
c.CertificateFrom.ConfigMapKeyRef.Key,
|
||||
c.CertificateFrom.ConfigMapKeyRef.Name,
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
systemPool, err := x509.SystemCertPool()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get system cert pool: %w", err)
|
||||
}
|
||||
|
||||
pool := systemPool.Clone()
|
||||
if !pool.AppendCertsFromPEM(cert) {
|
||||
return nil, fmt.Errorf("failed to parse certificate")
|
||||
}
|
||||
|
||||
return pool, nil
|
||||
}
|
||||
|
||||
type TLSCertificateSource struct {
|
||||
// Required
|
||||
ConfigMapKeyRef *corev1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty"`
|
||||
}
|
||||
|
||||
type ProxyConfig struct {
|
||||
// +optional
|
||||
HTTP *ProxyServerConfig `json:"http,omitempty"`
|
||||
|
||||
// +optional
|
||||
HTTPS *ProxyServerConfig `json:"https,omitempty"`
|
||||
|
||||
// +optional
|
||||
NoProxy []string `json:"noProxy,omitempty"`
|
||||
}
|
||||
|
||||
func (c *ProxyConfig) toHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) {
|
||||
config := &httpproxy.Config{
|
||||
NoProxy: strings.Join(c.NoProxy, ","),
|
||||
}
|
||||
|
||||
if c.HTTP != nil {
|
||||
u, err := url.Parse(c.HTTP.Url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse proxy http url %q: %w", c.HTTP.Url, err)
|
||||
}
|
||||
|
||||
if c.HTTP.CredentialSecretRef != "" {
|
||||
secret, err := secretFetcher(c.HTTP.CredentialSecretRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to get secret %s for http proxy: %w",
|
||||
c.HTTP.CredentialSecretRef,
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
u.User = url.UserPassword(
|
||||
string(secret.Data["username"]),
|
||||
string(secret.Data["password"]),
|
||||
)
|
||||
}
|
||||
|
||||
config.HTTPProxy = u.String()
|
||||
}
|
||||
|
||||
if c.HTTPS != nil {
|
||||
u, err := url.Parse(c.HTTPS.Url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse proxy https url %q: %w", c.HTTPS.Url, err)
|
||||
}
|
||||
|
||||
if c.HTTPS.CredentialSecretRef != "" {
|
||||
secret, err := secretFetcher(c.HTTPS.CredentialSecretRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to get secret %s for https proxy: %w",
|
||||
c.HTTPS.CredentialSecretRef,
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
u.User = url.UserPassword(
|
||||
string(secret.Data["username"]),
|
||||
string(secret.Data["password"]),
|
||||
)
|
||||
}
|
||||
|
||||
config.HTTPSProxy = u.String()
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, error)) (map[string][]byte, error) {
|
||||
config, err := c.toHTTPProxyConfig(secretFetcher)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data := map[string][]byte{}
|
||||
data["http_proxy"] = []byte(config.HTTPProxy)
|
||||
data["https_proxy"] = []byte(config.HTTPSProxy)
|
||||
data["no_proxy"] = []byte(config.NoProxy)
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (c *ProxyConfig) ProxyFunc(secretFetcher func(string) (*corev1.Secret, error)) (func(*http.Request) (*url.URL, error), error) {
|
||||
config, err := c.toHTTPProxyConfig(secretFetcher)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
proxyFunc := func(req *http.Request) (*url.URL, error) {
|
||||
return config.ProxyFunc()(req.URL)
|
||||
}
|
||||
|
||||
return proxyFunc, nil
|
||||
}
|
||||
|
||||
type ProxyServerConfig struct {
|
||||
// Required
|
||||
Url string `json:"url,omitempty"`
|
||||
|
||||
// +optional
|
||||
CredentialSecretRef string `json:"credentialSecretRef,omitempty"`
|
||||
}
|
||||
|
||||
// AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet
|
||||
type AutoscalingRunnerSetStatus struct {
|
||||
// +optional
|
||||
CurrentRunners int `json:"currentRunners"`
|
||||
|
||||
// +optional
|
||||
State string `json:"state"`
|
||||
|
||||
// EphemeralRunner counts separated by the stage ephemeral runners are in, taken from the EphemeralRunnerSet
|
||||
|
||||
//+optional
|
||||
PendingEphemeralRunners int `json:"pendingEphemeralRunners"`
|
||||
// +optional
|
||||
RunningEphemeralRunners int `json:"runningEphemeralRunners"`
|
||||
// +optional
|
||||
FailedEphemeralRunners int `json:"failedEphemeralRunners"`
|
||||
}
|
||||
|
||||
func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {
|
||||
arsSpec := ars.Spec.DeepCopy()
|
||||
spec := arsSpec
|
||||
return hash.ComputeTemplateHash(&spec)
|
||||
}
|
||||
|
||||
func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
|
||||
type runnerSetSpec struct {
|
||||
GitHubConfigUrl string
|
||||
GitHubConfigSecret string
|
||||
RunnerGroup string
|
||||
RunnerScaleSetName string
|
||||
Proxy *ProxyConfig
|
||||
GitHubServerTLS *GitHubServerTLSConfig
|
||||
Template corev1.PodTemplateSpec
|
||||
}
|
||||
spec := &runnerSetSpec{
|
||||
GitHubConfigUrl: ars.Spec.GitHubConfigUrl,
|
||||
GitHubConfigSecret: ars.Spec.GitHubConfigSecret,
|
||||
RunnerGroup: ars.Spec.RunnerGroup,
|
||||
RunnerScaleSetName: ars.Spec.RunnerScaleSetName,
|
||||
Proxy: ars.Spec.Proxy,
|
||||
GitHubServerTLS: ars.Spec.GitHubServerTLS,
|
||||
Template: ars.Spec.Template,
|
||||
}
|
||||
return hash.ComputeTemplateHash(&spec)
|
||||
}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
|
||||
// AutoscalingRunnerSetList contains a list of AutoscalingRunnerSet
|
||||
type AutoscalingRunnerSetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []AutoscalingRunnerSet `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&AutoscalingRunnerSet{}, &AutoscalingRunnerSetList{})
|
||||
}
|
||||
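Taken together, the AutoscalingRunnerSetSpec, ProxyConfig, GitHubServerTLSConfig, and TLSCertificateSource types above translate into a manifest along the lines of the sketch below (all names, URLs, and the runner image are illustrative; only the field layout is taken from the json tags):

```yaml
apiVersion: actions.github.com/v1alpha1
kind: AutoscalingRunnerSet
metadata:
  name: example-runner-set          # illustrative
  namespace: arc-runners            # illustrative
spec:
  githubConfigUrl: https://github.com/my-org/my-repo   # illustrative
  githubConfigSecret: github-config-secret             # illustrative Secret name
  runnerGroup: default
  runnerScaleSetName: example-runner-set
  minRunners: 0
  maxRunners: 5
  proxy:
    http:
      url: http://proxy.example.com:8080
      credentialSecretRef: proxy-credentials           # Secret with username/password keys
    https:
      url: https://proxy.example.com:8080
      credentialSecretRef: proxy-credentials
    noProxy:
      - internal.example.com
  githubServerTLS:
    certificateFrom:
      configMapKeyRef:
        name: github-ca              # illustrative ConfigMap name
        key: ca.crt                  # illustrative key
  template:
    spec:
      containers:
        - name: runner
          image: ghcr.io/example/runner:latest          # illustrative
```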
apis/actions.github.com/v1alpha1/ephemeralrunner_types.go (new file, 133 lines)
@@ -0,0 +1,133 @@
|
||||
/*
|
||||
Copyright 2020 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
//+kubebuilder:subresource:status
|
||||
// +kubebuilder:printcolumn:JSONPath=".spec.githubConfigUrl",name="GitHub Config URL",type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.runnerId",name=RunnerId,type=number
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.jobRepositoryName",name=JobRepository,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.jobWorkflowRef",name=JobWorkflowRef,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.workflowRunId",name=WorkflowRunId,type=number
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.jobDisplayName",name=JobDisplayName,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
|
||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
|
||||
|
||||
// EphemeralRunner is the Schema for the ephemeralrunners API
|
||||
type EphemeralRunner struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec EphemeralRunnerSpec `json:"spec,omitempty"`
|
||||
Status EphemeralRunnerStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// EphemeralRunnerSpec defines the desired state of EphemeralRunner
|
||||
type EphemeralRunnerSpec struct {
|
||||
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
|
||||
// Important: Run "make" to regenerate code after modifying this file
|
||||
|
||||
// +required
|
||||
GitHubConfigUrl string `json:"githubConfigUrl,omitempty"`
|
||||
|
||||
// +required
|
||||
GitHubConfigSecret string `json:"githubConfigSecret,omitempty"`
|
||||
|
||||
// +required
|
||||
RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"`
|
||||
|
||||
// +optional
|
||||
Proxy *ProxyConfig `json:"proxy,omitempty"`
|
||||
|
||||
// +optional
|
||||
ProxySecretRef string `json:"proxySecretRef,omitempty"`
|
||||
|
||||
// +optional
|
||||
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
|
||||
|
||||
// +required
|
||||
corev1.PodTemplateSpec `json:",inline"`
|
||||
}
|
||||
|
||||
// EphemeralRunnerStatus defines the observed state of EphemeralRunner
|
||||
type EphemeralRunnerStatus struct {
|
||||
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
|
||||
// Important: Run "make" to regenerate code after modifying this file
|
||||
|
||||
// Turns true only if the runner is online.
|
||||
// +optional
|
||||
Ready bool `json:"ready"`
|
||||
// Phase describes phases where EphemeralRunner can be in.
|
||||
// The underlying type is a PodPhase, but the meaning is more restrictive
|
||||
//
|
||||
// The PodFailed phase should be set only when EphemeralRunner fails to start
|
||||
// after multiple retries. That signals that this EphemeralRunner won't work,
|
||||
// and manual inspection is required
|
||||
//
|
||||
// The PodSucceeded phase should be set only when confirmed that EphemeralRunner
|
||||
// actually executed the job and has been removed from the service.
|
||||
// +optional
|
||||
Phase corev1.PodPhase `json:"phase,omitempty"`
|
||||
// +optional
|
||||
Reason string `json:"reason,omitempty"`
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
|
||||
// +optional
|
||||
RunnerId int `json:"runnerId,omitempty"`
|
||||
// +optional
|
||||
RunnerName string `json:"runnerName,omitempty"`
|
||||
// +optional
|
||||
RunnerJITConfig string `json:"runnerJITConfig,omitempty"`
|
||||
|
||||
// +optional
|
||||
Failures map[string]bool `json:"failures,omitempty"`
|
||||
|
||||
// +optional
|
||||
JobRequestId int64 `json:"jobRequestId,omitempty"`
|
||||
|
||||
// +optional
|
||||
JobRepositoryName string `json:"jobRepositoryName,omitempty"`
|
||||
|
||||
// +optional
|
||||
JobWorkflowRef string `json:"jobWorkflowRef,omitempty"`
|
||||
|
||||
// +optional
|
||||
WorkflowRunId int64 `json:"workflowRunId,omitempty"`
|
||||
|
||||
// +optional
|
||||
JobDisplayName string `json:"jobDisplayName,omitempty"`
|
||||
}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
|
||||
// EphemeralRunnerList contains a list of EphemeralRunner
|
||||
type EphemeralRunnerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []EphemeralRunner `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&EphemeralRunner{}, &EphemeralRunnerList{})
|
||||
}
|
||||
apis/actions.github.com/v1alpha1/ephemeralrunnerset_types.go (new file, 75 lines)
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
Copyright 2020 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// EphemeralRunnerSetSpec defines the desired state of EphemeralRunnerSet
|
||||
type EphemeralRunnerSetSpec struct {
|
||||
// Replicas is the number of desired EphemeralRunner resources in the k8s namespace.
|
||||
Replicas int `json:"replicas,omitempty"`
|
||||
|
||||
EphemeralRunnerSpec EphemeralRunnerSpec `json:"ephemeralRunnerSpec,omitempty"`
|
||||
}
|
||||
|
||||
// EphemeralRunnerSetStatus defines the observed state of EphemeralRunnerSet
|
||||
type EphemeralRunnerSetStatus struct {
|
||||
// CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet.
|
||||
CurrentReplicas int `json:"currentReplicas"`
|
||||
|
||||
// EphemeralRunner counts separated by the stage ephemeral runners are in
|
||||
|
||||
// +optional
|
||||
PendingEphemeralRunners int `json:"pendingEphemeralRunners"`
|
||||
// +optional
|
||||
RunningEphemeralRunners int `json:"runningEphemeralRunners"`
|
||||
// +optional
|
||||
FailedEphemeralRunners int `json:"failedEphemeralRunners"`
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="DesiredReplicas",type="integer"
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.currentReplicas", name="CurrentReplicas",type="integer"
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer
|
||||
|
||||
// EphemeralRunnerSet is the Schema for the ephemeralrunnersets API
|
||||
type EphemeralRunnerSet struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec EphemeralRunnerSetSpec `json:"spec,omitempty"`
|
||||
Status EphemeralRunnerSetStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
|
||||
// EphemeralRunnerSetList contains a list of EphemeralRunnerSet
|
||||
type EphemeralRunnerSetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []EphemeralRunnerSet `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&EphemeralRunnerSet{}, &EphemeralRunnerSetList{})
|
||||
}
|
||||
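Since EphemeralRunnerSpec embeds corev1.PodTemplateSpec inline, the `ephemeralRunnerSpec` block mixes the runner configuration with the pod template's own keys. A minimal sketch (names and image illustrative):

```yaml
apiVersion: actions.github.com/v1alpha1
kind: EphemeralRunnerSet
metadata:
  name: example-runner-set-abcde     # illustrative
  namespace: arc-runners             # illustrative
spec:
  replicas: 3
  ephemeralRunnerSpec:
    githubConfigUrl: https://github.com/my-org/my-repo  # illustrative
    githubConfigSecret: github-config-secret            # illustrative Secret name
    runnerScaleSetId: 1
    spec:                            # pod spec from the inlined PodTemplateSpec
      containers:
        - name: runner
          image: ghcr.io/example/runner:latest           # illustrative
```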
apis/actions.github.com/v1alpha1/groupversion_info.go (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
Copyright 2020 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package v1alpha1 contains API Schema definitions for the actions.github.com v1alpha1 API group
|
||||
// +kubebuilder:object:generate=true
|
||||
// +groupName=actions.github.com
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"sigs.k8s.io/controller-runtime/pkg/scheme"
|
||||
)
|
||||
|
||||
var (
|
||||
// GroupVersion is group version used to register these objects
|
||||
GroupVersion = schema.GroupVersion{Group: "actions.github.com", Version: "v1alpha1"}
|
||||
|
||||
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
|
||||
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
|
||||
|
||||
// AddToScheme adds the types in this group-version to the given scheme.
|
||||
AddToScheme = SchemeBuilder.AddToScheme
|
||||
)
|
||||
apis/actions.github.com/v1alpha1/proxy_config_test.go (new file, 118 lines)
@@ -0,0 +1,118 @@
|
||||
package v1alpha1_test
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestProxyConfig_ToSecret(t *testing.T) {
|
||||
config := &v1alpha1.ProxyConfig{
|
||||
HTTP: &v1alpha1.ProxyServerConfig{
|
||||
Url: "http://proxy.example.com:8080",
|
||||
CredentialSecretRef: "my-secret",
|
||||
},
|
||||
HTTPS: &v1alpha1.ProxyServerConfig{
|
||||
Url: "https://proxy.example.com:8080",
|
||||
CredentialSecretRef: "my-secret",
|
||||
},
|
||||
NoProxy: []string{
|
||||
"noproxy.example.com",
|
||||
"noproxy2.example.com",
|
||||
},
|
||||
}
|
||||
|
||||
secretFetcher := func(string) (*corev1.Secret, error) {
|
||||
return &corev1.Secret{
|
||||
Data: map[string][]byte{
|
||||
"username": []byte("username"),
|
||||
"password": []byte("password"),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
result, err := config.ToSecretData(secretFetcher)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
|
||||
assert.Equal(t, "http://username:password@proxy.example.com:8080", string(result["http_proxy"]))
|
||||
assert.Equal(t, "https://username:password@proxy.example.com:8080", string(result["https_proxy"]))
|
||||
assert.Equal(t, "noproxy.example.com,noproxy2.example.com", string(result["no_proxy"]))
|
||||
}
|
||||
|
||||
func TestProxyConfig_ProxyFunc(t *testing.T) {
|
||||
config := &v1alpha1.ProxyConfig{
|
||||
HTTP: &v1alpha1.ProxyServerConfig{
|
||||
Url: "http://proxy.example.com:8080",
|
||||
CredentialSecretRef: "my-secret",
|
||||
},
|
||||
HTTPS: &v1alpha1.ProxyServerConfig{
|
||||
Url: "https://proxy.example.com:8080",
|
||||
CredentialSecretRef: "my-secret",
|
||||
},
|
||||
NoProxy: []string{
|
||||
"noproxy.example.com",
|
||||
"noproxy2.example.com",
|
||||
},
|
||||
}
|
||||
|
||||
secretFetcher := func(string) (*corev1.Secret, error) {
|
||||
return &corev1.Secret{
|
||||
Data: map[string][]byte{
|
||||
"username": []byte("username"),
|
||||
"password": []byte("password"),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
result, err := config.ProxyFunc(secretFetcher)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{
|
||||
name: "http target",
|
||||
in: "http://target.com",
|
||||
out: "http://username:password@proxy.example.com:8080",
|
||||
},
|
||||
{
|
||||
name: "https target",
|
||||
in: "https://target.com",
|
||||
out: "https://username:password@proxy.example.com:8080",
|
||||
},
|
||||
{
|
||||
name: "no proxy",
|
||||
in: "https://noproxy.example.com",
|
||||
out: "",
|
||||
},
|
||||
{
|
||||
name: "no proxy 2",
|
||||
in: "https://noproxy2.example.com",
|
||||
out: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", test.in, nil)
|
||||
require.NoError(t, err)
|
||||
u, err := result(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
if test.out == "" {
|
||||
assert.Nil(t, u)
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(t, test.out, u.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
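The `secretFetcher` stub in these tests mirrors what the controller is assumed to read at runtime: a Secret whose data carries `username` and `password` keys, referenced from `credentialSecretRef` and spliced into the proxy URL as userinfo. Such a Secret could be sketched as (name and values illustrative):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: proxy-credentials            # must match credentialSecretRef (illustrative name)
  namespace: arc-runners             # illustrative
type: Opaque
stringData:
  username: proxyuser                # illustrative
  password: proxypass                # illustrative
```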
apis/actions.github.com/v1alpha1/tls_config_test.go (new file, 105 lines)
@@ -0,0 +1,105 @@
|
||||
package v1alpha1_test
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
"github.com/actions/actions-runner-controller/github/actions/testserver"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
|
||||
t.Run("returns an error if CertificateFrom not specified", func(t *testing.T) {
|
||||
c := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: nil,
|
||||
}
|
||||
|
||||
pool, err := c.ToCertPool(nil)
|
||||
assert.Nil(t, pool)
|
||||
|
||||
require.Error(t, err)
|
||||
assert.Equal(t, err.Error(), "certificateFrom not specified")
|
||||
})
|
||||
|
||||
t.Run("returns an error if CertificateFrom.ConfigMapKeyRef not specified", func(t *testing.T) {
|
||||
c := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{},
|
||||
}
|
||||
|
||||
pool, err := c.ToCertPool(nil)
|
||||
assert.Nil(t, pool)
|
||||
|
||||
require.Error(t, err)
|
||||
assert.Equal(t, err.Error(), "configMapKeyRef not specified")
|
||||
})
|
||||
|
||||
t.Run("returns a valid cert pool with correct configuration", func(t *testing.T) {
|
||||
c := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: "name",
|
||||
},
|
||||
Key: "key",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
certsFolder := filepath.Join(
|
||||
"../../../",
|
||||
"github",
|
||||
"actions",
|
||||
"testdata",
|
||||
)
|
||||
|
||||
fetcher := func(name, key string) ([]byte, error) {
|
||||
cert, err := os.ReadFile(filepath.Join(certsFolder, "rootCA.crt"))
|
||||
require.NoError(t, err)
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
ok := pool.AppendCertsFromPEM(cert)
|
||||
assert.True(t, ok)
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
pool, err := c.ToCertPool(fetcher)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, pool)
|
||||
|
||||
// can be used to communicate with a server
|
||||
serverSuccessfullyCalled := false
|
||||
server := testserver.NewUnstarted(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
serverSuccessfullyCalled = true
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(
|
||||
filepath.Join(certsFolder, "server.crt"),
|
||||
filepath.Join(certsFolder, "server.key"),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}}
|
||||
server.StartTLS()
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
RootCAs: pool,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err = client.Get(server.URL)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, serverSuccessfullyCalled)
|
||||
})
|
||||
}
|
||||
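The happy-path test above fetches a PEM-encoded root CA by ConfigMap name and key, which is the in-cluster object that `githubServerTLS.certificateFrom.configMapKeyRef` is expected to reference. A matching ConfigMap could be sketched as (name, key, and certificate body illustrative):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: github-ca                    # illustrative; referenced via configMapKeyRef.name
  namespace: arc-runners             # illustrative
data:
  ca.crt: |                          # illustrative; referenced via configMapKeyRef.key
    -----BEGIN CERTIFICATE-----
    ...PEM-encoded root CA of the GitHub Enterprise server...
    -----END CERTIFICATE-----
```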
apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go (new file, 523 lines)
@@ -0,0 +1,523 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by controller-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AutoscalingListener) DeepCopyInto(out *AutoscalingListener) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
out.Status = in.Status
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListener.
|
||||
func (in *AutoscalingListener) DeepCopy() *AutoscalingListener {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AutoscalingListener)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *AutoscalingListener) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AutoscalingListenerList) DeepCopyInto(out *AutoscalingListenerList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]AutoscalingListener, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerList.
|
||||
func (in *AutoscalingListenerList) DeepCopy() *AutoscalingListenerList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AutoscalingListenerList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *AutoscalingListenerList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AutoscalingListenerSpec) DeepCopyInto(out *AutoscalingListenerSpec) {
|
||||
*out = *in
|
||||
if in.ImagePullSecrets != nil {
|
||||
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
|
||||
*out = make([]v1.LocalObjectReference, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Proxy != nil {
|
||||
in, out := &in.Proxy, &out.Proxy
|
||||
*out = new(ProxyConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.GitHubServerTLS != nil {
|
||||
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
|
||||
*out = new(GitHubServerTLSConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerSpec.
|
||||
func (in *AutoscalingListenerSpec) DeepCopy() *AutoscalingListenerSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AutoscalingListenerSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AutoscalingListenerStatus) DeepCopyInto(out *AutoscalingListenerStatus) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerStatus.
|
||||
func (in *AutoscalingListenerStatus) DeepCopy() *AutoscalingListenerStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AutoscalingListenerStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AutoscalingRunnerSet) DeepCopyInto(out *AutoscalingRunnerSet) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
out.Status = in.Status
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSet.
|
||||
func (in *AutoscalingRunnerSet) DeepCopy() *AutoscalingRunnerSet {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AutoscalingRunnerSet)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *AutoscalingRunnerSet) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AutoscalingRunnerSetList) DeepCopyInto(out *AutoscalingRunnerSetList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]AutoscalingRunnerSet, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSetList.
|
||||
func (in *AutoscalingRunnerSetList) DeepCopy() *AutoscalingRunnerSetList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AutoscalingRunnerSetList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *AutoscalingRunnerSetList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AutoscalingRunnerSetSpec) DeepCopyInto(out *AutoscalingRunnerSetSpec) {
|
||||
*out = *in
|
||||
if in.Proxy != nil {
|
||||
in, out := &in.Proxy, &out.Proxy
|
||||
*out = new(ProxyConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.GitHubServerTLS != nil {
|
||||
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
|
||||
*out = new(GitHubServerTLSConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
in.Template.DeepCopyInto(&out.Template)
|
||||
if in.MaxRunners != nil {
|
||||
in, out := &in.MaxRunners, &out.MaxRunners
|
||||
*out = new(int)
|
||||
**out = **in
|
||||
}
|
||||
if in.MinRunners != nil {
|
||||
in, out := &in.MinRunners, &out.MinRunners
|
||||
*out = new(int)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSetSpec.
|
||||
func (in *AutoscalingRunnerSetSpec) DeepCopy() *AutoscalingRunnerSetSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AutoscalingRunnerSetSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AutoscalingRunnerSetStatus) DeepCopyInto(out *AutoscalingRunnerSetStatus) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSetStatus.
|
||||
func (in *AutoscalingRunnerSetStatus) DeepCopy() *AutoscalingRunnerSetStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AutoscalingRunnerSetStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EphemeralRunner) DeepCopyInto(out *EphemeralRunner) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunner.
|
||||
func (in *EphemeralRunner) DeepCopy() *EphemeralRunner {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EphemeralRunner)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *EphemeralRunner) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EphemeralRunnerList) DeepCopyInto(out *EphemeralRunnerList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]EphemeralRunner, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerList.
|
||||
func (in *EphemeralRunnerList) DeepCopy() *EphemeralRunnerList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EphemeralRunnerList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *EphemeralRunnerList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EphemeralRunnerSet) DeepCopyInto(out *EphemeralRunnerSet) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
out.Status = in.Status
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSet.
|
||||
func (in *EphemeralRunnerSet) DeepCopy() *EphemeralRunnerSet {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EphemeralRunnerSet)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *EphemeralRunnerSet) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EphemeralRunnerSetList) DeepCopyInto(out *EphemeralRunnerSetList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]EphemeralRunnerSet, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSetList.
|
||||
func (in *EphemeralRunnerSetList) DeepCopy() *EphemeralRunnerSetList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EphemeralRunnerSetList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *EphemeralRunnerSetList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EphemeralRunnerSetSpec) DeepCopyInto(out *EphemeralRunnerSetSpec) {
|
||||
*out = *in
|
||||
in.EphemeralRunnerSpec.DeepCopyInto(&out.EphemeralRunnerSpec)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSetSpec.
|
||||
func (in *EphemeralRunnerSetSpec) DeepCopy() *EphemeralRunnerSetSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EphemeralRunnerSetSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EphemeralRunnerSetStatus) DeepCopyInto(out *EphemeralRunnerSetStatus) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSetStatus.
|
||||
func (in *EphemeralRunnerSetStatus) DeepCopy() *EphemeralRunnerSetStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EphemeralRunnerSetStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EphemeralRunnerSpec) DeepCopyInto(out *EphemeralRunnerSpec) {
|
||||
*out = *in
|
||||
if in.Proxy != nil {
|
||||
in, out := &in.Proxy, &out.Proxy
|
||||
*out = new(ProxyConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.GitHubServerTLS != nil {
|
||||
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
|
||||
*out = new(GitHubServerTLSConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
in.PodTemplateSpec.DeepCopyInto(&out.PodTemplateSpec)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSpec.
|
||||
func (in *EphemeralRunnerSpec) DeepCopy() *EphemeralRunnerSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EphemeralRunnerSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EphemeralRunnerStatus) DeepCopyInto(out *EphemeralRunnerStatus) {
|
||||
*out = *in
|
||||
if in.Failures != nil {
|
||||
in, out := &in.Failures, &out.Failures
|
||||
*out = make(map[string]bool, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerStatus.
|
||||
func (in *EphemeralRunnerStatus) DeepCopy() *EphemeralRunnerStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EphemeralRunnerStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GitHubServerTLSConfig) DeepCopyInto(out *GitHubServerTLSConfig) {
|
||||
*out = *in
|
||||
if in.CertificateFrom != nil {
|
||||
in, out := &in.CertificateFrom, &out.CertificateFrom
|
||||
*out = new(TLSCertificateSource)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubServerTLSConfig.
|
||||
func (in *GitHubServerTLSConfig) DeepCopy() *GitHubServerTLSConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(GitHubServerTLSConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
|
||||
*out = *in
|
||||
if in.HTTP != nil {
|
||||
in, out := &in.HTTP, &out.HTTP
|
||||
*out = new(ProxyServerConfig)
|
||||
**out = **in
|
||||
}
|
||||
if in.HTTPS != nil {
|
||||
in, out := &in.HTTPS, &out.HTTPS
|
||||
*out = new(ProxyServerConfig)
|
||||
**out = **in
|
||||
}
|
||||
if in.NoProxy != nil {
|
||||
in, out := &in.NoProxy, &out.NoProxy
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig.
|
||||
func (in *ProxyConfig) DeepCopy() *ProxyConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ProxyConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ProxyServerConfig) DeepCopyInto(out *ProxyServerConfig) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyServerConfig.
|
||||
func (in *ProxyServerConfig) DeepCopy() *ProxyServerConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ProxyServerConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TLSCertificateSource) DeepCopyInto(out *TLSCertificateSource) {
|
||||
*out = *in
|
||||
if in.ConfigMapKeyRef != nil {
|
||||
in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
|
||||
*out = new(v1.ConfigMapKeySelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateSource.
|
||||
func (in *TLSCertificateSource) DeepCopy() *TLSCertificateSource {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TLSCertificateSource)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
|
||||
// HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler
|
||||
type HorizontalRunnerAutoscalerSpec struct {
|
||||
// ScaleTargetRef sis the reference to scaled resource like RunnerDeployment
|
||||
// ScaleTargetRef is the reference to scaled resource like RunnerDeployment
|
||||
ScaleTargetRef ScaleTargetRef `json:"scaleTargetRef,omitempty"`
|
||||
|
||||
// MinReplicas is the minimum number of replicas the deployment is allowed to scale
|
||||
|
||||
@@ -70,6 +70,8 @@ type RunnerConfig struct {
|
||||
// +optional
|
||||
DockerRegistryMirror *string `json:"dockerRegistryMirror,omitempty"`
|
||||
// +optional
|
||||
DockerVarRunVolumeSizeLimit *resource.Quantity `json:"dockerVarRunVolumeSizeLimit,omitempty"`
|
||||
// +optional
|
||||
VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
|
||||
// +optional
|
||||
VolumeStorageMedium *string `json:"volumeStorageMedium,omitempty"`
|
||||
@@ -248,10 +250,60 @@ type RunnerStatus struct {
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
// +optional
|
||||
WorkflowStatus *WorkflowStatus `json:"workflow"`
|
||||
// +optional
|
||||
// +nullable
|
||||
LastRegistrationCheckTime *metav1.Time `json:"lastRegistrationCheckTime,omitempty"`
|
||||
}
|
||||
|
||||
// WorkflowStatus contains various information that is propagated
|
||||
// from GitHub Actions workflow run environment variables to
|
||||
// ease monitoring workflow run/job/steps that are triggered on the runner.
|
||||
type WorkflowStatus struct {
|
||||
// +optional
|
||||
// Name is the name of the workflow
|
||||
// that is triggered within the runner.
|
||||
// It corresponds to GITHUB_WORKFLOW defined in
|
||||
// https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
Name string `json:"name,omitempty"`
|
||||
// +optional
|
||||
// Repository is the owner and repository name of the workflow
|
||||
// that is triggered within the runner.
|
||||
// It corresponds to GITHUB_REPOSITORY defined in
|
||||
// https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
Repository string `json:"repository,omitempty"`
|
||||
// +optional
|
||||
// RepositoryOwner is the repository owner's name for the workflow
|
||||
// that is triggered within the runner.
|
||||
// It corresponds to GITHUB_REPOSITORY_OWNER defined in
|
||||
// https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
RepositoryOwner string `json:"repositoryOwner,omitempty"`
|
||||
// +optional
|
||||
// RunNumber is the unique number for the current workflow run
|
||||
// that is triggered within the runner.
|
||||
// It corresponds to GITHUB_RUN_NUMBER defined in
|
||||
// https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
RunNumber string `json:"runNumber,omitempty"`
|
||||
// +optional
|
||||
// RunID is the unique number for the current workflow run
|
||||
// that is triggered within the runner.
|
||||
// It corresponds to GITHUB_RUN_ID defined in
|
||||
// https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
RunID string `json:"runID,omitempty"`
|
||||
// +optional
|
||||
// Job is the name of the current job
|
||||
// that is triggered within the runner.
|
||||
// It corresponds to GITHUB_JOB defined in
|
||||
// https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
Job string `json:"job,omitempty"`
|
||||
// +optional
|
||||
// Action is the name of the current action or the step ID of the current step
|
||||
// that is triggerred within the runner.
|
||||
// It corresponds to GITHUB_ACTION defined in
|
||||
// https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
Action string `json:"action,omitempty"`
|
||||
}
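Once the controller propagates these values, they appear under `.status.workflow` on the Runner object (the `workflow` JSON key added to `RunnerStatus` above). A hypothetical status fragment, with placeholder values, might look like:

```yaml
status:
  phase: Running
  workflow:
    name: CI                      # GITHUB_WORKFLOW
    repository: example-org/demo  # GITHUB_REPOSITORY
    repositoryOwner: example-org  # GITHUB_REPOSITORY_OWNER
    runNumber: "42"               # GITHUB_RUN_NUMBER
    runID: "1234567890"           # GITHUB_RUN_ID
    job: build                    # GITHUB_JOB
    action: run-tests             # GITHUB_ACTION (placeholder step/action name)
```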

// RunnerStatusRegistration contains runner registration status
type RunnerStatusRegistration struct {
Enterprise string `json:"enterprise,omitempty"`
@@ -316,6 +368,8 @@ func (w *WorkVolumeClaimTemplate) V1VolumeMount(mountPath string) corev1.VolumeM
// +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
// +kubebuilder:printcolumn:JSONPath=".status.workflow.repository",name=WF Repo,type=string
// +kubebuilder:printcolumn:JSONPath=".status.workflow.runID",name=WF Run,type=string
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

// Runner is the Schema for the runners API

@@ -77,6 +77,11 @@ type RunnerDeploymentStatus struct {
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=rdeploy
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.enterprise",name=Enterprise,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.organization",name=Organization,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.repository",name=Repository,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.group",name=Group,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.labels",name=Labels,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
// +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number

@@ -436,6 +436,11 @@ func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) {
*out = new(string)
**out = **in
}
if in.DockerVarRunVolumeSizeLimit != nil {
in, out := &in.DockerVarRunVolumeSizeLimit, &out.DockerVarRunVolumeSizeLimit
x := (*in).DeepCopy()
*out = &x
}
if in.VolumeSizeLimit != nil {
in, out := &in.VolumeSizeLimit, &out.VolumeSizeLimit
x := (*in).DeepCopy()
@@ -1049,6 +1054,11 @@ func (in *RunnerSpec) DeepCopy() *RunnerSpec {
func (in *RunnerStatus) DeepCopyInto(out *RunnerStatus) {
*out = *in
in.Registration.DeepCopyInto(&out.Registration)
if in.WorkflowStatus != nil {
in, out := &in.WorkflowStatus, &out.WorkflowStatus
*out = new(WorkflowStatus)
**out = **in
}
if in.LastRegistrationCheckTime != nil {
in, out := &in.LastRegistrationCheckTime, &out.LastRegistrationCheckTime
*out = (*in).DeepCopy()
@@ -1212,3 +1222,18 @@ func (in *WorkflowJobSpec) DeepCopy() *WorkflowJobSpec {
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) {
*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatus.
func (in *WorkflowStatus) DeepCopy() *WorkflowStatus {
if in == nil {
return nil
}
out := new(WorkflowStatus)
in.DeepCopyInto(out)
return out
}

charts/.ci/ct-config-gha.yaml
Normal file
@@ -0,0 +1,9 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
lint-conf: charts/.ci/lint-config.yaml
chart-repos:
- jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped
charts:
- charts/gha-runner-scale-set-controller
- charts/gha-runner-scale-set
skip-clean-up: true
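The helm linting workflow presumably points the chart tester at this file via its config flag (for example, `ct lint --config charts/.ci/ct-config-gha.yaml`); the exact invocation lives in the workflow definition, which is not shown in this hunk.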
@@ -3,3 +3,5 @@ lint-conf: charts/.ci/lint-config.yaml
chart-repos:
- jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped
charts:
- charts/actions-runner-controller

@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.21.1
version: 0.23.3

# Used as the default manager tag value when no tag property is provided in the values.yaml
appVersion: 0.26.0
appVersion: 0.27.4

home: https://github.com/actions/actions-runner-controller

@@ -35,18 +35,21 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
| `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | |
| `hostNetwork` | The "hostNetwork" of the controller container | false |
| `dnsPolicy` | The "dnsPolicy" of the controller container | ClusterFirst |
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
| `image.tag` | The tag of the controller container | |
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
| `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | |
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.enable` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.interval` | Configure the interval at which Prometheus should scrape the controller's metrics | 1m |
| `metrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping | 30s |
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
| `metrics.port` | Set port of metrics service | 8443 |
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
| `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
| `fullnameOverride` | Override the full resource names | |
@@ -102,8 +105,11 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` |
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort":"http", "protocol":"TCP", "name":"http"}]` |
| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
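The two new rows above, `githubWebhookServer.terminationGracePeriodSeconds` and `githubWebhookServer.lifecycle`, translate to values along these lines; the preStop sleep and grace period shown here are illustrative, not chart defaults:

```yaml
githubWebhookServer:
  enabled: true
  terminationGracePeriodSeconds: 110  # allow in-flight webhook deliveries to drain
  lifecycle:
    preStop:
      exec:
        command: ["sleep", "100"]     # illustrative drain delay before shutdown
```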
@@ -115,9 +121,9 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
| `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
| `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the actions-metrics-server | false |
| `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false |
| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | actions-metrics-server |
| `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | |
| `actionsMetricsServer.nameOverride` | Override the resource name prefix | |
@@ -135,17 +141,22 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | |
| `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | |
| `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | |
| `actionsMetricsServer.terminationGracePeriodSeconds` | Set the actionsMetricsServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `actionsMetricsServer.lifecycle` | Set the actionsMetricsServer pod lifecycle hooks | `{}` |
| `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | |
| `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort":"http", "protocol":"TCP", "name":"http"}]` |
| `actionsMetricsServer.service.loadBalancerSourceRanges` | Set actionsMetricsServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false |
| `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | |
| `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | |
| `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | |
| `actionsMetrics.serviceMonitor` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
| `actionsMetrics.port` | Set port of actions metrics service | 8443 |
| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
| `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
| `actionsMetrics.serviceMonitor.enable` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `actionsMetrics.serviceMonitor.interval` | Configure the interval at which Prometheus should scrape the controller's metrics | 1m |
| `actionsMetrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping | 30s |
| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
| `actionsMetrics.port` | Set port of actions metrics service | 8443 |
| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
| `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
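Mirroring the controller's `metrics.*` block earlier in the table, the restructured `actionsMetrics.serviceMonitor.*` keys might be set roughly as follows; the interval and timeout simply echo the documented defaults:

```yaml
actionsMetrics:
  serviceMonitor:
    enable: true   # create a ServiceMonitor for the actions metrics service
    interval: 1m   # Prometheus scrape interval
    timeout: 30s   # Prometheus scrape timeout
  port: 8443
  proxy:
    enabled: true
    image:
      repository: quay.io/brancz/kube-rbac-proxy
      tag: v0.13.1
```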

@@ -113,7 +113,7 @@ spec:
description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef sis the reference to scaled resource like RunnerDeployment
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
properties:
kind:
description: Kind is the type of resource being referenced

@@ -17,6 +17,21 @@ spec:
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.template.spec.enterprise
name: Enterprise
type: string
- jsonPath: .spec.template.spec.organization
name: Organization
type: string
- jsonPath: .spec.template.spec.repository
name: Repository
type: string
- jsonPath: .spec.template.spec.group
name: Group
type: string
- jsonPath: .spec.template.spec.labels
name: Labels
type: string
- jsonPath: .spec.replicas
name: Desired
type: number
@@ -1081,6 +1096,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -1470,6 +1497,12 @@ spec:
type: integer
dockerRegistryMirror:
type: string
dockerVarRunVolumeSizeLimit:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
dockerVolumeMounts:
items:
description: VolumeMount describes a mounting of a Volume within a container.
@@ -1500,6 +1533,18 @@ spec:
|
||||
dockerdContainerResources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2141,6 +2186,18 @@ spec:
|
||||
resources:
|
||||
description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2963,6 +3020,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3256,6 +3325,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3328,7 +3409,7 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
supplementalGroups:
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.
|
||||
items:
|
||||
format: int64
|
||||
type: integer
|
||||
@@ -3873,6 +3954,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -4221,10 +4314,10 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
nodeAffinityPolicy:
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
topologyKey:
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
@@ -4554,7 +4647,7 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
dataSource:
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4570,7 +4663,7 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
dataSourceRef:
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4581,6 +4674,9 @@ spec:
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
@@ -4588,6 +4684,18 @@ spec:
|
||||
resources:
|
||||
description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -5216,6 +5324,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
|
||||
@@ -1078,6 +1078,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -1467,6 +1479,12 @@ spec:
|
||||
type: integer
|
||||
dockerRegistryMirror:
|
||||
type: string
|
||||
dockerVarRunVolumeSizeLimit:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
dockerVolumeMounts:
|
||||
items:
|
||||
description: VolumeMount describes a mounting of a Volume within a container.
|
||||
@@ -1497,6 +1515,18 @@ spec:
|
||||
dockerdContainerResources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2138,6 +2168,18 @@ spec:
|
||||
resources:
|
||||
description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2960,6 +3002,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3253,6 +3307,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3325,7 +3391,7 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
supplementalGroups:
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.
|
||||
items:
|
||||
format: int64
|
||||
type: integer
|
||||
@@ -3870,6 +3936,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -4218,10 +4296,10 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
nodeAffinityPolicy:
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
topologyKey:
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
@@ -4551,7 +4629,7 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
dataSource:
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4567,7 +4645,7 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
dataSourceRef:
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4578,6 +4656,9 @@ spec:
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
@@ -4585,6 +4666,18 @@ spec:
|
||||
resources:
|
||||
description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -5213,6 +5306,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||

@@ -36,6 +36,12 @@ spec:
- jsonPath: .status.message
name: Message
type: string
- jsonPath: .status.workflow.repository
name: WF Repo
type: string
- jsonPath: .status.workflow.runID
name: WF Run
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
@@ -1025,6 +1031,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -1414,6 +1432,12 @@ spec:
|
||||
type: integer
|
||||
dockerRegistryMirror:
|
||||
type: string
|
||||
dockerVarRunVolumeSizeLimit:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
dockerVolumeMounts:
|
||||
items:
|
||||
description: VolumeMount describes a mounting of a Volume within a container.
|
||||
@@ -1444,6 +1468,18 @@ spec:
|
||||
dockerdContainerResources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2085,6 +2121,18 @@ spec:
|
||||
resources:
|
||||
description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2907,6 +2955,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3200,6 +3260,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3272,7 +3344,7 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
supplementalGroups:
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.
|
||||
items:
|
||||
format: int64
|
||||
type: integer
|
||||
@@ -3817,6 +3889,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -4165,10 +4249,10 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
nodeAffinityPolicy:
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
topologyKey:
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
@@ -4498,7 +4582,7 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
dataSource:
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4514,7 +4598,7 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
dataSourceRef:
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4525,6 +4609,9 @@ spec:
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
@@ -4532,6 +4619,18 @@ spec:
|
||||
resources:
|
||||
description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -5160,6 +5259,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -5225,6 +5336,31 @@ spec:
|
||||
- expiresAt
|
||||
- token
|
||||
type: object
|
||||
workflow:
|
||||
description: WorkflowStatus contains information propagated from GitHub Actions workflow run environment variables, to ease monitoring of the workflow run/job/steps triggered on the runner.
|
||||
properties:
|
||||
action:
|
||||
description: Action is the name of the current action or the step ID of the current step that is triggered within the runner. It corresponds to GITHUB_ACTION defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
job:
|
||||
description: Job is the name of the current job that is triggered within the runner. It corresponds to GITHUB_JOB defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
name:
|
||||
description: Name is the name of the workflow that is triggered within the runner. It corresponds to GITHUB_WORKFLOW defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
repository:
|
||||
description: Repository is the owner and repository name of the workflow that is triggered within the runner. It corresponds to GITHUB_REPOSITORY defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
repositoryOwner:
|
||||
description: RepositoryOwner is the repository owner's name for the workflow that is triggered within the runner. It corresponds to GITHUB_REPOSITORY_OWNER defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
runID:
|
||||
description: RunID is the unique number for the current workflow run that is triggered within the runner. It corresponds to GITHUB_RUN_ID defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
runNumber:
|
||||
description: RunNumber is the unique number for the current workflow run that is triggered within the runner. It corresponds to GITHUB_RUN_NUMBER defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
served: true
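For illustration only, the WorkflowStatus fields documented above end up under the runner resource's `.status.workflow`. The sketch below assumes nothing beyond the field names in the schema; every value is a made-up placeholder.

```yaml
# Hypothetical runner status fragment; values are placeholders.
status:
  workflow:
    name: CI                                # GITHUB_WORKFLOW
    job: build                              # GITHUB_JOB
    action: __run                           # GITHUB_ACTION
    repository: example-org/example-repo    # GITHUB_REPOSITORY
    repositoryOwner: example-org            # GITHUB_REPOSITORY_OWNER
    runID: "1234567890"                     # GITHUB_RUN_ID
    runNumber: "42"                         # GITHUB_RUN_NUMBER
```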
|
||||
|
||||
@@ -55,6 +55,12 @@ spec:
|
||||
type: integer
|
||||
dockerRegistryMirror:
|
||||
type: string
|
||||
dockerVarRunVolumeSizeLimit:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
dockerdWithinRunnerContainer:
|
||||
type: boolean
|
||||
effectiveTime:
|
||||
@@ -89,6 +95,14 @@ spec:
|
||||
description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
|
||||
format: int32
|
||||
type: integer
|
||||
ordinals:
|
||||
description: ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a "0" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.
|
||||
properties:
|
||||
start:
|
||||
description: 'start is the number representing the first replica''s index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range: [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). If unset, defaults to 0. Replica indices will be in the range: [0, .spec.replicas).'
|
||||
format: int32
|
||||
type: integer
|
||||
type: object
|
||||
organization:
|
||||
pattern: ^[^/]+$
|
||||
type: string
|
||||
@@ -152,7 +166,7 @@ spec:
|
||||
description: 'serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where "pod-specific-string" is managed by the StatefulSet controller.'
|
||||
type: string
|
||||
template:
|
||||
description: template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.
|
||||
description: template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named "web" with index number "3" would be named "web-3".
|
||||
properties:
|
||||
metadata:
|
||||
description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
|
||||
@@ -1151,6 +1165,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -1963,6 +1989,18 @@ spec:
|
||||
resources:
|
||||
description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2786,6 +2824,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3109,6 +3159,31 @@ spec:
|
||||
- conditionType
|
||||
type: object
|
||||
type: array
|
||||
resourceClaims:
|
||||
description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.
|
||||
properties:
|
||||
name:
|
||||
description: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.
|
||||
type: string
|
||||
source:
|
||||
description: Source describes where to find the ResourceClaim.
|
||||
properties:
|
||||
resourceClaimName:
|
||||
description: ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.
|
||||
type: string
|
||||
resourceClaimTemplateName:
|
||||
description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be <pod name>-<resource name>, where <resource name> is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim."
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- name
|
||||
x-kubernetes-list-type: map
|
||||
restartPolicy:
|
||||
description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy'
|
||||
type: string
|
||||
@@ -3118,6 +3193,21 @@ spec:
|
||||
schedulerName:
|
||||
description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.
|
||||
type: string
|
||||
schedulingGates:
|
||||
description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. \n This is an alpha-level feature enabled by PodSchedulingReadiness feature gate."
|
||||
items:
|
||||
description: PodSchedulingGate is associated to a Pod to guard its scheduling.
|
||||
properties:
|
||||
name:
|
||||
description: Name of the scheduling gate. Each scheduling gate must have a unique name field.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- name
|
||||
x-kubernetes-list-type: map
|
||||
securityContext:
|
||||
description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.'
|
||||
properties:
|
||||
@@ -3168,7 +3258,7 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
supplementalGroups:
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.
|
||||
items:
|
||||
format: int64
|
||||
type: integer
|
||||
@@ -3298,10 +3388,10 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
nodeAffinityPolicy:
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
topologyKey:
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
@@ -3601,7 +3691,7 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
dataSource:
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -3617,7 +3707,7 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
dataSourceRef:
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -3628,6 +3718,9 @@ spec:
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
@@ -3635,6 +3728,18 @@ spec:
|
||||
resources:
|
||||
description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -4317,7 +4422,7 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
dataSource:
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4333,7 +4438,7 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
dataSourceRef:
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4344,6 +4449,9 @@ spec:
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
@@ -4351,6 +4459,18 @@ spec:
|
||||
resources:
|
||||
description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -4493,6 +4613,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
|
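The RunnerSet CRD hunks above add two new spec-level fields, `dockerVarRunVolumeSizeLimit` and `ordinals`. A minimal hedged sketch of setting them, assuming the usual `actions.summerwind.dev/v1alpha1` RunnerSet kind; the resource name and values are placeholders, and `ordinals` additionally needs the alpha StatefulSetStartOrdinal feature gate enabled on the cluster.

```yaml
# Hypothetical RunnerSet fragment exercising the newly added fields.
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerSet
metadata:
  name: example-runnerset
spec:
  dockerVarRunVolumeSizeLimit: 1Mi   # int-or-string resource quantity, per the schema above
  ordinals:
    start: 1                         # number replicas from 1 instead of the default 0
  # serviceName, selector, template, etc. continue as before
```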
||||
@@ -29,6 +29,8 @@ curl -L https://github.com/actions/actions-runner-controller/releases/download/a
|
||||
kubectl replace -f crds/
|
||||
```
|
||||
|
||||
Note that if you're going to create prometheus-operator `ServiceMonitor` resources via the chart, you also need to deploy the prometheus-operator CRDs first.
|
||||
|
||||
2. Upgrade the Helm release
|
||||
|
||||
```shell
|
||||
|
||||
@@ -15,7 +15,7 @@ spec:
|
||||
metadata:
|
||||
{{- with .Values.actionsMetricsServer.podAnnotations }}
|
||||
annotations:
|
||||
kubectl.kubernetes.io/default-logs-container: "github-webhook-server"
|
||||
kubectl.kubernetes.io/default-container: "actions-metrics-server"
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
@@ -36,8 +36,8 @@ spec:
|
||||
{{- end }}
|
||||
containers:
|
||||
- args:
|
||||
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
|
||||
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
|
||||
{{- $metricsHost := .Values.actionsMetrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
|
||||
{{- $metricsPort := .Values.actionsMetrics.proxy.enabled | ternary "8080" .Values.actionsMetrics.port }}
|
||||
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
|
||||
{{- if .Values.actionsMetricsServer.logLevel }}
|
||||
- "--log-level={{ .Values.actionsMetricsServer.logLevel }}"
|
||||
@@ -45,11 +45,17 @@ spec:
|
||||
{{- if .Values.runnerGithubURL }}
|
||||
- "--runner-github-url={{ .Values.runnerGithubURL }}"
|
||||
{{- end }}
|
||||
{{- if .Values.actionsMetricsServer.logFormat }}
|
||||
{{- if .Values.actionsMetricsServer.logFormat }}
|
||||
- "--log-format={{ .Values.actionsMetricsServer.logFormat }}"
|
||||
{{- end }}
|
||||
command:
|
||||
- "/actions-metrics-server"
|
||||
{{- if .Values.actionsMetricsServer.lifecycle }}
|
||||
{{- with .Values.actionsMetricsServer.lifecycle }}
|
||||
lifecycle:
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: GITHUB_WEBHOOK_SECRET_TOKEN
|
||||
valueFrom:
|
||||
@@ -74,25 +80,25 @@ spec:
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: github_token
|
||||
name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }}
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }}
|
||||
optional: true
|
||||
- name: GITHUB_APP_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: github_app_id
|
||||
name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }}
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }}
|
||||
optional: true
|
||||
- name: GITHUB_APP_INSTALLATION_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: github_app_installation_id
|
||||
name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }}
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }}
|
||||
optional: true
|
||||
- name: GITHUB_APP_PRIVATE_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: github_app_private_key
|
||||
name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }}
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }}
|
||||
optional: true
|
||||
{{- if .Values.authSecret.github_basicauth_username }}
|
||||
- name: GITHUB_BASICAUTH_USERNAME
|
||||
@@ -116,8 +122,8 @@ spec:
|
||||
- containerPort: 8000
|
||||
name: http
|
||||
protocol: TCP
|
||||
{{- if not .Values.metrics.proxy.enabled }}
|
||||
- containerPort: {{ .Values.metrics.port }}
|
||||
{{- if not .Values.actionsMetrics.proxy.enabled }}
|
||||
- containerPort: {{ .Values.actionsMetrics.port }}
|
||||
name: metrics-port
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
@@ -125,24 +131,24 @@ spec:
|
||||
{{- toYaml .Values.actionsMetricsServer.resources | nindent 12 }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.actionsMetricsServer.securityContext | nindent 12 }}
|
||||
{{- if .Values.metrics.proxy.enabled }}
|
||||
{{- if .Values.actionsMetrics.proxy.enabled }}
|
||||
- args:
|
||||
- "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
|
||||
- "--secure-listen-address=0.0.0.0:{{ .Values.actionsMetrics.port }}"
|
||||
- "--upstream=http://127.0.0.1:8080/"
|
||||
- "--logtostderr=true"
|
||||
- "--v=10"
|
||||
image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
|
||||
image: "{{ .Values.actionsMetrics.proxy.image.repository }}:{{ .Values.actionsMetrics.proxy.image.tag }}"
|
||||
name: kube-rbac-proxy
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.metrics.port }}
|
||||
- containerPort: {{ .Values.actionsMetrics.port }}
|
||||
name: metrics-port
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- end }}
|
||||
terminationGracePeriodSeconds: 10
|
||||
terminationGracePeriodSeconds: {{ .Values.actionsMetricsServer.terminationGracePeriodSeconds }}
|
||||
{{- with .Values.actionsMetricsServer.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
|
||||
@@ -0,0 +1,90 @@
|
||||
{{- if .Values.actionsMetricsServer.enabled }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- actions.summerwind.dev
|
||||
resources:
|
||||
- horizontalrunnerautoscalers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.summerwind.dev
|
||||
resources:
|
||||
- horizontalrunnerautoscalers/finalizers
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.summerwind.dev
|
||||
resources:
|
||||
- horizontalrunnerautoscalers/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.summerwind.dev
|
||||
resources:
|
||||
- runnersets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.summerwind.dev
|
||||
resources:
|
||||
- runnerdeployments
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.summerwind.dev
|
||||
resources:
|
||||
- runnerdeployments/finalizers
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.summerwind.dev
|
||||
resources:
|
||||
- runnerdeployments/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- authentication.k8s.io
|
||||
resources:
|
||||
- tokenreviews
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- authorization.k8s.io
|
||||
resources:
|
||||
- subjectaccessreviews
|
||||
verbs:
|
||||
- create
|
||||
{{- end }}
|
||||
@@ -0,0 +1,14 @@
|
||||
{{- if .Values.actionsMetricsServer.enabled }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,28 @@
|
||||
{{- if .Values.actionsMetricsServer.enabled }}
|
||||
{{- if .Values.actionsMetricsServer.secret.create }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{- if .Values.actionsMetricsServer.secret.github_webhook_secret_token }}
|
||||
github_webhook_secret_token: {{ .Values.actionsMetricsServer.secret.github_webhook_secret_token | toString | b64enc }}
|
||||
{{- end }}
|
||||
{{- if .Values.actionsMetricsServer.secret.github_app_id }}
|
||||
github_app_id: {{ .Values.actionsMetricsServer.secret.github_app_id | toString | b64enc }}
|
||||
{{- end }}
|
||||
{{- if .Values.actionsMetricsServer.secret.github_app_installation_id }}
|
||||
github_app_installation_id: {{ .Values.actionsMetricsServer.secret.github_app_installation_id | toString | b64enc }}
|
||||
{{- end }}
|
||||
{{- if .Values.actionsMetricsServer.secret.github_app_private_key }}
|
||||
github_app_private_key: {{ .Values.actionsMetricsServer.secret.github_app_private_key | toString | b64enc }}
|
||||
{{- end }}
|
||||
{{- if .Values.actionsMetricsServer.secret.github_token }}
|
||||
github_token: {{ .Values.actionsMetricsServer.secret.github_token | toString | b64enc }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
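The Secret template above only renders the keys that are actually set under `actionsMetricsServer.secret`. A hedged sketch of the corresponding values follows; the key names come from the template, every value is a placeholder, and you would normally provide either the GitHub App credentials or a token, not both.

```yaml
# Hypothetical actions-metrics-server secret values; placeholders only.
actionsMetricsServer:
  enabled: true
  secret:
    create: true
    github_webhook_secret_token: "replace-me"
    # Either GitHub App credentials...
    github_app_id: "12345"
    github_app_installation_id: "67890"
    github_app_private_key: |
      -----BEGIN RSA PRIVATE KEY-----
      ...
    # ...or a personal access token:
    # github_token: "ghp_replace-me"
```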
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
|
||||
{{- if .Values.actionsMetricsServer.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.actionsMetricsServer.service.annotations | nindent 4 }}
|
||||
@@ -16,11 +16,17 @@ spec:
|
||||
{{ range $_, $port := .Values.actionsMetricsServer.service.ports -}}
|
||||
- {{ $port | toYaml | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.metrics.serviceMonitor }}
|
||||
{{- if .Values.actionsMetrics.serviceMonitor.enable }}
|
||||
- name: metrics-port
|
||||
port: {{ .Values.metrics.port }}
|
||||
port: {{ .Values.actionsMetrics.port }}
|
||||
targetPort: metrics-port
|
||||
{{- end }}
|
||||
selector:
|
||||
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
|
||||
{{- if .Values.actionsMetricsServer.service.loadBalancerSourceRanges }}
|
||||
loadBalancerSourceRanges:
|
||||
{{- range $ip := .Values.actionsMetricsServer.service.loadBalancerSourceRanges }}
|
||||
- {{ $ip -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor }}
|
||||
{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||
{{- with .Values.actionsMetricsServer.serviceMonitorLabels }}
|
||||
{{- with .Values.actionsMetrics.serviceMonitorLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.serviceMonitorName" . }}
|
||||
@@ -19,6 +19,8 @@ spec:
|
||||
tlsConfig:
|
||||
insecureSkipVerify: true
|
||||
{{- end }}
|
||||
interval: {{ .Values.actionsMetrics.serviceMonitor.interval }}
|
||||
scrapeTimeout: {{ .Values.actionsMetrics.serviceMonitor.timeout }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 6 }}
|
||||
|
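The Service and ServiceMonitor templates above now read their settings from an `actionsMetrics` block instead of `metrics`. A hedged sketch of the keys implied by those template references; the names are inferred from this diff and all values are placeholders.

```yaml
# Hypothetical actionsMetrics values; key names come from the template references above.
actionsMetrics:
  port: 8443              # metrics-port exposed on the Service
  proxy:
    enabled: true         # front the metrics endpoint with kube-rbac-proxy
    image:
      repository: example.com/kube-rbac-proxy   # placeholder image
      tag: latest
  serviceMonitor:
    enable: true
    interval: 30s
    timeout: 10s
  serviceMonitorLabels: {}
```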
||||
@@ -1,4 +1,4 @@
|
||||
{{- if .Values.metrics.serviceMonitor }}
|
||||
{{- if .Values.metrics.serviceMonitor.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
@@ -19,6 +19,8 @@ spec:
|
||||
tlsConfig:
|
||||
insecureSkipVerify: true
|
||||
{{- end }}
|
||||
interval: {{ .Values.metrics.serviceMonitor.interval }}
|
||||
scrapeTimeout: {{ .Values.metrics.serviceMonitor.timeout }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "actions-runner-controller.selectorLabels" . | nindent 6 }}
|
||||
|
||||
@@ -70,6 +70,9 @@ spec:
|
||||
{{- if .Values.logFormat }}
|
||||
- "--log-format={{ .Values.logFormat }}"
|
||||
{{- end }}
|
||||
{{- if .Values.dockerGID }}
|
||||
- "--docker-gid={{ .Values.dockerGID }}"
|
||||
{{- end }}
|
||||
command:
|
||||
- "/manager"
|
||||
env:
|
||||
@@ -211,3 +214,6 @@ spec:
|
||||
{{- if .Values.hostNetwork }}
|
||||
hostNetwork: {{ .Values.hostNetwork }}
|
||||
{{- end }}
|
||||
{{- if .Values.dnsPolicy }}
|
||||
dnsPolicy: {{ .Values.dnsPolicy }}
|
||||
{{- end }}
|
||||
|
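The manager deployment template above gates several flags on top-level chart values. A hedged sketch of those keys; the names are taken from the template references and the values are placeholders.

```yaml
# Hypothetical top-level values consumed by the manager deployment template above.
logFormat: json
dockerGID: "1001"          # rendered as --docker-gid=1001
hostNetwork: false
dnsPolicy: ClusterFirst    # any valid Pod dnsPolicy value
```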
||||
@@ -51,11 +51,17 @@ spec:
|
||||
{{- if .Values.githubWebhookServer.queueLimit }}
|
||||
- "--queue-limit={{ .Values.githubWebhookServer.queueLimit }}"
|
||||
{{- end }}
|
||||
{{- if .Values.githubWebhookServer.logFormat }}
|
||||
{{- if .Values.githubWebhookServer.logFormat }}
|
||||
- "--log-format={{ .Values.githubWebhookServer.logFormat }}"
|
||||
{{- end }}
|
||||
command:
|
||||
- "/github-webhook-server"
|
||||
{{- if .Values.githubWebhookServer.lifecycle }}
|
||||
{{- with .Values.githubWebhookServer.lifecycle }}
|
||||
lifecycle:
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: GITHUB_WEBHOOK_SECRET_TOKEN
|
||||
valueFrom:
|
||||
@@ -111,10 +117,14 @@ spec:
|
||||
name: {{ include "actions-runner-controller.secretName" . }}
|
||||
optional: true
|
||||
{{- end }}
|
||||
{{- if kindIs "slice" .Values.githubWebhookServer.env }}
|
||||
{{- toYaml .Values.githubWebhookServer.env | nindent 8 }}
|
||||
{{- else }}
|
||||
{{- range $key, $val := .Values.githubWebhookServer.env }}
|
||||
- name: {{ $key }}
|
||||
value: {{ $val | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
|
||||
name: github-webhook-server
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
@@ -148,7 +158,7 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- end }}
|
||||
terminationGracePeriodSeconds: 10
|
||||
terminationGracePeriodSeconds: {{ .Values.githubWebhookServer.terminationGracePeriodSeconds }}
|
||||
{{- with .Values.githubWebhookServer.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
|
||||
{{- if .Values.githubWebhookServer.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.githubWebhookServer.service.annotations | nindent 4 }}
|
||||
@@ -16,11 +16,17 @@ spec:
|
||||
{{ range $_, $port := .Values.githubWebhookServer.service.ports -}}
|
||||
- {{ $port | toYaml | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.metrics.serviceMonitor }}
|
||||
{{- if .Values.metrics.serviceMonitor.enable }}
|
||||
- name: metrics-port
|
||||
port: {{ .Values.metrics.port }}
|
||||
targetPort: metrics-port
|
||||
{{- end }}
|
||||
selector:
|
||||
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
|
||||
{{- if .Values.githubWebhookServer.service.loadBalancerSourceRanges }}
|
||||
loadBalancerSourceRanges:
|
||||
{{- range $ip := .Values.githubWebhookServer.service.loadBalancerSourceRanges }}
|
||||
- {{ $ip -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor }}
|
||||
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
@@ -19,6 +19,8 @@ spec:
|
||||
tlsConfig:
|
||||
insecureSkipVerify: true
|
||||
{{- end }}
|
||||
interval: {{ .Values.metrics.serviceMonitor.interval }}
|
||||
scrapeTimeout: {{ .Values.metrics.serviceMonitor.timeout }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }}
|
||||
|
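The github-webhook-server templates above reference the new `metrics.serviceMonitor.enable` toggle as well as `terminationGracePeriodSeconds` and `lifecycle` under `githubWebhookServer`. A hedged sketch of those keys; names are inferred from the templates in this diff and all values are placeholders.

```yaml
# Hypothetical values referenced by the github-webhook-server templates above.
githubWebhookServer:
  enabled: true
  logFormat: json
  queueLimit: 100
  terminationGracePeriodSeconds: 10
  lifecycle:
    preStop:
      exec:
        command: ["sleep", "5"]
metrics:
  port: 8443
  serviceMonitor:
    enable: true
    interval: 30s
    timeout: 10s
```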
||||
@@ -250,14 +250,6 @@ rules:
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
{{- if .Values.runner.statusUpdateHook.enabled }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
@@ -311,11 +303,4 @@ rules:
|
||||
- list
|
||||
- create
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
{{- end }}
|
||||
|
||||
@@ -0,0 +1,21 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
{{- if .Values.scope.singleNamespace }}
|
||||
kind: RoleBinding
|
||||
{{- else }}
|
||||
kind: ClusterRoleBinding
|
||||
{{- end }}
|
||||
metadata:
|
||||
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- if .Values.scope.singleNamespace }}
|
||||
kind: Role
|
||||
{{- else }}
|
||||
kind: ClusterRole
|
||||
{{- end }}
|
||||
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "actions-runner-controller.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
@@ -0,0 +1,24 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
{{- if .Values.scope.singleNamespace }}
|
||||
kind: Role
|
||||
{{- else }}
|
||||
kind: ClusterRole
|
||||
{{- end }}
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
{{- if .Values.rbac.allowGrantingKubernetesContainerModePermissions }}
|
||||
{{/* These permissions are required by ARC to create RBAC resources for the runner pod to use the kubernetes container mode. */}}
|
||||
{{/* See https://github.com/actions/actions-runner-controller/pull/1268/files#r917331632 */}}
|
||||
- create
|
||||
- delete
|
||||
{{- end }}
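Note on the toggle above: a minimal values override (a sketch; only the two keys this template reads are shown, everything else is left at chart defaults) that switches the manager secrets role to cluster scope and grants the extra container-mode permissions would be:

scope:
  singleNamespace: false   # false renders a ClusterRole/ClusterRoleBinding instead of a namespaced Role
rbac:
  allowGrantingKubernetesContainerModePermissions: true   # adds create/delete on secrets, per the block above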
|
||||
@@ -44,6 +44,7 @@ webhooks:
|
||||
resources:
|
||||
- runners
|
||||
sideEffects: None
|
||||
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
{{- if .Values.scope.singleNamespace }}
|
||||
@@ -74,6 +75,7 @@ webhooks:
|
||||
resources:
|
||||
- runnerdeployments
|
||||
sideEffects: None
|
||||
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
{{- if .Values.scope.singleNamespace }}
|
||||
@@ -104,6 +106,7 @@ webhooks:
|
||||
resources:
|
||||
- runnerreplicasets
|
||||
sideEffects: None
|
||||
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
{{- if .Values.scope.singleNamespace }}
|
||||
@@ -136,6 +139,7 @@ webhooks:
|
||||
objectSelector:
|
||||
matchLabels:
|
||||
"actions-runner-controller/inject-registration-token": "true"
|
||||
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
|
||||
---
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
@@ -177,6 +181,7 @@ webhooks:
|
||||
resources:
|
||||
- runners
|
||||
sideEffects: None
|
||||
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
{{- if .Values.scope.singleNamespace }}
|
||||
@@ -207,6 +212,7 @@ webhooks:
|
||||
resources:
|
||||
- runnerdeployments
|
||||
sideEffects: None
|
||||
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
{{- if .Values.scope.singleNamespace }}
|
||||
@@ -238,6 +244,7 @@ webhooks:
|
||||
- runnerreplicasets
|
||||
sideEffects: None
|
||||
{{ if not (or (hasKey .Values.admissionWebHooks "caBundle") .Values.certManagerEnabled) }}
|
||||
timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
|
||||
@@ -47,6 +47,7 @@ authSecret:
|
||||
#github_basicauth_username: ""
|
||||
#github_basicauth_password: ""
|
||||
|
||||
# http(s) should be specified for dockerRegistryMirror, e.g.: dockerRegistryMirror="https://<your-docker-registry-mirror>"
|
||||
dockerRegistryMirror: ""
|
||||
image:
|
||||
repository: "summerwind/actions-runner-controller"
|
||||
@@ -108,7 +109,10 @@ service:
|
||||
# Metrics service resource
|
||||
metrics:
|
||||
serviceAnnotations: {}
|
||||
serviceMonitor: false
|
||||
serviceMonitor:
|
||||
enable: false
|
||||
timeout: 30s
|
||||
interval: 1m
|
||||
serviceMonitorLabels: {}
|
||||
port: 8443
|
||||
proxy:
|
||||
@@ -188,14 +192,22 @@ admissionWebHooks:
|
||||
# https://github.com/actions/actions-runner-controller/issues/1005#issuecomment-993097155
|
||||
#hostNetwork: true
|
||||
|
||||
# If you use `hostNetwork: true`, then you need dnsPolicy: ClusterFirstWithHostNet
|
||||
# https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
|
||||
#dnsPolicy: ClusterFirst
|
||||
|
||||
## specify log format for actions runner controller. Valid options are "text" and "json"
|
||||
logFormat: text
|
||||
|
||||
# enable setting the docker group id for the runner container
|
||||
# https://github.com/actions/actions-runner-controller/pull/2499
|
||||
#dockerGID: 121
|
||||
|
||||
githubWebhookServer:
|
||||
enabled: false
|
||||
replicaCount: 1
|
||||
useRunnerGroupsVisibility: false
|
||||
## specify log format for github webhook controller. Valid options are "text" and "json"
|
||||
## specify log format for github webhook server. Valid options are "text" and "json"
|
||||
logFormat: text
|
||||
secret:
|
||||
enabled: false
|
||||
@@ -240,6 +252,7 @@ githubWebhookServer:
|
||||
protocol: TCP
|
||||
name: http
|
||||
#nodePort: someFixedPortForUseWithTerraformCdkCfnEtc
|
||||
loadBalancerSourceRanges: []
|
||||
ingress:
|
||||
enabled: false
|
||||
ingressClassName: ""
|
||||
@@ -276,6 +289,21 @@ githubWebhookServer:
|
||||
# minAvailable: 1
|
||||
# maxUnavailable: 3
|
||||
# queueLimit: 100
|
||||
terminationGracePeriodSeconds: 10
|
||||
lifecycle: {}
|
||||
# specify additional environment variables for the webhook server pod.
|
||||
# It's possible to specify either key value pairs, e.g.:
|
||||
# my_env_var: "some value"
|
||||
# my_other_env_var: "other value"
|
||||
|
||||
# or a list of complete environment variable definitions e.g.:
|
||||
# - name: GITHUB_WEBHOOK_SECRET_TOKEN
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# key: GITHUB_WEBHOOK_SECRET_TOKEN
|
||||
# name: prod-gha-controller-webhook-token
|
||||
# optional: true
|
||||
env: {}
|
||||
|
||||
actionsMetrics:
|
||||
serviceAnnotations: {}
|
||||
@@ -283,7 +311,10 @@ actionsMetrics:
|
||||
# as a part of the helm release.
|
||||
# Do note that you also need actionsMetricsServer.enabled=true
|
||||
# to deploy the actions-metrics-server whose k8s service is referenced by the service monitor.
|
||||
serviceMonitor: false
|
||||
serviceMonitor:
|
||||
enable: false
|
||||
timeout: 30s
|
||||
interval: 1m
|
||||
serviceMonitorLabels: {}
|
||||
port: 8443
|
||||
proxy:
|
||||
@@ -298,7 +329,7 @@ actionsMetricsServer:
|
||||
# See the thread below for more context.
|
||||
# https://github.com/actions/actions-runner-controller/pull/1814#discussion_r974758924
|
||||
replicaCount: 1
|
||||
## specify log format for github webhook controller. Valid options are "text" and "json"
|
||||
## specify log format for actions metrics server. Valid options are "text" and "json"
|
||||
logFormat: text
|
||||
secret:
|
||||
enabled: false
|
||||
@@ -343,6 +374,7 @@ actionsMetricsServer:
|
||||
protocol: TCP
|
||||
name: http
|
||||
#nodePort: someFixedPortForUseWithTerraformCdkCfnEtc
|
||||
loadBalancerSourceRanges: []
|
||||
ingress:
|
||||
enabled: false
|
||||
ingressClassName: ""
|
||||
@@ -372,4 +404,5 @@ actionsMetricsServer:
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
terminationGracePeriodSeconds: 10
|
||||
lifecycle: {}
|
||||
|
||||
charts/gha-runner-scale-set-controller/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
charts/gha-runner-scale-set-controller/Chart.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
apiVersion: v2
|
||||
name: gha-runner-scale-set-controller
|
||||
description: A Helm chart for installing the actions-runner-controller CRDs
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.5.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: "0.5.0"
|
||||
|
||||
home: https://github.com/actions/actions-runner-controller
|
||||
|
||||
sources:
|
||||
- "https://github.com/actions/actions-runner-controller"
|
||||
|
||||
maintainers:
|
||||
- name: actions
|
||||
url: https://github.com/actions
|
||||
charts/gha-runner-scale-set-controller/ci/ci-values.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
# Set the following to dummy values.
|
||||
# This is only useful in CI
|
||||
image:
|
||||
repository: test-arc
|
||||
tag: dev
|
||||
@@ -0,0 +1,145 @@
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.7.0
|
||||
creationTimestamp: null
|
||||
name: autoscalinglisteners.actions.github.com
|
||||
spec:
|
||||
group: actions.github.com
|
||||
names:
|
||||
kind: AutoscalingListener
|
||||
listKind: AutoscalingListenerList
|
||||
plural: autoscalinglisteners
|
||||
singular: autoscalinglistener
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .spec.githubConfigUrl
|
||||
name: GitHub Configure URL
|
||||
type: string
|
||||
- jsonPath: .spec.autoscalingRunnerSetNamespace
|
||||
name: AutoscalingRunnerSet Namespace
|
||||
type: string
|
||||
- jsonPath: .spec.autoscalingRunnerSetName
|
||||
name: AutoscalingRunnerSet Name
|
||||
type: string
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: AutoscalingListener is the Schema for the autoscalinglisteners API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: AutoscalingListenerSpec defines the desired state of AutoscalingListener
|
||||
properties:
|
||||
autoscalingRunnerSetName:
|
||||
description: Required
|
||||
type: string
|
||||
autoscalingRunnerSetNamespace:
|
||||
description: Required
|
||||
type: string
|
||||
ephemeralRunnerSetName:
|
||||
description: Required
|
||||
type: string
|
||||
githubConfigSecret:
|
||||
description: Required
|
||||
type: string
|
||||
githubConfigUrl:
|
||||
description: Required
|
||||
type: string
|
||||
githubServerTLS:
|
||||
properties:
|
||||
certificateFrom:
|
||||
description: Required
|
||||
properties:
|
||||
configMapKeyRef:
|
||||
description: Required
|
||||
properties:
|
||||
key:
|
||||
description: The key to select.
|
||||
type: string
|
||||
name:
|
||||
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the ConfigMap or its key must be defined
|
||||
type: boolean
|
||||
required:
|
||||
- key
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
image:
|
||||
description: Required
|
||||
type: string
|
||||
imagePullPolicy:
|
||||
description: Required
|
||||
type: string
|
||||
imagePullSecrets:
|
||||
description: Required
|
||||
items:
|
||||
description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
|
||||
properties:
|
||||
name:
|
||||
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
maxRunners:
|
||||
description: Required
|
||||
minimum: 0
|
||||
type: integer
|
||||
minRunners:
|
||||
description: Required
|
||||
minimum: 0
|
||||
type: integer
|
||||
proxy:
|
||||
properties:
|
||||
http:
|
||||
properties:
|
||||
credentialSecretRef:
|
||||
type: string
|
||||
url:
|
||||
description: Required
|
||||
type: string
|
||||
type: object
|
||||
https:
|
||||
properties:
|
||||
credentialSecretRef:
|
||||
type: string
|
||||
url:
|
||||
description: Required
|
||||
type: string
|
||||
type: object
|
||||
noProxy:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
runnerScaleSetId:
|
||||
description: Required
|
||||
type: integer
|
||||
type: object
|
||||
status:
|
||||
description: AutoscalingListenerStatus defines the observed state of AutoscalingListener
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
File diffs suppressed because they are too large (3 files).
@@ -0,0 +1,3 @@
|
||||
Thank you for installing {{ .Chart.Name }}.
|
||||
|
||||
Your release is named {{ .Release.Name }}.
|
||||
charts/gha-runner-scale-set-controller/templates/_helpers.tpl (new file, 132 lines)
@@ -0,0 +1,132 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
|
||||
|
||||
{{- define "gha-base-name" -}}
|
||||
gha-rs-controller
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.name" -}}
|
||||
{{- default (include "gha-base-name" .) .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "gha-runner-scale-set-controller.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default (include "gha-base-name" .) .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "gha-runner-scale-set-controller.chart" -}}
|
||||
{{- printf "%s-%s" (include "gha-base-name" .) .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "gha-runner-scale-set-controller.labels" -}}
|
||||
helm.sh/chart: {{ include "gha-runner-scale-set-controller.chart" . }}
|
||||
{{ include "gha-runner-scale-set-controller.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/part-of: gha-rs-controller
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- range $k, $v := .Values.labels }}
|
||||
{{ $k }}: {{ $v }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "gha-runner-scale-set-controller.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "gha-runner-scale-set-controller.name" . }}
|
||||
app.kubernetes.io/namespace: {{ .Release.Namespace }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "gha-runner-scale-set-controller.serviceAccountName" -}}
|
||||
{{- if eq .Values.serviceAccount.name "default"}}
|
||||
{{- fail "serviceAccount.name cannot be set to 'default'" }}
|
||||
{{- end }}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "gha-runner-scale-set-controller.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- if not .Values.serviceAccount.name }}
|
||||
{{- fail "serviceAccount.name must be set if serviceAccount.create is false" }}
|
||||
{{- else }}
|
||||
{{- .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
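As the helper above enforces, the controller chart refuses to run under the default service account, and a name is mandatory when creation is disabled. A values sketch for reusing a pre-existing account (the account name is a placeholder):

serviceAccount:
  create: false
  name: arc-controller-sa   # must be non-empty and must not be "default"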
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.managerClusterRoleName" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.managerClusterRoleBinding" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace-watch
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleBinding" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace-watch
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-listener
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.managerListenerRoleBinding" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-listener
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
|
||||
{{- $names := list }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- $names = append $names $v.name }}
|
||||
{{- end }}
|
||||
{{- $names | join ","}}
|
||||
{{- end }}
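The helper above simply joins the name fields of imagePullSecrets with commas; the deployment template forwards the result to the listener pods via --auto-scaler-image-pull-secrets. A sketch with placeholder secret names:

imagePullSecrets:
  - name: ghcr-pull-secret
  - name: internal-registry-pull-secret
# renders as: --auto-scaler-image-pull-secrets=ghcr-pull-secret,internal-registry-pull-secret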
|
||||
|
||||
{{- define "gha-runner-scale-set-controller.serviceMonitorName" -}}
|
||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-service-monitor
|
||||
{{- end }}
|
||||
charts/gha-runner-scale-set-controller/templates/deployment.yaml (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }}
|
||||
actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }}
|
||||
actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
|
||||
{{- if .Values.flags.watchSingleNamespace }}
|
||||
actions.github.com/controller-watch-single-namespace: {{ .Values.flags.watchSingleNamespace }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ default 1 .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
kubectl.kubernetes.io/default-container: "manager"
|
||||
{{- with .Values.podAnnotations }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: gha-rs-controller
|
||||
app.kubernetes.io/component: controller-manager
|
||||
app.kubernetes.io/version: {{ .Chart.Version }}
|
||||
{{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
|
||||
{{- with .Values.podSecurityContext }}
|
||||
securityContext:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.priorityClassName }}
|
||||
priorityClassName: "{{ . }}"
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: manager
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
args:
|
||||
- "--auto-scaling-runner-set-only"
|
||||
{{- if gt (int (default 1 .Values.replicaCount)) 1 }}
|
||||
- "--enable-leader-election"
|
||||
- "--leader-election-id={{ include "gha-runner-scale-set-controller.fullname" . }}"
|
||||
{{- end }}
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
- "--auto-scaler-image-pull-secrets={{ include "gha-runner-scale-set-controller.imagePullSecretsNames" . }}"
|
||||
{{- end }}
|
||||
{{- with .Values.flags.logLevel }}
|
||||
- "--log-level={{ . }}"
|
||||
{{- end }}
|
||||
{{- with .Values.flags.logFormat }}
|
||||
- "--log-format={{ . }}"
|
||||
{{- end }}
|
||||
{{- with .Values.flags.watchSingleNamespace }}
|
||||
- "--watch-single-namespace={{ . }}"
|
||||
{{- end }}
|
||||
{{- with .Values.flags.updateStrategy }}
|
||||
- "--update-strategy={{ . }}"
|
||||
{{- end }}
|
||||
{{- if .Values.metrics }}
|
||||
{{- with .Values.metrics }}
|
||||
- "--listener-metrics-addr={{ .listenerAddr }}"
|
||||
- "--listener-metrics-endpoint={{ .listenerEndpoint }}"
|
||||
- "--metrics-addr={{ .controllerManagerAddr }}"
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
- "--listener-metrics-addr=0"
|
||||
- "--listener-metrics-endpoint="
|
||||
- "--metrics-addr=0"
|
||||
{{- end }}
|
||||
command:
|
||||
- "/manager"
|
||||
{{- with .Values.metrics }}
|
||||
ports:
|
||||
- containerPort: {{regexReplaceAll ":([0-9]+)" .controllerManagerAddr "${1}"}}
|
||||
protocol: TCP
|
||||
name: metrics
|
||||
{{- end }}
|
||||
env:
|
||||
- name: CONTROLLER_MANAGER_CONTAINER_IMAGE
|
||||
value: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
- name: CONTROLLER_MANAGER_POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
|
||||
value: "{{ .Values.image.pullPolicy | default "IfNotPresent" }}"
|
||||
{{- with .Values.env }}
|
||||
{{- if kindIs "slice" . }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.resources }}
|
||||
resources:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.securityContext }}
|
||||
securityContext:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp
|
||||
terminationGracePeriodSeconds: 10
|
||||
volumes:
|
||||
- name: tmp
|
||||
emptyDir: {}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,12 @@
|
||||
{{- if gt (int (default 1 .Values.replicaCount)) 1 }}
|
||||
# permissions to do leader election.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.leaderElectionRoleName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["get", "watch", "list", "delete", "update", "create"]
|
||||
{{- end }}
|
||||
@@ -0,0 +1,15 @@
|
||||
{{- if gt (int (default 1 .Values.replicaCount)) 1 }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.leaderElectionRoleBinding" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "gha-runner-scale-set-controller.leaderElectionRoleName" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
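Both the leases Role above and this RoleBinding are rendered only when replicaCount is greater than 1, which is also the condition under which the deployment template adds the leader-election flags. A hedged example:

replicaCount: 2
# the manager args then additionally include:
#   --enable-leader-election
#   --leader-election-id=<value of gha-runner-scale-set-controller.fullname>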
|
||||
@@ -0,0 +1,144 @@
|
||||
{{- if empty .Values.flags.watchSingleNamespace }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.managerClusterRoleName" . }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalingrunnersets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalingrunnersets/finalizers
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalingrunnersets/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalinglisteners
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalinglisteners/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalinglisteners/finalizers
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunnersets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunnersets/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunnersets/finalizers
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunners
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunners/finalizers
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunners/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- rolebindings
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- roles
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- patch
|
||||
{{- end }}
|
||||
@@ -0,0 +1,14 @@
|
||||
{{- if empty .Values.flags.watchSingleNamespace }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.managerClusterRoleBinding" . }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ include "gha-runner-scale-set-controller.managerClusterRoleName" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,40 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/status
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
@@ -0,0 +1,13 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleBinding" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
@@ -0,0 +1,84 @@
|
||||
{{- if .Values.flags.watchSingleNamespace }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalinglisteners
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalinglisteners/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalinglisteners/finalizers
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- rolebindings
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- roles
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalingrunnersets
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunnersets
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunners
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
{{- end }}
|
||||
@@ -0,0 +1,15 @@
|
||||
{{- if .Values.flags.watchSingleNamespace }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,125 @@
|
||||
{{- if .Values.flags.watchSingleNamespace }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" . }}
|
||||
namespace: {{ .Values.flags.watchSingleNamespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalingrunnersets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalingrunnersets/finalizers
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalingrunnersets/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunnersets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunnersets/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunnersets/finalizers
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunners
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunners/finalizers
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- ephemeralrunners/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- actions.github.com
|
||||
resources:
|
||||
- autoscalinglisteners
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- rolebindings
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- roles
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- patch
|
||||
{{- end }}
|
||||
@@ -0,0 +1,15 @@
|
||||
{{- if .Values.flags.watchSingleNamespace }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleBinding" . }}
|
||||
namespace: {{ .Values.flags.watchSingleNamespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,13 @@
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
charts/gha-runner-scale-set-controller/tests/template_test.go (new file, 1024 lines; diff suppressed because it is too large)
charts/gha-runner-scale-set-controller/values.yaml (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
# Default values for gha-runner-scale-set-controller.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
labels: {}
|
||||
|
||||
# leaderElection will be enabled when replicaCount>1,
|
||||
# so only one replica will be in charge of reconciliation at a given time.
|
||||
# leaderElectionId will be set to {{ define gha-runner-scale-set-controller.fullname }}.
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: "ghcr.io/actions/gha-runner-scale-set-controller"
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: ""
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
env:
|
||||
## Define environment variables for the controller pod
|
||||
# - name: "ENV_VAR_NAME_1"
|
||||
# value: "ENV_VAR_VALUE_1"
|
||||
# - name: "ENV_VAR_NAME_2"
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# key: ENV_VAR_NAME_2
|
||||
# name: secret-name
|
||||
# optional: true
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created for running the controller pod
|
||||
create: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
# You cannot use the default service account for this.
|
||||
name: ""
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
podSecurityContext: {}
|
||||
# fsGroup: 2000
|
||||
|
||||
securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
resources: {}
|
||||
## We usually recommend not to specify default resources and to leave this as a conscious
|
||||
## choice for the user. This also increases chances charts run on environments with little
|
||||
## resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
# Leverage a PriorityClass to ensure your pods survive resource shortages
|
||||
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
|
||||
# PriorityClass: system-cluster-critical
|
||||
priorityClassName: ""
|
||||
|
||||
flags:
|
||||
## Log level can be set here with one of the following values: "debug", "info", "warn", "error".
|
||||
## Defaults to "debug".
|
||||
logLevel: "debug"
|
||||
## Log format can be set with one of the following values: "text", "json"
|
||||
## Defaults to "text"
|
||||
logFormat: "text"
|
||||
|
||||
## Restricts the controller to only watch resources in the desired namespace.
|
||||
## Defaults to watching all namespaces when unset.
|
||||
# watchSingleNamespace: ""
|
||||
|
||||
## Defines how the controller should handle upgrades while having running jobs.
|
||||
##
|
||||
## The strategies available are:
|
||||
## - "immediate": (default) The controller will immediately apply the change causing the
|
||||
## recreation of the listener and ephemeral runner set. This can lead to an
|
||||
## overprovisioning of runners, if there are pending / running jobs. This should not
|
||||
## be a problem at a small scale, but it could lead to a significant increase of
|
||||
## resources if you have a lot of jobs running concurrently.
|
||||
##
|
||||
## - "eventual": The controller will remove the listener and ephemeral runner set
|
||||
## immediately, but will not recreate them (to apply changes) until all
|
||||
## pending / running jobs have completed.
|
||||
## This can lead to a longer time to apply the change but it will ensure
|
||||
## that you don't have any overprovisioning of runners.
|
||||
updateStrategy: "immediate"
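To opt into the slower, non-overprovisioning behaviour described above, only this flag changes; a sketch of the override:

flags:
  updateStrategy: "eventual"   # recreate the listener and ephemeral runner set only after running jobs finish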
|
||||
|
||||
## If `metrics:` object is not provided, or commented out, the following flags
|
||||
## will be applied to the controller-manager and listener pods with empty values:
|
||||
## `--metrics-addr`, `--listener-metrics-addr`, `--listener-metrics-endpoint`.
|
||||
## This will disable metrics.
|
||||
##
|
||||
## To enable metrics, uncomment the following lines.
|
||||
# metrics:
|
||||
# controllerManagerAddr: ":8080"
|
||||
# listenerAddr: ":8080"
|
||||
# listenerEndpoint: "/metrics"
|
||||
charts/gha-runner-scale-set/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
charts/gha-runner-scale-set/Chart.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
apiVersion: v2
|
||||
name: gha-runner-scale-set
|
||||
description: A Helm chart for deploying an AutoScalingRunnerSet
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.5.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: "0.5.0"
|
||||
|
||||
home: https://github.com/actions/dev-arc
|
||||
|
||||
sources:
|
||||
- "https://github.com/actions/dev-arc"
|
||||
|
||||
maintainers:
|
||||
- name: actions
|
||||
url: https://github.com/actions
|
||||
charts/gha-runner-scale-set/ci/ci-values.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
# Set the following to dummy values.
|
||||
# This is only useful in CI
|
||||
githubConfigUrl: https://github.com/actions/actions-runner-controller
|
||||
|
||||
githubConfigSecret:
|
||||
github_token: test
|
||||
charts/gha-runner-scale-set/templates/NOTES.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
Thank you for installing {{ .Chart.Name }}.
|
||||
|
||||
Your release is named {{ .Release.Name }}.
|
||||
charts/gha-runner-scale-set/templates/_helpers.tpl (new file, 555 lines)
@@ -0,0 +1,555 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
|
||||
{{- define "gha-base-name" -}}
|
||||
gha-rs
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.name" -}}
|
||||
{{- default (include "gha-base-name" .) .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "gha-runner-scale-set.fullname" -}}
|
||||
{{- $name := default (include "gha-base-name" .) }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "gha-runner-scale-set.chart" -}}
|
||||
{{- printf "%s-%s" (include "gha-base-name" .) .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "gha-runner-scale-set.labels" -}}
|
||||
helm.sh/chart: {{ include "gha-runner-scale-set.chart" . }}
|
||||
{{ include "gha-runner-scale-set.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/part-of: gha-rs
|
||||
actions.github.com/scale-set-name: {{ .Release.Name }}
|
||||
actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "gha-runner-scale-set.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "gha-runner-scale-set.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.githubsecret" -}}
|
||||
{{- if kindIs "string" .Values.githubConfigSecret }}
|
||||
{{- if not (empty .Values.githubConfigSecret) }}
|
||||
{{- .Values.githubConfigSecret }}
|
||||
{{- else}}
|
||||
{{- fail "Values.githubConfigSecret is required for setting auth with GitHub server." }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
{{- include "gha-runner-scale-set.fullname" . }}-github-secret
|
||||
{{- end }}
|
||||
{{- end }}
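The helper above accepts githubConfigSecret either as the name of a pre-created Kubernetes secret or as an inline object, in which case it points at a secret named <fullname>-github-secret. Two values sketches with placeholder credentials:

# Option 1: reference an existing secret by name
githubConfigSecret: my-arc-github-secret

# Option 2: provide the credentials inline (as the ci-values.yaml above does)
githubConfigSecret:
  github_token: "<personal-access-token>"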
|
||||
|
||||
{{- define "gha-runner-scale-set.noPermissionServiceAccountName" -}}
|
||||
{{- include "gha-runner-scale-set.fullname" . }}-no-permission
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.kubeModeRoleName" -}}
|
||||
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.kubeModeRoleBindingName" -}}
|
||||
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
|
||||
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.dind-init-container" -}}
|
||||
{{- range $i, $val := .Values.template.spec.containers }}
|
||||
{{- if eq $val.name "runner" }}
|
||||
image: {{ $val.image }}
|
||||
command: ["cp"]
|
||||
args: ["-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"]
|
||||
volumeMounts:
|
||||
- name: dind-externals
|
||||
mountPath: /home/runner/tmpDir
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.dind-container" -}}
|
||||
image: docker:dind
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: work
|
||||
mountPath: /home/runner/_work
|
||||
- name: dind-cert
|
||||
mountPath: /certs/client
|
||||
- name: dind-externals
|
||||
mountPath: /home/runner/externals
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.dind-volume" -}}
|
||||
- name: dind-cert
|
||||
emptyDir: {}
|
||||
- name: dind-externals
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.tls-volume" -}}
|
||||
- name: github-server-tls-cert
|
||||
configMap:
|
||||
name: {{ .certificateFrom.configMapKeyRef.name }}
|
||||
items:
|
||||
- key: {{ .certificateFrom.configMapKeyRef.key }}
|
||||
path: {{ .certificateFrom.configMapKeyRef.key }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.dind-work-volume" -}}
|
||||
{{- $createWorkVolume := 1 }}
|
||||
{{- range $i, $volume := .Values.template.spec.volumes }}
|
||||
{{- if eq $volume.name "work" }}
|
||||
{{- $createWorkVolume = 0 }}
|
||||
- {{ $volume | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq $createWorkVolume 1 }}
|
||||
- name: work
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.kubernetes-mode-work-volume" -}}
|
||||
{{- $createWorkVolume := 1 }}
|
||||
{{- range $i, $volume := .Values.template.spec.volumes }}
|
||||
{{- if eq $volume.name "work" }}
|
||||
{{- $createWorkVolume = 0 }}
|
||||
- {{ $volume | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq $createWorkVolume 1 }}
|
||||
- name: work
|
||||
ephemeral:
|
||||
volumeClaimTemplate:
|
||||
spec:
|
||||
{{- .Values.containerMode.kubernetesModeWorkVolumeClaim | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.non-work-volumes" -}}
|
||||
{{- range $i, $volume := .Values.template.spec.volumes }}
|
||||
{{- if ne $volume.name "work" }}
|
||||
- {{ $volume | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.non-runner-containers" -}}
|
||||
{{- range $i, $container := .Values.template.spec.containers }}
|
||||
{{- if ne $container.name "runner" }}
|
||||
- {{ $container | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.non-runner-non-dind-containers" -}}
|
||||
{{- range $i, $container := .Values.template.spec.containers }}
|
||||
{{- if and (ne $container.name "runner") (ne $container.name "dind") }}
|
||||
- {{ $container | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.dind-runner-container" -}}
|
||||
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
|
||||
{{- range $i, $container := .Values.template.spec.containers }}
|
||||
{{- if eq $container.name "runner" }}
|
||||
{{- range $key, $val := $container }}
|
||||
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
|
||||
{{ $key }}: {{ $val | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- $setDockerHost := 1 }}
|
||||
{{- $setDockerTlsVerify := 1 }}
|
||||
{{- $setDockerCertPath := 1 }}
|
||||
{{- $setRunnerWaitDocker := 1 }}
|
||||
{{- $setNodeExtraCaCerts := 0 }}
|
||||
{{- $setRunnerUpdateCaCerts := 0 }}
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- $setNodeExtraCaCerts = 1 }}
|
||||
{{- $setRunnerUpdateCaCerts = 1 }}
|
||||
{{- end }}
|
||||
env:
|
||||
{{- with $container.env }}
|
||||
{{- range $i, $env := . }}
|
||||
{{- if eq $env.name "DOCKER_HOST" }}
|
||||
{{- $setDockerHost = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "DOCKER_TLS_VERIFY" }}
|
||||
{{- $setDockerTlsVerify = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "DOCKER_CERT_PATH" }}
|
||||
{{- $setDockerCertPath = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS" }}
|
||||
{{- $setRunnerWaitDocker = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
|
||||
{{- $setNodeExtraCaCerts = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
|
||||
{{- $setRunnerUpdateCaCerts = 0 }}
|
||||
{{- end }}
|
||||
- {{ $env | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $setDockerHost }}
|
||||
- name: DOCKER_HOST
|
||||
value: tcp://localhost:2376
|
||||
{{- end }}
|
||||
{{- if $setDockerTlsVerify }}
|
||||
- name: DOCKER_TLS_VERIFY
|
||||
value: "1"
|
||||
{{- end }}
|
||||
{{- if $setDockerCertPath }}
|
||||
- name: DOCKER_CERT_PATH
|
||||
value: /certs/client
|
||||
{{- end }}
|
||||
{{- if $setRunnerWaitDocker }}
|
||||
- name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS
|
||||
value: "120"
|
||||
{{- end }}
|
||||
{{- if $setNodeExtraCaCerts }}
|
||||
- name: NODE_EXTRA_CA_CERTS
|
||||
value: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||
{{- end }}
|
||||
{{- if $setRunnerUpdateCaCerts }}
|
||||
- name: RUNNER_UPDATE_CA_CERTS
|
||||
value: "1"
|
||||
{{- end }}
|
||||
{{- $mountWork := 1 }}
|
||||
{{- $mountDindCert := 1 }}
|
||||
{{- $mountGitHubServerTLS := 0 }}
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- $mountGitHubServerTLS = 1 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
{{- with $container.volumeMounts }}
|
||||
{{- range $i, $volMount := . }}
|
||||
{{- if eq $volMount.name "work" }}
|
||||
{{- $mountWork = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $volMount.name "dind-cert" }}
|
||||
{{- $mountDindCert = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $volMount.name "github-server-tls-cert" }}
|
||||
{{- $mountGitHubServerTLS = 0 }}
|
||||
{{- end }}
|
||||
- {{ $volMount | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $mountWork }}
|
||||
- name: work
|
||||
mountPath: /home/runner/_work
|
||||
{{- end }}
|
||||
{{- if $mountDindCert }}
|
||||
- name: dind-cert
|
||||
mountPath: /certs/client
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- if $mountGitHubServerTLS }}
|
||||
- name: github-server-tls-cert
|
||||
mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||
subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.kubernetes-mode-runner-container" -}}
|
||||
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
|
||||
{{- range $i, $container := .Values.template.spec.containers }}
|
||||
{{- if eq $container.name "runner" }}
|
||||
{{- range $key, $val := $container }}
|
||||
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
|
||||
{{ $key }}: {{ $val | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- $setContainerHooks := 1 }}
|
||||
{{- $setPodName := 1 }}
|
||||
{{- $setRequireJobContainer := 1 }}
|
||||
{{- $setNodeExtraCaCerts := 0 }}
|
||||
{{- $setRunnerUpdateCaCerts := 0 }}
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- $setNodeExtraCaCerts = 1 }}
|
||||
{{- $setRunnerUpdateCaCerts = 1 }}
|
||||
{{- end }}
|
||||
env:
|
||||
{{- with $container.env }}
|
||||
{{- range $i, $env := . }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }}
|
||||
{{- $setContainerHooks = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }}
|
||||
{{- $setPodName = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }}
|
||||
{{- $setRequireJobContainer = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
|
||||
{{- $setNodeExtraCaCerts = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
|
||||
{{- $setRunnerUpdateCaCerts = 0 }}
|
||||
{{- end }}
|
||||
- {{ $env | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $setContainerHooks }}
|
||||
- name: ACTIONS_RUNNER_CONTAINER_HOOKS
|
||||
value: /home/runner/k8s/index.js
|
||||
{{- end }}
|
||||
{{- if $setPodName }}
|
||||
- name: ACTIONS_RUNNER_POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
{{- end }}
|
||||
{{- if $setRequireJobContainer }}
|
||||
- name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
|
||||
value: "true"
|
||||
{{- end }}
|
||||
{{- if $setNodeExtraCaCerts }}
|
||||
- name: NODE_EXTRA_CA_CERTS
|
||||
value: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||
{{- end }}
|
||||
{{- if $setRunnerUpdateCaCerts }}
|
||||
- name: RUNNER_UPDATE_CA_CERTS
|
||||
value: "1"
|
||||
{{- end }}
|
||||
{{- $mountWork := 1 }}
|
||||
{{- $mountGitHubServerTLS := 0 }}
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- $mountGitHubServerTLS = 1 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
{{- with $container.volumeMounts }}
|
||||
{{- range $i, $volMount := . }}
|
||||
{{- if eq $volMount.name "work" }}
|
||||
{{- $mountWork = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $volMount.name "github-server-tls-cert" }}
|
||||
{{- $mountGitHubServerTLS = 0 }}
|
||||
{{- end }}
|
||||
- {{ $volMount | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $mountWork }}
|
||||
- name: work
|
||||
mountPath: /home/runner/_work
|
||||
{{- end }}
|
||||
{{- if $mountGitHubServerTLS }}
|
||||
- name: github-server-tls-cert
|
||||
mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||
subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
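{{- /*
Illustrative note (not part of the chart): the kubernetes-mode helper above wires
the runner up to the container hooks. Unless overridden, ACTIONS_RUNNER_CONTAINER_HOOKS
defaults to /home/runner/k8s/index.js, ACTIONS_RUNNER_POD_NAME is resolved from the
pod's metadata.name via the downward API, and ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER is
set to "true". Each default is suppressed by listing an env entry of the same name on
the runner container in values.yaml, e.g. (hypothetical override):

  template:
    spec:
      containers:
        - name: runner
          env:
            - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
              value: "false"
*/}}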
{{- define "gha-runner-scale-set.default-mode-runner-containers" -}}
|
||||
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
|
||||
{{- range $i, $container := .Values.template.spec.containers }}
|
||||
{{- if ne $container.name "runner" }}
|
||||
- {{ $container | toYaml | nindent 2 }}
|
||||
{{- else }}
|
||||
- name: {{ $container.name }}
|
||||
{{- range $key, $val := $container }}
|
||||
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
|
||||
{{ $key }}: {{ $val | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- $setNodeExtraCaCerts := 0 }}
|
||||
{{- $setRunnerUpdateCaCerts := 0 }}
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- $setNodeExtraCaCerts = 1 }}
|
||||
{{- $setRunnerUpdateCaCerts = 1 }}
|
||||
{{- end }}
|
||||
env:
|
||||
{{- with $container.env }}
|
||||
{{- range $i, $env := . }}
|
||||
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
|
||||
{{- $setNodeExtraCaCerts = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
|
||||
{{- $setRunnerUpdateCaCerts = 0 }}
|
||||
{{- end }}
|
||||
- {{ $env | toYaml | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $setNodeExtraCaCerts }}
|
||||
- name: NODE_EXTRA_CA_CERTS
|
||||
value: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||
{{- end }}
|
||||
{{- if $setRunnerUpdateCaCerts }}
|
||||
- name: RUNNER_UPDATE_CA_CERTS
|
||||
value: "1"
|
||||
{{- end }}
|
||||
{{- $mountGitHubServerTLS := 0 }}
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- $mountGitHubServerTLS = 1 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
{{- with $container.volumeMounts }}
|
||||
{{- range $i, $volMount := . }}
|
||||
{{- if eq $volMount.name "github-server-tls-cert" }}
|
||||
{{- $mountGitHubServerTLS = 0 }}
|
||||
{{- end }}
|
||||
- {{ $volMount | toYaml | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $mountGitHubServerTLS }}
|
||||
- name: github-server-tls-cert
|
||||
mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||
subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
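{{- /*
Illustrative note (not part of the chart): in default mode the helper above passes
every non-"runner" container through verbatim (sidecars are not rewritten), and only
the "runner" container is merged with the TLS-related defaults. NODE_EXTRA_CA_CERTS,
RUNNER_UPDATE_CA_CERTS and the github-server-tls-cert mount are only emitted when
githubServerTLS.runnerMountPath is set and the user has not already provided entries
with those names.
*/}}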
{{- define "gha-runner-scale-set.managerRoleName" -}}
|
||||
{{- include "gha-runner-scale-set.fullname" . }}-manager
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.managerRoleBindingName" -}}
|
||||
{{- include "gha-runner-scale-set.fullname" . }}-manager
|
||||
{{- end }}
|
||||
|
||||
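{{- /*
Illustrative note: both manager helpers above resolve to "<fullname>-manager", so a
release whose fullname renders as, say, "arc-runner-set" (hypothetical name) yields
the Role and RoleBinding name "arc-runner-set-manager".
*/}}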
{{- define "gha-runner-scale-set.managerServiceAccountName" -}}
|
||||
{{- $searchControllerDeployment := 1 }}
|
||||
{{- if .Values.controllerServiceAccount }}
|
||||
{{- if .Values.controllerServiceAccount.name }}
|
||||
{{- $searchControllerDeployment = 0 }}
|
||||
{{- .Values.controllerServiceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq $searchControllerDeployment 1 }}
|
||||
{{- $multiNamespacesCounter := 0 }}
|
||||
{{- $singleNamespaceCounter := 0 }}
|
||||
{{- $controllerDeployment := dict }}
|
||||
{{- $singleNamespaceControllerDeployments := dict }}
|
||||
{{- $managerServiceAccountName := "" }}
|
||||
{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
|
||||
{{- if kindIs "map" $deployment.metadata.labels }}
|
||||
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-rs-controller" }}
|
||||
{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
|
||||
{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
|
||||
{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}
|
||||
{{- else }}
|
||||
{{- $multiNamespacesCounter = add $multiNamespacesCounter 1 }}
|
||||
{{- $controllerDeployment = $deployment }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
|
||||
{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
|
||||
{{- end }}
|
||||
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
|
||||
{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
|
||||
{{- end }}
|
||||
{{- if gt $multiNamespacesCounter 1 }}
|
||||
{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
|
||||
{{- end }}
|
||||
{{- if eq $multiNamespacesCounter 1 }}
|
||||
{{- with $controllerDeployment.metadata }}
|
||||
{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
|
||||
{{- end }}
|
||||
{{- else if gt $singleNamespaceCounter 0 }}
|
||||
{{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }}
|
||||
{{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }}
|
||||
{{- with $controllerDeployment.metadata }}
|
||||
{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq $managerServiceAccountName "" }}
|
||||
{{- fail "No service account name found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
|
||||
{{- end }}
|
||||
{{- $managerServiceAccountName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
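{{- /*
Illustrative note (not part of the chart): the service-account discovery above (name)
and below (namespace) walks Deployments labelled app.kubernetes.io/part-of=gha-rs-controller
via `lookup`, so it requires access to a live cluster. As the fail messages suggest, the
lookup-based discovery can be bypassed by pinning the controller's service account
explicitly in values.yaml, e.g. (hypothetical names):

  controllerServiceAccount:
    namespace: arc-systems
    name: arc-gha-rs-controller

With both fields set, $searchControllerDeployment is switched to 0 in each helper and
the lookup branch is skipped entirely.
*/}}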
{{- define "gha-runner-scale-set.managerServiceAccountNamespace" -}}
|
||||
{{- $searchControllerDeployment := 1 }}
|
||||
{{- if .Values.controllerServiceAccount }}
|
||||
{{- if .Values.controllerServiceAccount.namespace }}
|
||||
{{- $searchControllerDeployment = 0 }}
|
||||
{{- .Values.controllerServiceAccount.namespace }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq $searchControllerDeployment 1 }}
|
||||
{{- $multiNamespacesCounter := 0 }}
|
||||
{{- $singleNamespaceCounter := 0 }}
|
||||
{{- $controllerDeployment := dict }}
|
||||
{{- $singleNamespaceControllerDeployments := dict }}
|
||||
{{- $managerServiceAccountNamespace := "" }}
|
||||
{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
|
||||
{{- if kindIs "map" $deployment.metadata.labels }}
|
||||
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-rs-controller" }}
|
||||
{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
|
||||
{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
|
||||
{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}
|
||||
{{- else }}
|
||||
{{- $multiNamespacesCounter = add $multiNamespacesCounter 1 }}
|
||||
{{- $controllerDeployment = $deployment }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
|
||||
{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
|
||||
{{- end }}
|
||||
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
|
||||
{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
|
||||
{{- end }}
|
||||
{{- if gt $multiNamespacesCounter 1 }}
|
||||
{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
|
||||
{{- end }}
|
||||
{{- if eq $multiNamespacesCounter 1 }}
|
||||
{{- with $controllerDeployment.metadata }}
|
||||
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
|
||||
{{- end }}
|
||||
{{- else if gt $singleNamespaceCounter 0 }}
|
||||
{{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }}
|
||||
{{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }}
|
||||
{{- with $controllerDeployment.metadata }}
|
||||
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq $managerServiceAccountNamespace "" }}
|
||||
{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
|
||||
{{- end }}
|
||||
{{- $managerServiceAccountNamespace }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||