mirror of
https://github.com/actions/actions-runner-controller.git
synced 2025-12-10 19:50:30 +00:00
Compare commits
71 Commits
actions-ru
...
actions-ru
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f1d7c52253 | ||
|
|
76d622b86b | ||
|
|
0b24b0d60b | ||
|
|
5e23c598a8 | ||
|
|
3652932780 | ||
|
|
94065d2fc5 | ||
|
|
b1cc4da5dc | ||
|
|
8b7bfa5ffb | ||
|
|
52fc819339 | ||
|
|
215b245881 | ||
|
|
a3df23b07c | ||
|
|
f5c69654e7 | ||
|
|
abc0b678d3 | ||
|
|
963ab2a748 | ||
|
|
8a41a596b6 | ||
|
|
e10c437f46 | ||
|
|
a0a3916c80 | ||
|
|
1c360d7e26 | ||
|
|
20bb860a37 | ||
|
|
6a75bc0880 | ||
|
|
78271000c0 | ||
|
|
a36b0e58b0 | ||
|
|
336e11a4e9 | ||
|
|
dcb64f0b9e | ||
|
|
0dadfc4d37 | ||
|
|
dc58f6ba13 | ||
|
|
06cbd632b8 | ||
|
|
9f33ae1507 | ||
|
|
63a6b5a7f0 | ||
|
|
fddc5bf1c8 | ||
|
|
d90ce2bed5 | ||
|
|
cd996e7c27 | ||
|
|
297442975e | ||
|
|
5271f316e6 | ||
|
|
9845a934f4 | ||
|
|
e0a7e142e0 | ||
|
|
f9a11a8b0b | ||
|
|
fde1893494 | ||
|
|
6fe8008640 | ||
|
|
2fee26ddce | ||
|
|
685f7162a4 | ||
|
|
d134dee14b | ||
|
|
c33ce998f4 | ||
|
|
78a93566af | ||
|
|
81dea9b3dc | ||
|
|
7ca3df3605 | ||
|
|
2343cd2d7b | ||
|
|
cf18cb3fb0 | ||
|
|
ae8b27a9a3 | ||
|
|
58ee5e8c4e | ||
|
|
fade63a663 | ||
|
|
ac4056f85b | ||
|
|
462d044604 | ||
|
|
94934819c4 | ||
|
|
aac811f210 | ||
|
|
e7ec736738 | ||
|
|
90ea691e72 | ||
|
|
32a653c0ca | ||
|
|
c7b2dd1764 | ||
|
|
80af7fc125 | ||
|
|
34909f0cf1 | ||
|
|
8afef51c8b | ||
|
|
032443fcfd | ||
|
|
91c8991835 | ||
|
|
c5ebe750dc | ||
|
|
34fdbf1231 | ||
|
|
44e9b7d8eb | ||
|
|
7ab516fdab | ||
|
|
e571df52b5 | ||
|
|
706ec17bf4 | ||
|
|
30355f742b |
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
*.png filter=lfs diff=lfs merge=lfs -text
|
||||||
@@ -23,6 +23,14 @@ inputs:
|
|||||||
arc-controller-namespace:
|
arc-controller-namespace:
|
||||||
description: 'The namespace of the configured gha-runner-scale-set-controller'
|
description: 'The namespace of the configured gha-runner-scale-set-controller'
|
||||||
required: true
|
required: true
|
||||||
|
wait-to-finish:
|
||||||
|
description: 'Wait for the workflow run to finish'
|
||||||
|
required: true
|
||||||
|
default: "true"
|
||||||
|
wait-to-running:
|
||||||
|
description: 'Wait for the workflow run to start running'
|
||||||
|
required: true
|
||||||
|
default: "false"
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
@@ -118,7 +126,36 @@ runs:
|
|||||||
| ${{steps.query_workflow.outputs.workflow_run_url}} |
|
| ${{steps.query_workflow.outputs.workflow_run_url}} |
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
|
- name: Wait for workflow to start running
|
||||||
|
if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
|
||||||
|
uses: actions/github-script@v6
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
function sleep(ms) {
|
||||||
|
return new Promise(resolve => setTimeout(resolve, ms))
|
||||||
|
}
|
||||||
|
const owner = '${{inputs.repo-owner}}'
|
||||||
|
const repo = '${{inputs.repo-name}}'
|
||||||
|
const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
|
||||||
|
const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
|
||||||
|
let count = 0
|
||||||
|
while (count++<10) {
|
||||||
|
await sleep(30 * 1000);
|
||||||
|
let getRunResponse = await github.rest.actions.getWorkflowRun({
|
||||||
|
owner: owner,
|
||||||
|
repo: repo,
|
||||||
|
run_id: workflow_run_id
|
||||||
|
})
|
||||||
|
console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
|
||||||
|
if (getRunResponse.data.status == 'in_progress') {
|
||||||
|
console.log(`Workflow run is in progress.`)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
core.setFailed(`The triggered workflow run didn't start properly using ${{inputs.arc-name}}`)
|
||||||
|
|
||||||
- name: Wait for workflow to finish successfully
|
- name: Wait for workflow to finish successfully
|
||||||
|
if: inputs.wait-to-finish == 'true'
|
||||||
uses: actions/github-script@v6
|
uses: actions/github-script@v6
|
||||||
with:
|
with:
|
||||||
script: |
|
script: |
|
||||||
@@ -151,10 +188,15 @@ runs:
|
|||||||
}
|
}
|
||||||
core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)
|
core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)
|
||||||
|
|
||||||
|
- name: cleanup
|
||||||
|
if: inputs.wait-to-finish == 'true'
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
|
||||||
|
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
|
||||||
|
|
||||||
- name: Gather logs and cleanup
|
- name: Gather logs and cleanup
|
||||||
shell: bash
|
shell: bash
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
|
kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}}
|
||||||
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
|
|
||||||
kubectl logs deployment/arc-gha-runner-scale-set-controller -n ${{inputs.arc-controller-namespace}}
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
name: Publish Helm Chart
|
name: Publish ARC Helm Charts
|
||||||
|
|
||||||
# Revert to https://github.com/actions-runner-controller/releases#releases
|
# Revert to https://github.com/actions-runner-controller/releases#releases
|
||||||
# for details on why we use this approach
|
# for details on why we use this approach
|
||||||
@@ -8,7 +8,7 @@ on:
|
|||||||
- master
|
- master
|
||||||
paths:
|
paths:
|
||||||
- 'charts/**'
|
- 'charts/**'
|
||||||
- '.github/workflows/publish-chart.yaml'
|
- '.github/workflows/arc-publish-chart.yaml'
|
||||||
- '!charts/actions-runner-controller/docs/**'
|
- '!charts/actions-runner-controller/docs/**'
|
||||||
- '!charts/gha-runner-scale-set-controller/**'
|
- '!charts/gha-runner-scale-set-controller/**'
|
||||||
- '!charts/gha-runner-scale-set/**'
|
- '!charts/gha-runner-scale-set/**'
|
||||||
@@ -28,6 +28,10 @@ env:
|
|||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
lint-chart:
|
lint-chart:
|
||||||
name: Lint Chart
|
name: Lint Chart
|
||||||
@@ -171,6 +175,7 @@ jobs:
|
|||||||
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
|
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
|
||||||
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
|
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
|
||||||
--index-path ${{ github.workspace }}/index.yaml \
|
--index-path ${{ github.workspace }}/index.yaml \
|
||||||
|
--token ${{ secrets.GITHUB_TOKEN }} \
|
||||||
--push \
|
--push \
|
||||||
--pages-branch 'gh-pages' \
|
--pages-branch 'gh-pages' \
|
||||||
--pages-index-path 'index.yaml'
|
--pages-index-path 'index.yaml'
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
name: Publish ARC
|
name: Publish ARC Image
|
||||||
|
|
||||||
# Revert to https://github.com/actions-runner-controller/releases#releases
|
# Revert to https://github.com/actions-runner-controller/releases#releases
|
||||||
# for details on why we use this approach
|
# for details on why we use this approach
|
||||||
@@ -25,6 +25,10 @@ env:
|
|||||||
TARGET_ORG: actions-runner-controller
|
TARGET_ORG: actions-runner-controller
|
||||||
TARGET_REPO: actions-runner-controller
|
TARGET_REPO: actions-runner-controller
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
release-controller:
|
release-controller:
|
||||||
name: Release
|
name: Release
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
name: Release Runner Images
|
name: Release ARC Runner Images
|
||||||
|
|
||||||
# Revert to https://github.com/actions-runner-controller/releases#releases
|
# Revert to https://github.com/actions-runner-controller/releases#releases
|
||||||
# for details on why we use this approach
|
# for details on why we use this approach
|
||||||
@@ -10,7 +10,7 @@ on:
|
|||||||
- 'master'
|
- 'master'
|
||||||
paths:
|
paths:
|
||||||
- 'runner/VERSION'
|
- 'runner/VERSION'
|
||||||
- '.github/workflows/release-runners.yaml'
|
- '.github/workflows/arc-release-runners.yaml'
|
||||||
|
|
||||||
env:
|
env:
|
||||||
# Safeguard to prevent pushing images to registeries after build
|
# Safeguard to prevent pushing images to registeries after build
|
||||||
@@ -19,6 +19,10 @@ env:
|
|||||||
TARGET_WORKFLOW: release-runners.yaml
|
TARGET_WORKFLOW: release-runners.yaml
|
||||||
DOCKER_VERSION: 20.10.23
|
DOCKER_VERSION: 20.10.23
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-runners:
|
build-runners:
|
||||||
name: Trigger Build and Push of Runner Images
|
name: Trigger Build and Push of Runner Images
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
# This workflows polls releases from actions/runner and in case of a new one it
|
# This workflows polls releases from actions/runner and in case of a new one it
|
||||||
# updates files containing runner version and opens a pull request.
|
# updates files containing runner version and opens a pull request.
|
||||||
name: Update runners
|
name: Runner Updates Check (Scheduled Job)
|
||||||
|
|
||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
@@ -146,4 +146,4 @@ jobs:
|
|||||||
git push -u origin HEAD
|
git push -u origin HEAD
|
||||||
|
|
||||||
- name: Create pull request
|
- name: Create pull request
|
||||||
run: gh pr create -f
|
run: gh pr create -f -l "runners update"
|
||||||
@@ -6,7 +6,7 @@ on:
|
|||||||
- master
|
- master
|
||||||
paths:
|
paths:
|
||||||
- 'charts/**'
|
- 'charts/**'
|
||||||
- '.github/workflows/validate-chart.yaml'
|
- '.github/workflows/arc-validate-chart.yaml'
|
||||||
- '!charts/actions-runner-controller/docs/**'
|
- '!charts/actions-runner-controller/docs/**'
|
||||||
- '!**.md'
|
- '!**.md'
|
||||||
- '!charts/gha-runner-scale-set-controller/**'
|
- '!charts/gha-runner-scale-set-controller/**'
|
||||||
@@ -14,7 +14,7 @@ on:
|
|||||||
push:
|
push:
|
||||||
paths:
|
paths:
|
||||||
- 'charts/**'
|
- 'charts/**'
|
||||||
- '.github/workflows/validate-chart.yaml'
|
- '.github/workflows/arc-validate-chart.yaml'
|
||||||
- '!charts/actions-runner-controller/docs/**'
|
- '!charts/actions-runner-controller/docs/**'
|
||||||
- '!**.md'
|
- '!**.md'
|
||||||
- '!charts/gha-runner-scale-set-controller/**'
|
- '!charts/gha-runner-scale-set-controller/**'
|
||||||
@@ -27,6 +27,13 @@ env:
|
|||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
# This will make sure we only apply the concurrency limits on pull requests
|
||||||
|
# but not pushes to master branch by making the concurrency group name unique
|
||||||
|
# for pushes
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
validate-chart:
|
validate-chart:
|
||||||
name: Lint Chart
|
name: Lint Chart
|
||||||
@@ -65,7 +72,7 @@ jobs:
|
|||||||
python-version: '3.7'
|
python-version: '3.7'
|
||||||
|
|
||||||
- name: Set up chart-testing
|
- name: Set up chart-testing
|
||||||
uses: helm/chart-testing-action@v2.3.1
|
uses: helm/chart-testing-action@v2.4.0
|
||||||
|
|
||||||
- name: Run chart-testing (list-changed)
|
- name: Run chart-testing (list-changed)
|
||||||
id: list-changed
|
id: list-changed
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
name: Validate Runners
|
name: Validate ARC Runners
|
||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
@@ -12,6 +12,13 @@ on:
|
|||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
# This will make sure we only apply the concurrency limits on pull requests
|
||||||
|
# but not pushes to master branch by making the concurrency group name unique
|
||||||
|
# for pushes
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
shellcheck:
|
shellcheck:
|
||||||
name: runner / shellcheck
|
name: runner / shellcheck
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
name: CI ARC E2E Linux VM Test
|
name: (gha) E2E Tests
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
@@ -16,7 +16,14 @@ env:
|
|||||||
TARGET_ORG: actions-runner-controller
|
TARGET_ORG: actions-runner-controller
|
||||||
TARGET_REPO: arc_e2e_test_dummy
|
TARGET_REPO: arc_e2e_test_dummy
|
||||||
IMAGE_NAME: "arc-test-image"
|
IMAGE_NAME: "arc-test-image"
|
||||||
IMAGE_VERSION: "0.4.0"
|
IMAGE_VERSION: "0.5.0"
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
# This will make sure we only apply the concurrency limits on pull requests
|
||||||
|
# but not pushes to master branch by making the concurrency group name unique
|
||||||
|
# for pushes
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
default-setup:
|
default-setup:
|
||||||
@@ -51,21 +58,21 @@ jobs:
|
|||||||
--debug
|
--debug
|
||||||
count=0
|
count=0
|
||||||
while true; do
|
while true; do
|
||||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
|
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||||
if [ -n "$POD_NAME" ]; then
|
if [ -n "$POD_NAME" ]; then
|
||||||
echo "Pod found: $POD_NAME"
|
echo "Pod found: $POD_NAME"
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
if [ "$count" -ge 60 ]; then
|
if [ "$count" -ge 60 ]; then
|
||||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
|
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
sleep 1
|
sleep 1
|
||||||
count=$((count+1))
|
count=$((count+1))
|
||||||
done
|
done
|
||||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
|
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||||
kubectl get pod -n arc-systems
|
kubectl get pod -n arc-systems
|
||||||
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
|
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||||
|
|
||||||
- name: Install gha-runner-scale-set
|
- name: Install gha-runner-scale-set
|
||||||
id: install_arc
|
id: install_arc
|
||||||
@@ -142,21 +149,21 @@ jobs:
|
|||||||
--debug
|
--debug
|
||||||
count=0
|
count=0
|
||||||
while true; do
|
while true; do
|
||||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
|
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||||
if [ -n "$POD_NAME" ]; then
|
if [ -n "$POD_NAME" ]; then
|
||||||
echo "Pod found: $POD_NAME"
|
echo "Pod found: $POD_NAME"
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
if [ "$count" -ge 60 ]; then
|
if [ "$count" -ge 60 ]; then
|
||||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
|
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
sleep 1
|
sleep 1
|
||||||
count=$((count+1))
|
count=$((count+1))
|
||||||
done
|
done
|
||||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
|
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||||
kubectl get pod -n arc-systems
|
kubectl get pod -n arc-systems
|
||||||
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
|
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||||
|
|
||||||
- name: Install gha-runner-scale-set
|
- name: Install gha-runner-scale-set
|
||||||
id: install_arc
|
id: install_arc
|
||||||
@@ -231,21 +238,21 @@ jobs:
|
|||||||
--debug
|
--debug
|
||||||
count=0
|
count=0
|
||||||
while true; do
|
while true; do
|
||||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
|
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||||
if [ -n "$POD_NAME" ]; then
|
if [ -n "$POD_NAME" ]; then
|
||||||
echo "Pod found: $POD_NAME"
|
echo "Pod found: $POD_NAME"
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
if [ "$count" -ge 60 ]; then
|
if [ "$count" -ge 60 ]; then
|
||||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
|
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
sleep 1
|
sleep 1
|
||||||
count=$((count+1))
|
count=$((count+1))
|
||||||
done
|
done
|
||||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
|
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||||
kubectl get pod -n arc-systems
|
kubectl get pod -n arc-systems
|
||||||
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
|
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||||
|
|
||||||
- name: Install gha-runner-scale-set
|
- name: Install gha-runner-scale-set
|
||||||
id: install_arc
|
id: install_arc
|
||||||
@@ -326,21 +333,21 @@ jobs:
|
|||||||
--debug
|
--debug
|
||||||
count=0
|
count=0
|
||||||
while true; do
|
while true; do
|
||||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
|
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||||
if [ -n "$POD_NAME" ]; then
|
if [ -n "$POD_NAME" ]; then
|
||||||
echo "Pod found: $POD_NAME"
|
echo "Pod found: $POD_NAME"
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
if [ "$count" -ge 60 ]; then
|
if [ "$count" -ge 60 ]; then
|
||||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
|
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
sleep 1
|
sleep 1
|
||||||
count=$((count+1))
|
count=$((count+1))
|
||||||
done
|
done
|
||||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
|
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||||
kubectl get pod -n arc-systems
|
kubectl get pod -n arc-systems
|
||||||
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
|
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||||
kubectl wait --timeout=30s --for=condition=ready pod -n openebs -l name=openebs-localpv-provisioner
|
kubectl wait --timeout=30s --for=condition=ready pod -n openebs -l name=openebs-localpv-provisioner
|
||||||
|
|
||||||
- name: Install gha-runner-scale-set
|
- name: Install gha-runner-scale-set
|
||||||
@@ -420,21 +427,21 @@ jobs:
|
|||||||
--debug
|
--debug
|
||||||
count=0
|
count=0
|
||||||
while true; do
|
while true; do
|
||||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
|
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||||
if [ -n "$POD_NAME" ]; then
|
if [ -n "$POD_NAME" ]; then
|
||||||
echo "Pod found: $POD_NAME"
|
echo "Pod found: $POD_NAME"
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
if [ "$count" -ge 60 ]; then
|
if [ "$count" -ge 60 ]; then
|
||||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
|
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
sleep 1
|
sleep 1
|
||||||
count=$((count+1))
|
count=$((count+1))
|
||||||
done
|
done
|
||||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
|
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||||
kubectl get pod -n arc-systems
|
kubectl get pod -n arc-systems
|
||||||
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
|
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||||
|
|
||||||
- name: Install gha-runner-scale-set
|
- name: Install gha-runner-scale-set
|
||||||
id: install_arc
|
id: install_arc
|
||||||
@@ -521,21 +528,21 @@ jobs:
|
|||||||
--debug
|
--debug
|
||||||
count=0
|
count=0
|
||||||
while true; do
|
while true; do
|
||||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
|
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||||
if [ -n "$POD_NAME" ]; then
|
if [ -n "$POD_NAME" ]; then
|
||||||
echo "Pod found: $POD_NAME"
|
echo "Pod found: $POD_NAME"
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
if [ "$count" -ge 60 ]; then
|
if [ "$count" -ge 60 ]; then
|
||||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
|
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
sleep 1
|
sleep 1
|
||||||
count=$((count+1))
|
count=$((count+1))
|
||||||
done
|
done
|
||||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
|
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||||
kubectl get pod -n arc-systems
|
kubectl get pod -n arc-systems
|
||||||
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
|
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||||
|
|
||||||
- name: Install gha-runner-scale-set
|
- name: Install gha-runner-scale-set
|
||||||
id: install_arc
|
id: install_arc
|
||||||
@@ -616,21 +623,21 @@ jobs:
|
|||||||
--debug
|
--debug
|
||||||
count=0
|
count=0
|
||||||
while true; do
|
while true; do
|
||||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
|
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||||
if [ -n "$POD_NAME" ]; then
|
if [ -n "$POD_NAME" ]; then
|
||||||
echo "Pod found: $POD_NAME"
|
echo "Pod found: $POD_NAME"
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
if [ "$count" -ge 60 ]; then
|
if [ "$count" -ge 60 ]; then
|
||||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
|
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
sleep 1
|
sleep 1
|
||||||
count=$((count+1))
|
count=$((count+1))
|
||||||
done
|
done
|
||||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
|
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||||
kubectl get pod -n arc-systems
|
kubectl get pod -n arc-systems
|
||||||
kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
|
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||||
|
|
||||||
- name: Install gha-runner-scale-set
|
- name: Install gha-runner-scale-set
|
||||||
id: install_arc
|
id: install_arc
|
||||||
@@ -703,3 +710,173 @@ jobs:
|
|||||||
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
||||||
arc-namespace: "arc-runners"
|
arc-namespace: "arc-runners"
|
||||||
arc-controller-namespace: "arc-systems"
|
arc-controller-namespace: "arc-systems"
|
||||||
|
|
||||||
|
update-strategy-tests:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
timeout-minutes: 20
|
||||||
|
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
|
||||||
|
env:
|
||||||
|
WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
ref: ${{github.head_ref}}
|
||||||
|
|
||||||
|
- uses: ./.github/actions/setup-arc-e2e
|
||||||
|
id: setup
|
||||||
|
with:
|
||||||
|
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
|
||||||
|
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
|
||||||
|
image-name: ${{env.IMAGE_NAME}}
|
||||||
|
image-tag: ${{env.IMAGE_VERSION}}
|
||||||
|
target-org: ${{env.TARGET_ORG}}
|
||||||
|
|
||||||
|
- name: Install gha-runner-scale-set-controller
|
||||||
|
id: install_arc_controller
|
||||||
|
run: |
|
||||||
|
helm install arc \
|
||||||
|
--namespace "arc-systems" \
|
||||||
|
--create-namespace \
|
||||||
|
--set image.repository=${{ env.IMAGE_NAME }} \
|
||||||
|
--set image.tag=${{ env.IMAGE_VERSION }} \
|
||||||
|
--set flags.updateStrategy="eventual" \
|
||||||
|
./charts/gha-runner-scale-set-controller \
|
||||||
|
--debug
|
||||||
|
count=0
|
||||||
|
while true; do
|
||||||
|
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||||
|
if [ -n "$POD_NAME" ]; then
|
||||||
|
echo "Pod found: $POD_NAME"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
if [ "$count" -ge 60 ]; then
|
||||||
|
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
count=$((count+1))
|
||||||
|
done
|
||||||
|
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
||||||
|
kubectl get pod -n arc-systems
|
||||||
|
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
||||||
|
|
||||||
|
- name: Install gha-runner-scale-set
|
||||||
|
id: install_arc
|
||||||
|
run: |
|
||||||
|
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
|
||||||
|
helm install "$ARC_NAME" \
|
||||||
|
--namespace "arc-runners" \
|
||||||
|
--create-namespace \
|
||||||
|
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
|
||||||
|
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||||
|
./charts/gha-runner-scale-set \
|
||||||
|
--debug
|
||||||
|
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
|
||||||
|
count=0
|
||||||
|
while true; do
|
||||||
|
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
|
||||||
|
if [ -n "$POD_NAME" ]; then
|
||||||
|
echo "Pod found: $POD_NAME"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
if [ "$count" -ge 60 ]; then
|
||||||
|
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
count=$((count+1))
|
||||||
|
done
|
||||||
|
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
|
||||||
|
kubectl get pod -n arc-systems
|
||||||
|
|
||||||
|
- name: Trigger long running jobs and wait for runners to pick them up
|
||||||
|
uses: ./.github/actions/execute-assert-arc-e2e
|
||||||
|
timeout-minutes: 10
|
||||||
|
with:
|
||||||
|
auth-token: ${{ steps.setup.outputs.token }}
|
||||||
|
repo-owner: ${{ env.TARGET_ORG }}
|
||||||
|
repo-name: ${{env.TARGET_REPO}}
|
||||||
|
workflow-file: ${{env.WORKFLOW_FILE}}
|
||||||
|
arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
|
||||||
|
arc-namespace: "arc-runners"
|
||||||
|
arc-controller-namespace: "arc-systems"
|
||||||
|
wait-to-running: "true"
|
||||||
|
wait-to-finish: "false"
|
||||||
|
|
||||||
|
- name: Upgrade the gha-runner-scale-set
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
helm upgrade --install "${{ steps.install_arc.outputs.ARC_NAME }}" \
|
||||||
|
--namespace "arc-runners" \
|
||||||
|
--create-namespace \
|
||||||
|
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{ env.TARGET_REPO }}" \
|
||||||
|
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
||||||
|
--set template.spec.containers[0].name="runner" \
|
||||||
|
--set template.spec.containers[0].image="ghcr.io/actions/actions-runner:latest" \
|
||||||
|
--set template.spec.containers[0].command={"/home/runner/run.sh"} \
|
||||||
|
--set template.spec.containers[0].env[0].name="TEST" \
|
||||||
|
--set template.spec.containers[0].env[0].value="E2E TESTS" \
|
||||||
|
./charts/gha-runner-scale-set \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
- name: Assert that the listener is deleted while jobs are running
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
count=0
|
||||||
|
while true; do
|
||||||
|
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name=${{ steps.install_arc.outputs.ARC_NAME }} -n arc-systems --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||||
|
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n arc-runners --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||||
|
RESOURCES="$(kubectl get pods -A)"
|
||||||
|
|
||||||
|
if [ "$LISTENER_COUNT" -eq 0 ]; then
|
||||||
|
echo "Listener has been deleted"
|
||||||
|
echo "$RESOURCES"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
if [ "$count" -ge 60 ]; then
|
||||||
|
echo "Timeout waiting for listener to be deleted"
|
||||||
|
echo "$RESOURCES"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Waiting for listener to be deleted"
|
||||||
|
echo "Listener count: $LISTENER_COUNT target: 0 | Runners count: $RUNNERS_COUNT target: 3"
|
||||||
|
|
||||||
|
sleep 1
|
||||||
|
count=$((count+1))
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: Assert that the listener goes back up after the jobs are done
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
count=0
|
||||||
|
while true; do
|
||||||
|
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name=${{ steps.install_arc.outputs.ARC_NAME }} -n arc-systems --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||||
|
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n arc-runners --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||||
|
RESOURCES="$(kubectl get pods -A)"
|
||||||
|
|
||||||
|
if [ "$LISTENER_COUNT" -eq 1 ]; then
|
||||||
|
echo "Listener is up!"
|
||||||
|
echo "$RESOURCES"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
if [ "$count" -ge 120 ]; then
|
||||||
|
echo "Timeout waiting for listener to be recreated"
|
||||||
|
echo "$RESOURCES"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Waiting for listener to be recreated"
|
||||||
|
echo "Listener count: $LISTENER_COUNT target: 1 | Runners count: $RUNNERS_COUNT target: 0"
|
||||||
|
|
||||||
|
sleep 1
|
||||||
|
count=$((count+1))
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: Gather logs and cleanup
|
||||||
|
shell: bash
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
helm uninstall "${{ steps.install_arc.outputs.ARC_NAME }}" --namespace "arc-runners" --debug
|
||||||
|
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}"
|
||||||
|
kubectl logs deployment/arc-gha-rs-controller -n "arc-systems"
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
name: Publish Runner Scale Set Controller Charts
|
name: (gha) Publish Helm Charts
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
@@ -35,6 +35,10 @@ env:
|
|||||||
permissions:
|
permissions:
|
||||||
packages: write
|
packages: write
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-push-image:
|
build-push-image:
|
||||||
name: Build and push controller image
|
name: Build and push controller image
|
||||||
@@ -101,7 +105,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Job summary
|
- name: Job summary
|
||||||
run: |
|
run: |
|
||||||
echo "The [publish-runner-scale-set.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/publish-runner-scale-set.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY
|
echo "The [gha-publish-chart.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/gha-publish-chart.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY
|
||||||
echo "" >> $GITHUB_STEP_SUMMARY
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
|
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
|
||||||
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
|
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
name: Validate Helm Chart (gha-runner-scale-set-controller and gha-runner-scale-set)
|
name: (gha) Validate Helm Charts
|
||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
@@ -6,13 +6,13 @@ on:
|
|||||||
- master
|
- master
|
||||||
paths:
|
paths:
|
||||||
- 'charts/**'
|
- 'charts/**'
|
||||||
- '.github/workflows/validate-gha-chart.yaml'
|
- '.github/workflows/gha-validate-chart.yaml'
|
||||||
- '!charts/actions-runner-controller/**'
|
- '!charts/actions-runner-controller/**'
|
||||||
- '!**.md'
|
- '!**.md'
|
||||||
push:
|
push:
|
||||||
paths:
|
paths:
|
||||||
- 'charts/**'
|
- 'charts/**'
|
||||||
- '.github/workflows/validate-gha-chart.yaml'
|
- '.github/workflows/gha-validate-chart.yaml'
|
||||||
- '!charts/actions-runner-controller/**'
|
- '!charts/actions-runner-controller/**'
|
||||||
- '!**.md'
|
- '!**.md'
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
@@ -23,6 +23,13 @@ env:
|
|||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
# This will make sure we only apply the concurrency limits on pull requests
|
||||||
|
# but not pushes to master branch by making the concurrency group name unique
|
||||||
|
# for pushes
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
validate-chart:
|
validate-chart:
|
||||||
name: Lint Chart
|
name: Lint Chart
|
||||||
@@ -61,23 +68,7 @@ jobs:
|
|||||||
python-version: '3.7'
|
python-version: '3.7'
|
||||||
|
|
||||||
- name: Set up chart-testing
|
- name: Set up chart-testing
|
||||||
uses: helm/chart-testing-action@v2.3.1
|
uses: helm/chart-testing-action@v2.4.0
|
||||||
|
|
||||||
- name: Set up latest version chart-testing
|
|
||||||
run: |
|
|
||||||
echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list
|
|
||||||
sudo apt update
|
|
||||||
sudo apt install goreleaser
|
|
||||||
git clone https://github.com/helm/chart-testing
|
|
||||||
cd chart-testing
|
|
||||||
unset CT_CONFIG_DIR
|
|
||||||
goreleaser build --clean --skip-validate
|
|
||||||
./dist/chart-testing_linux_amd64_v1/ct version
|
|
||||||
echo 'Adding ct directory to PATH...'
|
|
||||||
echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH"
|
|
||||||
echo 'Setting CT_CONFIG_DIR...'
|
|
||||||
echo "CT_CONFIG_DIR=$RUNNER_TEMP/chart-testing/etc" >> "$GITHUB_ENV"
|
|
||||||
working-directory: ${{ runner.temp }}
|
|
||||||
|
|
||||||
- name: Run chart-testing (list-changed)
|
- name: Run chart-testing (list-changed)
|
||||||
id: list-changed
|
id: list-changed
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
name: Publish Canary Image
|
name: Publish Canary Images
|
||||||
|
|
||||||
# Revert to https://github.com/actions-runner-controller/releases#releases
|
# Revert to https://github.com/actions-runner-controller/releases#releases
|
||||||
# for details on why we use this approach
|
# for details on why we use this approach
|
||||||
@@ -11,19 +11,19 @@ on:
|
|||||||
- '.github/actions/**'
|
- '.github/actions/**'
|
||||||
- '.github/ISSUE_TEMPLATE/**'
|
- '.github/ISSUE_TEMPLATE/**'
|
||||||
- '.github/workflows/e2e-test-dispatch-workflow.yaml'
|
- '.github/workflows/e2e-test-dispatch-workflow.yaml'
|
||||||
- '.github/workflows/e2e-test-linux-vm.yaml'
|
- '.github/workflows/gha-e2e-tests.yaml'
|
||||||
- '.github/workflows/publish-arc.yaml'
|
- '.github/workflows/arc-publish.yaml'
|
||||||
- '.github/workflows/publish-chart.yaml'
|
- '.github/workflows/arc-publish-chart.yaml'
|
||||||
- '.github/workflows/publish-runner-scale-set.yaml'
|
- '.github/workflows/gha-publish-chart.yaml'
|
||||||
- '.github/workflows/release-runners.yaml'
|
- '.github/workflows/arc-release-runners.yaml'
|
||||||
- '.github/workflows/run-codeql.yaml'
|
- '.github/workflows/global-run-codeql.yaml'
|
||||||
- '.github/workflows/run-first-interaction.yaml'
|
- '.github/workflows/global-run-first-interaction.yaml'
|
||||||
- '.github/workflows/run-stale.yaml'
|
- '.github/workflows/global-run-stale.yaml'
|
||||||
- '.github/workflows/update-runners.yaml'
|
- '.github/workflows/arc-update-runners-scheduled.yaml'
|
||||||
- '.github/workflows/validate-arc.yaml'
|
- '.github/workflows/validate-arc.yaml'
|
||||||
- '.github/workflows/validate-chart.yaml'
|
- '.github/workflows/arc-validate-chart.yaml'
|
||||||
- '.github/workflows/validate-gha-chart.yaml'
|
- '.github/workflows/gha-validate-chart.yaml'
|
||||||
- '.github/workflows/validate-runners.yaml'
|
- '.github/workflows/arc-validate-runners.yaml'
|
||||||
- '.github/dependabot.yml'
|
- '.github/dependabot.yml'
|
||||||
- '.github/RELEASE_NOTE_TEMPLATE.md'
|
- '.github/RELEASE_NOTE_TEMPLATE.md'
|
||||||
- 'runner/**'
|
- 'runner/**'
|
||||||
@@ -37,6 +37,10 @@ permissions:
|
|||||||
contents: read
|
contents: read
|
||||||
packages: write
|
packages: write
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
# Safeguard to prevent pushing images to registeries after build
|
# Safeguard to prevent pushing images to registeries after build
|
||||||
PUSH_TO_REGISTRIES: true
|
PUSH_TO_REGISTRIES: true
|
||||||
@@ -120,7 +124,7 @@ jobs:
|
|||||||
context: .
|
context: .
|
||||||
file: ./Dockerfile
|
file: ./Dockerfile
|
||||||
platforms: linux/amd64,linux/arm64
|
platforms: linux/amd64,linux/arm64
|
||||||
build-args: VERSION=canary-"${{ github.ref }}"
|
build-args: VERSION=canary-${{ steps.resolve_parameters.outputs.short_sha }}
|
||||||
push: ${{ env.PUSH_TO_REGISTRIES }}
|
push: ${{ env.PUSH_TO_REGISTRIES }}
|
||||||
tags: |
|
tags: |
|
||||||
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary
|
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary
|
||||||
@@ -10,6 +10,13 @@ on:
|
|||||||
schedule:
|
schedule:
|
||||||
- cron: '30 1 * * 0'
|
- cron: '30 1 * * 0'
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
# This will make sure we only apply the concurrency limits on pull requests
|
||||||
|
# but not pushes to master branch by making the concurrency group name unique
|
||||||
|
# for pushes
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
analyze:
|
analyze:
|
||||||
name: Analyze
|
name: Analyze
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
name: first-interaction
|
name: First Interaction
|
||||||
|
|
||||||
on:
|
on:
|
||||||
issues:
|
issues:
|
||||||
16
.github/workflows/go.yaml
vendored
16
.github/workflows/go.yaml
vendored
@@ -8,7 +8,6 @@ on:
|
|||||||
- '**.go'
|
- '**.go'
|
||||||
- 'go.mod'
|
- 'go.mod'
|
||||||
- 'go.sum'
|
- 'go.sum'
|
||||||
|
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- '.github/workflows/go.yaml'
|
- '.github/workflows/go.yaml'
|
||||||
@@ -19,6 +18,13 @@ on:
|
|||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
# This will make sure we only apply the concurrency limits on pull requests
|
||||||
|
# but not pushes to master branch by making the concurrency group name unique
|
||||||
|
# for pushes
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
fmt:
|
fmt:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -72,9 +78,11 @@ jobs:
|
|||||||
run: git diff --exit-code
|
run: git diff --exit-code
|
||||||
- name: Install kubebuilder
|
- name: Install kubebuilder
|
||||||
run: |
|
run: |
|
||||||
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
|
curl -D headers.txt -fsL "https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.26.1-linux-amd64.tar.gz" -o kubebuilder-tools
|
||||||
tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
|
echo "$(grep -i etag headers.txt -m 1 | cut -d'"' -f2) kubebuilder-tools" > sum
|
||||||
sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
|
md5sum -c sum
|
||||||
|
tar -zvxf kubebuilder-tools
|
||||||
|
sudo mv kubebuilder /usr/local/
|
||||||
- name: Run go tests
|
- name: Run go tests
|
||||||
run: |
|
run: |
|
||||||
go test -short `go list ./... | grep -v ./test_e2e_arc`
|
go test -short `go list ./... | grep -v ./test_e2e_arc`
|
||||||
|
|||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -35,5 +35,4 @@ bin
|
|||||||
.DS_STORE
|
.DS_STORE
|
||||||
|
|
||||||
/test-assets
|
/test-assets
|
||||||
|
|
||||||
/.tools
|
/.tools
|
||||||
|
|||||||
162
CONTRIBUTING.md
162
CONTRIBUTING.md
@@ -15,6 +15,13 @@
|
|||||||
- [Opening the Pull Request](#opening-the-pull-request)
|
- [Opening the Pull Request](#opening-the-pull-request)
|
||||||
- [Helm Version Changes](#helm-version-changes)
|
- [Helm Version Changes](#helm-version-changes)
|
||||||
- [Testing Controller Built from a Pull Request](#testing-controller-built-from-a-pull-request)
|
- [Testing Controller Built from a Pull Request](#testing-controller-built-from-a-pull-request)
|
||||||
|
- [Release process](#release-process)
|
||||||
|
- [Workflow structure](#workflow-structure)
|
||||||
|
- [Releasing legacy actions-runner-controller image and helm charts](#releasing-legacy-actions-runner-controller-image-and-helm-charts)
|
||||||
|
- [Release actions-runner-controller runner images](#release-actions-runner-controller-runner-images)
|
||||||
|
- [Release gha-runner-scale-set-controller image and helm charts](#release-gha-runner-scale-set-controller-image-and-helm-charts)
|
||||||
|
- [Release actions/runner image](#release-actionsrunner-image)
|
||||||
|
- [Canary releases](#canary-releases)
|
||||||
|
|
||||||
## Welcome
|
## Welcome
|
||||||
|
|
||||||
@@ -25,14 +32,13 @@ reviewed and merged.
|
|||||||
|
|
||||||
## Before contributing code
|
## Before contributing code
|
||||||
|
|
||||||
We welcome code patches, but to make sure things are well coordinated you should discuss any significant change before starting the work.
|
We welcome code patches, but to make sure things are well coordinated you should discuss any significant change before starting the work. The maintainers ask that you signal your intention to contribute to the project using the issue tracker. If there is an existing issue that you want to work on, please let us know so we can get it assigned to you. If you noticed a bug or want to add a new feature, there are issue templates you can fill out.
|
||||||
The maintainers ask that you signal your intention to contribute to the project using the issue tracker.
|
|
||||||
If there is an existing issue that you want to work on, please let us know so we can get it assigned to you.
|
|
||||||
If you noticed a bug or want to add a new feature, there are issue templates you can fill out.
|
|
||||||
|
|
||||||
When filing a feature request, the maintainers will review the change and give you a decision on whether we are willing to accept the feature into the project.
|
When filing a feature request, the maintainers will review the change and give you a decision on whether we are willing to accept the feature into the project.
|
||||||
|
|
||||||
For significantly large and/or complex features, we may request that you write up an architectural decision record ([ADR](https://github.blog/2020-08-13-why-write-adrs/)) detailing the change.
|
For significantly large and/or complex features, we may request that you write up an architectural decision record ([ADR](https://github.blog/2020-08-13-why-write-adrs/)) detailing the change.
|
||||||
Please use the [template](/adrs/0000-TEMPLATE.md) as guidance.
|
|
||||||
|
Please use the [template](/docs/adrs/yyyy-mm-dd-TEMPLATE) as guidance.
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
TODO: Add a pre-requisite section describing what developers should
|
TODO: Add a pre-requisite section describing what developers should
|
||||||
@@ -45,6 +51,7 @@ Depending on what you are patching depends on how you should go about it.
|
|||||||
Below are some guides on how to test patches locally as well as develop the controller and runners.
|
Below are some guides on how to test patches locally as well as develop the controller and runners.
|
||||||
|
|
||||||
When submitting a PR for a change please provide evidence that your change works as we still need to work on improving the CI of the project.
|
When submitting a PR for a change please provide evidence that your change works as we still need to work on improving the CI of the project.
|
||||||
|
|
||||||
Some resources are provided for helping achieve this, see this guide for details.
|
Some resources are provided for helping achieve this, see this guide for details.
|
||||||
|
|
||||||
### Developing the Controller
|
### Developing the Controller
|
||||||
@@ -130,7 +137,7 @@ GINKGO_FOCUS='[It] should create a new Runner resource from the specified templa
|
|||||||
>
|
>
|
||||||
> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to somewhere under `$HOME`.
|
> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to somewhere under `$HOME`.
|
||||||
> Otherwise `kind load docker-image` fail while running `docker save`.
|
> Otherwise `kind load docker-image` fail while running `docker save`.
|
||||||
> See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap for more information.
|
> See <https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap> for more information.
|
||||||
|
|
||||||
To test your local changes against both PAT and App based authentication please run the `acceptance` make target with the authentication configuration details provided:
|
To test your local changes against both PAT and App based authentication please run the `acceptance` make target with the authentication configuration details provided:
|
||||||
|
|
||||||
@@ -217,3 +224,146 @@ Please also note that you need to replace `$DOCKER_USER` with your own DockerHub
|
|||||||
Only the maintainers can release a new version of actions-runner-controller, publish a new version of the helm charts, and runner images.
|
Only the maintainers can release a new version of actions-runner-controller, publish a new version of the helm charts, and runner images.
|
||||||
|
|
||||||
All release workflows have been moved to [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases) since the packages are owned by the former organization.
|
All release workflows have been moved to [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases) since the packages are owned by the former organization.
|
||||||
|
|
||||||
|
### Workflow structure
|
||||||
|
|
||||||
|
Following the migration of actions-runner-controller into GitHub actions, all the workflows had to be modified to accommodate the move to a new organization. The following table describes the workflows, their purpose and dependencies.
|
||||||
|
|
||||||
|
| Filename | Workflow name | Purpose |
|
||||||
|
|-----------------------------------|--------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| gha-e2e-tests.yaml | (gha) E2E Tests | Tests the Autoscaling Runner Set mode end to end. Coverage is restricted to this mode. Legacy modes are not tested. |
|
||||||
|
| go.yaml | Format, Lint, Unit Tests | Formats, lints and runs unit tests for the entire codebase. |
|
||||||
|
| arc-publish.yaml | Publish ARC Image | Uploads release/actions-runner-controller.yaml as an artifact to the newly created release and triggers the [build and publication of the controller image](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-arc.yaml) |
|
||||||
|
| global-publish-canary.yaml | Publish Canary Images | Builds and publishes canary controller container images for both new and legacy modes. |
|
||||||
|
| arc-publish-chart.yaml | Publish ARC Helm Charts | Packages and publishes charts/actions-runner-controller (via GitHub Pages) |
|
||||||
|
| gha-publish-chart.yaml | (gha) Publish Helm Charts | Packages and publishes charts/gha-runner-scale-set-controller and charts/gha-runner-scale-set charts (OCI to GHCR) |
|
||||||
|
| arc-release-runners.yaml | Release ARC Runner Images | Triggers [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) which will build and push new runner images used with the legacy ARC modes. |
|
||||||
|
| global-run-codeql.yaml | Run CodeQL | Run CodeQL on all the codebase |
|
||||||
|
| global-run-first-interaction.yaml | First Interaction | Informs first time contributors what to expect when they open a new issue / PR |
|
||||||
|
| global-run-stale.yaml | Run Stale Bot | Closes issues / PRs without activity |
|
||||||
|
| arc-update-runners-scheduled.yaml | Runner Updates Check (Scheduled Job) | Polls [actions/runner](https://github.com/actions/runner) and [actions/runner-container-hooks](https://github.com/actions/runner-container-hooks) for new releases. If found, a PR is created to publish new runner images |
|
||||||
|
| arc-validate-chart.yaml | Validate Helm Chart | Run helm chart validators for charts/actions-runner-controller |
|
||||||
|
| gha-validate-chart.yaml | (gha) Validate Helm Charts | Run helm chart validators for charts/gha-runner-scale-set-controller and charts/gha-runner-scale-set charts |
|
||||||
|
| arc-validate-runners.yaml | Validate ARC Runners | Run validators for runners |
|
||||||
|
|
||||||
|
There are 7 components that we release regularly:
|
||||||
|
|
||||||
|
1. legacy [actions-runner-controller controller image](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller)
|
||||||
|
2. legacy [actions-runner-controller helm charts](https://actions-runner-controller.github.io/actions-runner-controller/)
|
||||||
|
3. legacy actions-runner-controller runner images
|
||||||
|
1. [ubuntu-20.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner)
|
||||||
|
2. [ubuntu-22.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner)
|
||||||
|
3. [dind-ubuntu-20.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind)
|
||||||
|
4. [dind-ubuntu-22.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind)
|
||||||
|
5. [dind-rootless-ubuntu-20.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind-rootless)
|
||||||
|
6. [dind-rootless-ubuntu-22.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind-rootless)
|
||||||
|
4. [gha-runner-scale-set-controller image](https://github.com/actions/actions-runner-controller/pkgs/container/gha-runner-scale-set-controller)
|
||||||
|
5. [gha-runner-scale-set-controller helm charts](https://github.com/actions/actions-runner-controller/pkgs/container/actions-runner-controller-charts%2Fgha-runner-scale-set-controller)
|
||||||
|
6. [gha-runner-scale-set runner helm charts](https://github.com/actions/actions-runner-controller/pkgs/container/actions-runner-controller-charts%2Fgha-runner-scale-set)
|
||||||
|
7. [actions/runner image](https://github.com/actions/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner)
|
||||||
|
|
||||||
|
#### Releasing legacy actions-runner-controller image and helm charts
|
||||||
|
|
||||||
|
1. Start by making sure the master branch is stable and all CI jobs are passing
|
||||||
|
2. Create a new release in <https://github.com/actions/actions-runner-controller/releases> (Draft a new release)
|
||||||
|
3. Bump up the `version` and `appVersion` in charts/actions-runner-controller/Chart.yaml - make sure the `version` matches the release version you just created. (Example: <https://github.com/actions/actions-runner-controller/pull/2577>)
|
||||||
|
4. When the workflows finish execution, you will see:
|
||||||
|
1. A new controller image published to: <https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller>
|
||||||
|
2. Helm charts published to: <https://github.com/actions-runner-controller/actions-runner-controller.github.io/tree/master/actions-runner-controller> (the index.yaml file is updated)
|
||||||
|
|
||||||
|
When a new release is created, the [Publish ARC Image](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/arc-publish.yaml) workflow is triggered.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
subgraph repository: actions/actions-runner-controller
|
||||||
|
event_a{{"release: published"}} -- triggers --> workflow_a["arc-publish.yaml"]
|
||||||
|
event_b{{"workflow_dispatch"}} -- triggers --> workflow_a["arc-publish.yaml"]
|
||||||
|
workflow_a["arc-publish.yaml"] -- uploads --> package["actions-runner-controller.tar.gz"]
|
||||||
|
end
|
||||||
|
subgraph repository: actions-runner-controller/releases
|
||||||
|
workflow_a["arc-publish.yaml"] -- triggers --> event_d{{"repository_dispatch"}} --> workflow_b["publish-arc.yaml"]
|
||||||
|
workflow_b["publish-arc.yaml"] -- push --> A["GHCR: \nactions-runner-controller/actions-runner-controller:*"]
|
||||||
|
workflow_b["publish-arc.yaml"] -- push --> B["DockerHub: \nsummerwind/actions-runner-controller:*"]
|
||||||
|
end
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Release actions-runner-controller runner images

**Manual steps:**

1. Navigate to the [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases) repository
2. Trigger [the release-runners.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml) workflow.
   1. The list of input parameters for this workflow is defined in the table below (always inspect the workflow file for the latest version); a sketch of the corresponding `workflow_dispatch` inputs follows the table

<!-- Table of Parameters -->

| Parameter | Description | Default |
|-----------|-------------|---------|
| `runner_version` | The version of the [actions/runner](https://github.com/actions/runner) to use | `2.300.2` |
| `docker_version` | The version of docker to use | `20.10.12` |
| `runner_container_hooks_version` | The version of [actions/runner-container-hooks](https://github.com/actions/runner-container-hooks) to use | `0.2.0` |
| `sha` | The commit sha from [actions/actions-runner-controller](https://github.com/actions/actions-runner-controller) to be used to build the runner images. This will be provided to `actions/checkout` & used to tag the container images | Empty string. |
| `push_to_registries` | Whether to push the images to the registries. Use false to test the build | false |
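For orientation only, the inputs above roughly correspond to a `workflow_dispatch` block like the following sketch. This is not the actual workflow definition in actions-runner-controller/releases, so always check release-runners.yaml itself:

```yaml
# Hypothetical sketch of the release-runners.yaml trigger, based on the table above
on:
  workflow_dispatch:
    inputs:
      runner_version:
        description: 'Version of actions/runner to build'
        default: '2.300.2'
      docker_version:
        description: 'Version of docker to install in the images'
        default: '20.10.12'
      runner_container_hooks_version:
        description: 'Version of actions/runner-container-hooks to bundle'
        default: '0.2.0'
      sha:
        description: 'Commit SHA of actions/actions-runner-controller to build from'
        default: ''
      push_to_registries:
        description: 'Push the images to the registries (false = build only)'
        default: 'false'
```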
**Automated steps:**

```mermaid
flowchart LR
    workflow["release-runners.yaml"] -- workflow_dispatch* --> workflow_b["release-runners.yaml"]
    subgraph repository: actions/actions-runner-controller
    runner_updates_check["arc-update-runners-scheduled.yaml"] -- "polls (daily)" --> runner_releases["actions/runner/releases"]
    runner_updates_check -- creates --> runner_update_pr["PR: update /runner/VERSION"]
    runner_update_pr --> runner_update_pr_merge{{"merge"}}
    runner_update_pr_merge -- triggers --> workflow["release-runners.yaml"]
    end
    subgraph repository: actions-runner-controller/releases
    workflow_b["release-runners.yaml"] -- push --> A["GHCR: \n actions-runner-controller/actions-runner:* \n actions-runner-controller/actions-runner-dind:* \n actions-runner-controller/actions-runner-dind-rootless:*"]
    workflow_b["release-runners.yaml"] -- push --> B["DockerHub: \n summerwind/actions-runner:* \n summerwind/actions-runner-dind:* \n summerwind/actions-runner-dind-rootless:*"]
    event_b{{"workflow_dispatch"}} -- triggers --> workflow_b["release-runners.yaml"]
    end
```
#### Release gha-runner-scale-set-controller image and helm charts

1. Make sure the master branch is stable and all CI jobs are passing
1. Prepare a release PR (example: <https://github.com/actions/actions-runner-controller/pull/2467>)
   1. Bump up the version of the chart in: charts/gha-runner-scale-set-controller/Chart.yaml
   2. Bump up the version of the chart in: charts/gha-runner-scale-set/Chart.yaml
      1. Make sure that `version`, `appVersion` of both charts are always the same. These versions cannot diverge. (A sketch of a matching bump follows the parameter table below.)
   3. Update the quickstart guide to reflect the latest versions: docs/preview/gha-runner-scale-set-controller/README.md
   4. Add changelog to the PR as well as the quickstart guide
1. Merge the release PR
1. Manually trigger the [(gha) Publish Helm Charts](https://github.com/actions/actions-runner-controller/actions/workflows/gha-publish-chart.yaml) workflow
1. Manually create a tag and release in [actions/actions-runner-controller](https://github.com/actions/actions-runner-controller/releases) with the format: `gha-runner-scale-set-x.x.x` where the version (x.x.x) matches that of the Helm chart

| Parameter | Description | Default |
|-----------|-------------|---------|
| `ref` | The branch, tag or SHA to cut a release from. | default branch |
| `release_tag_name` | The tag of the controller image. This is not a git tag. | canary |
| `push_to_registries` | Push images to registries. Use false to test the build process. | false |
| `publish_gha_runner_scale_set_controller_chart` | Publish new helm chart for gha-runner-scale-set-controller. This will push the new OCI archive to GHCR | false |
| `publish_gha_runner_scale_set_chart` | Publish new helm chart for gha-runner-scale-set. This will push the new OCI archive to GHCR | false |
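A minimal sketch of what a matching bump in the two chart manifests could look like. The 0.5.0 number is only an example, taken from the gha-runner-scale-set-controller Chart.yaml change shown later in this compare; the gha-runner-scale-set chart must simply carry the same values:

```yaml
# charts/gha-runner-scale-set-controller/Chart.yaml (example values only)
version: 0.5.0
appVersion: "0.5.0"

# charts/gha-runner-scale-set/Chart.yaml - must stay in lockstep with the controller chart
version: 0.5.0
appVersion: "0.5.0"
```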
#### Release actions/runner image

A new runner image is built and published to <https://github.com/actions/runner/pkgs/container/actions-runner> whenever a new runner binary has been released. There's nothing to do here.

#### Canary releases

We publish canary images for both the legacy actions-runner-controller and gha-runner-scale-set-controller images.

```mermaid
flowchart LR
    subgraph org: actions
    event_a{{"push: [master]"}} -- triggers --> workflow_a["publish-canary.yaml"]
    end
    subgraph org: actions-runner-controller
    workflow_a["publish-canary.yaml"] -- triggers --> event_d{{"repository_dispatch"}} --> workflow_b["publish-canary.yaml"]
    workflow_b["publish-canary.yaml"] -- push --> A["GHCR: \nactions-runner-controller/actions-runner-controller:canary"]
    workflow_b["publish-canary.yaml"] -- push --> B["DockerHub: \nsummerwind/actions-runner-controller:canary"]
    end
```

1. [actions-runner-controller canary image](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller)
2. [gha-runner-scale-set-controller image](https://github.com/actions/actions-runner-controller/pkgs/container/gha-runner-scale-set-controller)

These canary images are automatically built and released on each push to the master branch.
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM --platform=$BUILDPLATFORM golang:1.19.4 as builder
+FROM --platform=$BUILDPLATFORM golang:1.20.7 as builder
 
 WORKDIR /workspace
 
Makefile
@@ -5,7 +5,7 @@ else
 endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
-RUNNER_VERSION ?= 2.304.0
+RUNNER_VERSION ?= 2.308.0
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -95,7 +95,8 @@ run: generate fmt vet manifests
 run-scaleset: generate fmt vet
 	CONTROLLER_MANAGER_POD_NAMESPACE=default \
 	CONTROLLER_MANAGER_CONTAINER_IMAGE="${DOCKER_IMAGE_NAME}:${VERSION}" \
-	go run ./main.go --auto-scaling-runner-set-only
+	go run -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=$(VERSION)'" \
+	./main.go --auto-scaling-runner-set-only
 
 # Install CRDs into a cluster
 install: manifests
README.md
@@ -4,39 +4,40 @@
 [![awesome-runners](https://img.shields.io/badge/listed%20on-awesome--runners-blue.svg)](https://github.com/jonico/awesome-runners)
 [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/actions-runner-controller)](https://artifacthub.io/packages/search?repo=actions-runner-controller)
 
+## About
+
+Actions Runner Controller (ARC) is a Kubernetes operator that orchestrates and scales self-hosted runners for GitHub Actions.
+
+With ARC, you can create runner scale sets that automatically scale based on the number of workflows running in your repository, organization, or enterprise. Because controlled runners can be ephemeral and based on containers, new runner instances can scale up or down rapidly and cleanly. For more information about autoscaling, see ["Autoscaling with self-hosted runners."](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/autoscaling-with-self-hosted-runners)
+
+You can set up ARC on Kubernetes using Helm, then create and run a workflow that uses runner scale sets. For more information about runner scale sets, see ["Deploying runner scale sets with Actions Runner Controller."](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#runner-scale-set)
+
 ## People
 
-`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with the GitHub Actions team, external maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions).
+Actions Runner Controller (ARC) is an open-source project currently developed and maintained in collaboration with the GitHub Actions team, external maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions).
 
 If you think the project is awesome and is adding value to your business, please consider directly sponsoring [community maintainers](https://github.com/sponsors/actions-runner-controller) and individual contributors via GitHub Sponsors.
 
 In case you are already the employer of one of contributors, sponsoring via GitHub Sponsors might not be an option. Just support them in other means!
 
 See [the sponsorship dashboard](https://github.com/sponsors/actions-runner-controller) for the former and the current sponsors.
 
-## Status
-
-Even though actions-runner-controller is used in production environments, it is still in its early stage of development, hence versioned 0.x.
-
-actions-runner-controller complies to Semantic Versioning 2.0.0 in which v0.x means that there could be backward-incompatible changes for every release.
-
-The documentation is kept inline with master@HEAD, we do our best to highlight any features that require a specific ARC version or higher however this is not always easily done due to there being many moving parts. Additionally, we actively do not retain compatibly with every GitHub Enterprise Server version nor every Kubernetes version so you will need to ensure you stay current within a reasonable timespan.
-
-## About
-
-[GitHub Actions](https://github.com/features/actions) is a very useful tool for automating development. GitHub Actions jobs are run in the cloud by default, but you may want to run your jobs in your environment. [Self-hosted runner](https://github.com/actions/runner) can be used for such use cases, but requires the provisioning and configuration of a virtual machine instance. Instead if you already have a Kubernetes cluster, it makes more sense to run the self-hosted runner on top of it.
-
-**actions-runner-controller** makes that possible. Just create a *Runner* resource on your Kubernetes, and it will run and operate the self-hosted runner for the specified repository. Combined with Kubernetes RBAC, you can also build simple Self-hosted runners as a Service.
-
 ## Getting Started
 
-To give ARC a try with just a handful of commands, Please refer to the [Quickstart guide](/docs/quickstart.md).
-
-For an overview of ARC, please refer to [About ARC](https://github.com/actions/actions-runner-controller/blob/master/docs/about-arc.md)
-
-For more information, please refer to detailed documentation below!
-
-## Documentation
+To give ARC a try with just a handful of commands, Please refer to the [Quickstart guide](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller).
+
+For an overview of ARC, please refer to [About ARC](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller)
+
+With the introduction of [autoscaling runner scale sets](https://github.com/actions/actions-runner-controller/discussions/2775), the existing [autoscaling modes](./docs/automatically-scaling-runners.md) are now legacy. The legacy modes have certain use cases and will continue to be maintained by the community only.
+
+For further information on what is supported by GitHub and what's managed by the community, please refer to [this announcement discussion.](https://github.com/actions/actions-runner-controller/discussions/2775)
+
+### Documentation
+
+ARC documentation is available on [docs.github.com](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller).
+
+### Legacy documentation
+
+The following documentation is for the legacy autoscaling modes that continue to be maintained by the community
 
 - [Quickstart guide](/docs/quickstart.md)
 - [About ARC](/docs/about-arc.md)
@@ -304,3 +304,27 @@ If you noticed that it takes several minutes for sidecar dind container to be cr
 **Solution**
 
 The solution is to switch to using faster storage, if you are experiencing this issue you are probably using HDD storage. Switching to SSD storage fixed the problem in my case. Most cloud providers have a list of storage options to use just pick something faster that your current disk, for on prem clusters you will need to invest in some SSDs.
+
+### Dockerd no space left on device
+
+**Problem**
+
+If you are running many containers on your runner you might encounter an issue where docker daemon is unable to start new containers and you see error `no space left on device`.
+
+**Solution**
+
+Add a `dockerVarRunVolumeSizeLimit` key in your runner's spec with a higher size limit (the default is 1M) For instance:
+
+```yaml
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: RunnerDeployment
+metadata:
+  name: github-runner
+  namespace: github-system
+spec:
+  replicas: 6
+  template:
+    spec:
+      dockerVarRunVolumeSizeLimit: 50M
+      env: []
+```
@@ -22,7 +22,7 @@ import (
 
 // HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler
 type HorizontalRunnerAutoscalerSpec struct {
-	// ScaleTargetRef sis the reference to scaled resource like RunnerDeployment
+	// ScaleTargetRef is the reference to scaled resource like RunnerDeployment
 	ScaleTargetRef ScaleTargetRef `json:"scaleTargetRef,omitempty"`
 
 	// MinReplicas is the minimum number of replicas the deployment is allowed to scale
@@ -70,6 +70,8 @@ type RunnerConfig struct {
 	// +optional
 	DockerRegistryMirror *string `json:"dockerRegistryMirror,omitempty"`
 	// +optional
+	DockerVarRunVolumeSizeLimit *resource.Quantity `json:"dockerVarRunVolumeSizeLimit,omitempty"`
+	// +optional
 	VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
 	// +optional
 	VolumeStorageMedium *string `json:"volumeStorageMedium,omitempty"`
@@ -436,6 +436,11 @@ func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) {
 		*out = new(string)
 		**out = **in
 	}
+	if in.DockerVarRunVolumeSizeLimit != nil {
+		in, out := &in.DockerVarRunVolumeSizeLimit, &out.DockerVarRunVolumeSizeLimit
+		x := (*in).DeepCopy()
+		*out = &x
+	}
 	if in.VolumeSizeLimit != nil {
 		in, out := &in.VolumeSizeLimit, &out.VolumeSizeLimit
 		x := (*in).DeepCopy()
@@ -15,10 +15,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.23.3
+version: 0.23.5
 
 # Used as the default manager tag value when no tag property is provided in the values.yaml
-appVersion: 0.27.4
+appVersion: 0.27.5
 
 home: https://github.com/actions/actions-runner-controller
@@ -35,13 +35,16 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
 | `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | |
 | `hostNetwork` | The "hostNetwork" of the controller container | false |
+| `dnsPolicy` | The "dnsPolicy" of the controller container | ClusterFirst |
 | `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
 | `image.tag` | The tag of the controller container | |
 | `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
 | `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | |
 | `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
 | `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
-| `metrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
+| `metrics.serviceMonitor.enable` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
+| `metrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrap the controller's metrics | 1m |
+| `metrics.serviceMonitor.timeout` | Configure the timeout the timeout of Prometheus scrapping. | 30s |
 | `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
 | `metrics.port` | Set port of metrics service | 8443 |
 | `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
@@ -148,7 +151,9 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
 | `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | |
 | `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | |
-| `actionsMetrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
+| `actionsMetrics.serviceMonitor.enable` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
+| `actionsMetrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrap the controller's metrics | 1m |
+| `actionsMetrics.serviceMonitor.timeout` | Configure the timeout the timeout of Prometheus scrapping. | 30s |
 | `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
 | `actionsMetrics.port` | Set port of actions metrics service | 8443 |
 | `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
@@ -113,7 +113,7 @@ spec:
   description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
   type: integer
 scaleTargetRef:
-  description: ScaleTargetRef sis the reference to scaled resource like RunnerDeployment
+  description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
   properties:
     kind:
       description: Kind is the type of resource being referenced
@@ -1497,6 +1497,12 @@ spec:
   type: integer
 dockerRegistryMirror:
   type: string
+dockerVarRunVolumeSizeLimit:
+  anyOf:
+  - type: integer
+  - type: string
+  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+  x-kubernetes-int-or-string: true
 dockerVolumeMounts:
   items:
     description: VolumeMount describes a mounting of a Volume within a container.
@@ -1479,6 +1479,12 @@ spec:
   type: integer
 dockerRegistryMirror:
   type: string
+dockerVarRunVolumeSizeLimit:
+  anyOf:
+  - type: integer
+  - type: string
+  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+  x-kubernetes-int-or-string: true
 dockerVolumeMounts:
   items:
     description: VolumeMount describes a mounting of a Volume within a container.
@@ -1432,6 +1432,12 @@ spec:
   type: integer
 dockerRegistryMirror:
   type: string
+dockerVarRunVolumeSizeLimit:
+  anyOf:
+  - type: integer
+  - type: string
+  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+  x-kubernetes-int-or-string: true
 dockerVolumeMounts:
   items:
     description: VolumeMount describes a mounting of a Volume within a container.
@@ -55,6 +55,12 @@ spec:
   type: integer
 dockerRegistryMirror:
   type: string
+dockerVarRunVolumeSizeLimit:
+  anyOf:
+  - type: integer
+  - type: string
+  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+  x-kubernetes-int-or-string: true
 dockerdWithinRunnerContainer:
   type: boolean
 effectiveTime:
@@ -36,8 +36,8 @@ spec:
 {{- end }}
 containers:
 - args:
-  {{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
-  {{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
+  {{- $metricsHost := .Values.actionsMetrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
+  {{- $metricsPort := .Values.actionsMetrics.proxy.enabled | ternary "8080" .Values.actionsMetrics.port }}
   - "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
   {{- if .Values.actionsMetricsServer.logLevel }}
   - "--log-level={{ .Values.actionsMetricsServer.logLevel }}"
@@ -111,10 +111,14 @@ spec:
       name: {{ include "actions-runner-controller.secretName" . }}
       optional: true
   {{- end }}
+  {{- if kindIs "slice" .Values.actionsMetricsServer.env }}
+  {{- toYaml .Values.actionsMetricsServer.env | nindent 8 }}
+  {{- else }}
   {{- range $key, $val := .Values.actionsMetricsServer.env }}
   - name: {{ $key }}
     value: {{ $val | quote }}
   {{- end }}
+  {{- end }}
   image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
   name: actions-metrics-server
   imagePullPolicy: {{ .Values.image.pullPolicy }}
@@ -122,8 +126,8 @@ spec:
   - containerPort: 8000
     name: http
     protocol: TCP
-  {{- if not .Values.metrics.proxy.enabled }}
-  - containerPort: {{ .Values.metrics.port }}
+  {{- if not .Values.actionsMetrics.proxy.enabled }}
+  - containerPort: {{ .Values.actionsMetrics.port }}
     name: metrics-port
     protocol: TCP
   {{- end }}
@@ -131,17 +135,17 @@ spec:
     {{- toYaml .Values.actionsMetricsServer.resources | nindent 12 }}
   securityContext:
     {{- toYaml .Values.actionsMetricsServer.securityContext | nindent 12 }}
-{{- if .Values.metrics.proxy.enabled }}
+{{- if .Values.actionsMetrics.proxy.enabled }}
 - args:
-  - "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
+  - "--secure-listen-address=0.0.0.0:{{ .Values.actionsMetrics.port }}"
   - "--upstream=http://127.0.0.1:8080/"
   - "--logtostderr=true"
   - "--v=10"
-  image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
+  image: "{{ .Values.actionsMetrics.proxy.image.repository }}:{{ .Values.actionsMetrics.proxy.image.tag }}"
   name: kube-rbac-proxy
   imagePullPolicy: {{ .Values.image.pullPolicy }}
   ports:
-  - containerPort: {{ .Values.metrics.port }}
+  - containerPort: {{ .Values.actionsMetrics.port }}
    name: metrics-port
   resources:
     {{- toYaml .Values.resources | nindent 12 }}
@@ -16,9 +16,9 @@ spec:
 {{ range $_, $port := .Values.actionsMetricsServer.service.ports -}}
 - {{ $port | toYaml | nindent 6 }}
 {{- end }}
-{{- if .Values.metrics.serviceMonitor }}
+{{- if .Values.actionsMetrics.serviceMonitor.enable }}
 - name: metrics-port
-  port: {{ .Values.metrics.port }}
+  port: {{ .Values.actionsMetrics.port }}
   targetPort: metrics-port
 {{- end }}
 selector:
@@ -1,10 +1,10 @@
-{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor }}
+{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor.enable }}
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
   labels:
     {{- include "actions-runner-controller.labels" . | nindent 4 }}
-    {{- with .Values.actionsMetricsServer.serviceMonitorLabels }}
+    {{- with .Values.actionsMetrics.serviceMonitorLabels }}
     {{- toYaml . | nindent 4 }}
     {{- end }}
   name: {{ include "actions-runner-controller-actions-metrics-server.serviceMonitorName" . }}
@@ -19,6 +19,8 @@ spec:
     tlsConfig:
       insecureSkipVerify: true
   {{- end }}
+  interval: {{ .Values.actionsMetrics.serviceMonitor.interval }}
+  scrapeTimeout: {{ .Values.actionsMetrics.serviceMonitor.timeout }}
   selector:
     matchLabels:
       {{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 6 }}
@@ -1,4 +1,4 @@
-{{- if .Values.metrics.serviceMonitor }}
+{{- if .Values.metrics.serviceMonitor.enable }}
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
@@ -19,6 +19,8 @@ spec:
     tlsConfig:
       insecureSkipVerify: true
   {{- end }}
+  interval: {{ .Values.metrics.serviceMonitor.interval }}
+  scrapeTimeout: {{ .Values.metrics.serviceMonitor.timeout }}
   selector:
     matchLabels:
       {{- include "actions-runner-controller.selectorLabels" . | nindent 6 }}
@@ -70,6 +70,9 @@ spec:
 {{- if .Values.logFormat }}
 - "--log-format={{ .Values.logFormat }}"
 {{- end }}
+{{- if .Values.dockerGID }}
+- "--docker-gid={{ .Values.dockerGID }}"
+{{- end }}
 command:
 - "/manager"
 env:
@@ -211,3 +214,6 @@ spec:
 {{- if .Values.hostNetwork }}
 hostNetwork: {{ .Values.hostNetwork }}
 {{- end }}
+{{- if .Values.dnsPolicy }}
+dnsPolicy: {{ .Values.dnsPolicy }}
+{{- end }}
@@ -5,7 +5,7 @@ metadata:
   name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
   namespace: {{ .Release.Namespace }}
   labels:
-    {{- include "actions-runner-controller.labels" . | nindent 4 }}
+    {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
   {{- if .Values.githubWebhookServer.service.annotations }}
   annotations:
     {{ toYaml .Values.githubWebhookServer.service.annotations | nindent 4 }}
@@ -16,7 +16,7 @@ spec:
 {{ range $_, $port := .Values.githubWebhookServer.service.ports -}}
 - {{ $port | toYaml | nindent 6 }}
 {{- end }}
-{{- if .Values.metrics.serviceMonitor }}
+{{- if .Values.metrics.serviceMonitor.enable }}
 - name: metrics-port
   port: {{ .Values.metrics.port }}
   targetPort: metrics-port
@@ -1,4 +1,4 @@
-{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor }}
+{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor.enable }}
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
@@ -19,6 +19,8 @@ spec:
     tlsConfig:
       insecureSkipVerify: true
   {{- end }}
+  interval: {{ .Values.metrics.serviceMonitor.interval }}
+  scrapeTimeout: {{ .Values.metrics.serviceMonitor.timeout }}
   selector:
     matchLabels:
       {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }}
@@ -19,7 +19,7 @@ webhooks:
 {{- if .Values.scope.singleNamespace }}
 namespaceSelector:
   matchLabels:
-    name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
+    kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
 {{- end }}
 clientConfig:
 {{- if .Values.admissionWebHooks.caBundle }}
@@ -50,7 +50,7 @@ webhooks:
 {{- if .Values.scope.singleNamespace }}
 namespaceSelector:
   matchLabels:
-    name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
+    kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
 {{- end }}
 clientConfig:
 {{- if .Values.admissionWebHooks.caBundle }}
@@ -81,7 +81,7 @@ webhooks:
 {{- if .Values.scope.singleNamespace }}
 namespaceSelector:
   matchLabels:
-    name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
+    kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
 {{- end }}
 clientConfig:
 {{- if .Values.admissionWebHooks.caBundle }}
@@ -112,7 +112,7 @@ webhooks:
 {{- if .Values.scope.singleNamespace }}
 namespaceSelector:
   matchLabels:
-    name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
+    kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
 {{- end }}
 clientConfig:
 {{- if .Values.admissionWebHooks.caBundle }}
@@ -156,7 +156,7 @@ webhooks:
 {{- if .Values.scope.singleNamespace }}
 namespaceSelector:
   matchLabels:
-    name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
+    kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
 {{- end }}
 clientConfig:
 {{- if .Values.admissionWebHooks.caBundle }}
@@ -187,7 +187,7 @@ webhooks:
 {{- if .Values.scope.singleNamespace }}
 namespaceSelector:
   matchLabels:
-    name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
+    kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
 {{- end }}
 clientConfig:
 {{- if .Values.admissionWebHooks.caBundle }}
@@ -218,7 +218,7 @@ webhooks:
 {{- if .Values.scope.singleNamespace }}
 namespaceSelector:
   matchLabels:
-    name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
+    kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
 {{- end }}
 clientConfig:
 {{- if .Values.admissionWebHooks.caBundle }}
|||||||
@@ -109,7 +109,10 @@ service:
|
|||||||
# Metrics service resource
|
# Metrics service resource
|
||||||
metrics:
|
metrics:
|
||||||
serviceAnnotations: {}
|
serviceAnnotations: {}
|
||||||
serviceMonitor: false
|
serviceMonitor:
|
||||||
|
enable: false
|
||||||
|
timeout: 30s
|
||||||
|
interval: 1m
|
||||||
serviceMonitorLabels: {}
|
serviceMonitorLabels: {}
|
||||||
port: 8443
|
port: 8443
|
||||||
proxy:
|
proxy:
|
||||||
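Assuming the new nested keys shown above, enabling the controller ServiceMonitor from a Helm values override would look roughly like this sketch:

```yaml
# values override - a sketch using the metrics.serviceMonitor keys from the hunk above
metrics:
  serviceMonitor:
    enable: true
    interval: 30s  # how often Prometheus scrapes the controller
    timeout: 10s   # per-scrape timeout; keep it shorter than the interval
```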
@@ -148,8 +151,7 @@ podDisruptionBudget:
 # PriorityClass: system-cluster-critical
 priorityClassName: ""
 
-env:
-  {}
+# env:
 # specify additional environment variables for the controller pod.
 # It's possible to specify either key vale pairs e.g.:
 # http_proxy: "proxy.com:8080"
@@ -189,9 +191,17 @@ admissionWebHooks:
 # https://github.com/actions/actions-runner-controller/issues/1005#issuecomment-993097155
 #hostNetwork: true
 
+# If you use `hostNetwork: true`, then you need dnsPolicy: ClusterFirstWithHostNet
+# https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+#dnsPolicy: ClusterFirst
+
 ## specify log format for actions runner controller. Valid options are "text" and "json"
 logFormat: text
 
+# enable setting the docker group id for the runner container
+# https://github.com/actions/actions-runner-controller/pull/2499
+#dockerGID: 121
+
 githubWebhookServer:
   enabled: false
   replicaCount: 1
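Putting the commented hints above together, a values override that opts into host networking and a fixed docker group id might look like the following sketch (pick values that match your own nodes):

```yaml
# Sketch of a values override using the knobs documented in the hunk above
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet  # required when hostNetwork is true
dockerGID: 121                      # docker group id passed to the runner container
```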
@@ -292,7 +302,7 @@ githubWebhookServer:
   # key: GITHUB_WEBHOOK_SECRET_TOKEN
   # name: prod-gha-controller-webhook-token
   # optional: true
-  env: {}
+  # env:
 
 actionsMetrics:
   serviceAnnotations: {}
@@ -300,7 +310,10 @@ actionsMetrics:
   # as a part of the helm release.
   # Do note that you also need actionsMetricsServer.enabled=true
   # to deploy the actions-metrics-server whose k8s service is referenced by the service monitor.
-  serviceMonitor: false
+  serviceMonitor:
+    enable: false
+    timeout: 30s
+    interval: 1m
   serviceMonitorLabels: {}
   port: 8443
   proxy:
@@ -308,6 +321,19 @@ actionsMetrics:
     image:
       repository: quay.io/brancz/kube-rbac-proxy
       tag: v0.13.1
+  # specify additional environment variables for the webhook server pod.
+  # It's possible to specify either key vale pairs e.g.:
+  # my_env_var: "some value"
+  # my_other_env_var: "other value"
+
+  # or a list of complete environment variable definitions e.g.:
+  # - name: GITHUB_WEBHOOK_SECRET_TOKEN
+  #   valueFrom:
+  #     secretKeyRef:
+  #       key: GITHUB_WEBHOOK_SECRET_TOKEN
+  #       name: prod-gha-controller-webhook-token
+  #       optional: true
+  # env:
 
 actionsMetricsServer:
   enabled: false
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.4.0
+version: 0.5.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.4.0"
+appVersion: "0.5.0"
 
 home: https://github.com/actions/actions-runner-controller
|||||||
@@ -1,8 +1,14 @@
|
|||||||
{{/*
|
{{/*
|
||||||
Expand the name of the chart.
|
Expand the name of the chart.
|
||||||
*/}}
|
*/}}
|
||||||
|
|
||||||
|
|
||||||
|
{{- define "gha-base-name" -}}
|
||||||
|
gha-rs-controller
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
{{- define "gha-runner-scale-set-controller.name" -}}
|
{{- define "gha-runner-scale-set-controller.name" -}}
|
||||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
{{- default (include "gha-base-name" .) .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{/*
|
{{/*
|
||||||
@@ -14,7 +20,7 @@ If release name contains chart name it will be used as a full name.
|
|||||||
{{- if .Values.fullnameOverride }}
|
{{- if .Values.fullnameOverride }}
|
||||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||||
{{- else }}
|
{{- else }}
|
||||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
{{- $name := default (include "gha-base-name" .) .Values.nameOverride }}
|
||||||
{{- if contains $name .Release.Name }}
|
{{- if contains $name .Release.Name }}
|
||||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||||
{{- else }}
|
{{- else }}
|
||||||
@@ -27,7 +33,7 @@ If release name contains chart name it will be used as a full name.
|
|||||||
Create chart name and version as used by the chart label.
|
Create chart name and version as used by the chart label.
|
||||||
*/}}
|
*/}}
|
||||||
{{- define "gha-runner-scale-set-controller.chart" -}}
|
{{- define "gha-runner-scale-set-controller.chart" -}}
|
||||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
{{- printf "%s-%s" (include "gha-base-name" .) .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{/*
|
{{/*
|
||||||
@@ -39,7 +45,7 @@ helm.sh/chart: {{ include "gha-runner-scale-set-controller.chart" . }}
|
|||||||
{{- if .Chart.AppVersion }}
|
{{- if .Chart.AppVersion }}
|
||||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
app.kubernetes.io/part-of: gha-runner-scale-set-controller
|
app.kubernetes.io/part-of: gha-rs-controller
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||||
{{- range $k, $v := .Values.labels }}
|
{{- range $k, $v := .Values.labels }}
|
||||||
{{ $k }}: {{ $v }}
|
{{ $k }}: {{ $v }}
|
||||||
@@ -51,6 +57,7 @@ Selector labels
|
|||||||
*/}}
|
*/}}
|
||||||
{{- define "gha-runner-scale-set-controller.selectorLabels" -}}
|
{{- define "gha-runner-scale-set-controller.selectorLabels" -}}
|
||||||
app.kubernetes.io/name: {{ include "gha-runner-scale-set-controller.name" . }}
|
app.kubernetes.io/name: {{ include "gha-runner-scale-set-controller.name" . }}
|
||||||
|
app.kubernetes.io/namespace: {{ .Release.Namespace }}
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
@@ -73,35 +80,43 @@ Create the name of the service account to use
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- define "gha-runner-scale-set-controller.managerClusterRoleName" -}}
|
{{- define "gha-runner-scale-set-controller.managerClusterRoleName" -}}
|
||||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-role
|
{{- include "gha-runner-scale-set-controller.fullname" . }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- define "gha-runner-scale-set-controller.managerClusterRoleBinding" -}}
|
{{- define "gha-runner-scale-set-controller.managerClusterRoleBinding" -}}
|
||||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-rolebinding
|
{{- include "gha-runner-scale-set-controller.fullname" . }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" -}}
|
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" -}}
|
||||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-role
|
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" -}}
|
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" -}}
|
||||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-rolebinding
|
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" -}}
|
||||||
|
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace-watch
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- define "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleBinding" -}}
|
||||||
|
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace-watch
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}}
|
{{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}}
|
||||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-role
|
{{- include "gha-runner-scale-set-controller.fullname" . }}-listener
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- define "gha-runner-scale-set-controller.managerListenerRoleBinding" -}}
|
{{- define "gha-runner-scale-set-controller.managerListenerRoleBinding" -}}
|
||||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-rolebinding
|
{{- include "gha-runner-scale-set-controller.fullname" . }}-listener
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}}
|
{{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}}
|
||||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election-role
|
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}}
|
{{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}}
|
||||||
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election-rolebinding
|
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
|
{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
|
||||||
@@ -111,3 +126,7 @@ Create the name of the service account to use
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
{{- $names | join ","}}
|
{{- $names | join ","}}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
|
{{- define "gha-runner-scale-set-controller.serviceMonitorName" -}}
|
||||||
|
{{- include "gha-runner-scale-set-controller.fullname" . }}-service-monitor
|
||||||
|
{{- end }}
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ spec:
 {{- toYaml . | nindent 8 }}
 {{- end }}
 labels:
-app.kubernetes.io/part-of: gha-runner-scale-set-controller
+app.kubernetes.io/part-of: gha-rs-controller
 app.kubernetes.io/component: controller-manager
 app.kubernetes.io/version: {{ .Chart.Version }}
 {{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }}
@@ -56,11 +56,34 @@ spec:
 {{- with .Values.flags.logLevel }}
 - "--log-level={{ . }}"
 {{- end }}
+{{- with .Values.flags.logFormat }}
+- "--log-format={{ . }}"
+{{- end }}
 {{- with .Values.flags.watchSingleNamespace }}
 - "--watch-single-namespace={{ . }}"
 {{- end }}
+{{- with .Values.flags.updateStrategy }}
+- "--update-strategy={{ . }}"
+{{- end }}
+{{- if .Values.metrics }}
+{{- with .Values.metrics }}
+- "--listener-metrics-addr={{ .listenerAddr }}"
+- "--listener-metrics-endpoint={{ .listenerEndpoint }}"
+- "--metrics-addr={{ .controllerManagerAddr }}"
+{{- end }}
+{{- else }}
+- "--listener-metrics-addr=0"
+- "--listener-metrics-endpoint="
+- "--metrics-addr=0"
+{{- end }}
 command:
 - "/manager"
+{{- with .Values.metrics }}
+ports:
+- containerPort: {{regexReplaceAll ":([0-9]+)" .controllerManagerAddr "${1}"}}
+protocol: TCP
+name: metrics
+{{- end }}
 env:
 - name: CONTROLLER_MANAGER_CONTAINER_IMAGE
 value: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
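Taken together, the template additions above wire the new logFormat, updateStrategy and metrics values into the manager container. Below is only a sketch of the fragment this renders, assuming the metrics values exercised by the new TestControllerDeployment_MetricsPorts test (controllerManagerAddr ":8080", listenerAddr ":8081", listenerEndpoint "/metrics") and otherwise default flags; with metrics unset, the three flags instead render as --metrics-addr=0, --listener-metrics-addr=0 and an empty --listener-metrics-endpoint=, which disables metrics and omits the port:

# Illustrative rendered fragment of the manager container (indentation and unrelated fields omitted).
args:
  - "--auto-scaling-runner-set-only"
  - "--log-level=debug"
  - "--log-format=text"
  - "--update-strategy=immediate"
  - "--listener-metrics-addr=:8081"
  - "--listener-metrics-endpoint=/metrics"
  - "--metrics-addr=:8080"
ports:
  - containerPort: 8080   # regexReplaceAll strips the leading ":" from controllerManagerAddr
    protocol: TCP
    name: metrics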
@@ -2,7 +2,7 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
-name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
+name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" . }}
 namespace: {{ .Values.flags.watchSingleNamespace }}
 rules:
 - apiGroups:
@@ -2,12 +2,12 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }}
+name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleBinding" . }}
 namespace: {{ .Values.flags.watchSingleNamespace }}
 roleRef:
 apiGroup: rbac.authorization.k8s.io
 kind: Role
-name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
+name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" . }}
 subjects:
 - kind: ServiceAccount
 name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
@@ -1,6 +1,7 @@
 package tests

 import (
+"fmt"
 "os"
 "path/filepath"
 "strings"
@@ -8,6 +9,7 @@ import (

 "github.com/gruntwork-io/terratest/modules/helm"
 "github.com/gruntwork-io/terratest/modules/k8s"
+"github.com/gruntwork-io/terratest/modules/logger"
 "github.com/gruntwork-io/terratest/modules/random"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
@@ -33,6 +35,7 @@ func TestTemplate_CreateServiceAccount(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "serviceAccount.create": "true",
 "serviceAccount.annotations.foo": "bar",
@@ -46,7 +49,7 @@ func TestTemplate_CreateServiceAccount(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &serviceAccount)

 assert.Equal(t, namespaceName, serviceAccount.Namespace)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", serviceAccount.Name)
+assert.Equal(t, "test-arc-gha-rs-controller", serviceAccount.Name)
 assert.Equal(t, "bar", string(serviceAccount.Annotations["foo"]))
 }

@@ -61,6 +64,7 @@ func TestTemplate_CreateServiceAccount_OverwriteName(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "serviceAccount.create": "true",
 "serviceAccount.name": "overwritten-name",
@@ -90,6 +94,7 @@ func TestTemplate_CreateServiceAccount_CannotUseDefaultServiceAccount(t *testing
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "serviceAccount.create": "true",
 "serviceAccount.name": "default",
@@ -113,6 +118,7 @@ func TestTemplate_NotCreateServiceAccount(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "serviceAccount.create": "false",
 "serviceAccount.name": "overwritten-name",
@@ -136,6 +142,7 @@ func TestTemplate_NotCreateServiceAccount_ServiceAccountNotSet(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "serviceAccount.create": "false",
 "serviceAccount.annotations.foo": "bar",
@@ -158,6 +165,7 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{},
 KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 }
@@ -168,7 +176,7 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &managerClusterRole)

 assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace")
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name)
+assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRole.Name)
 assert.Equal(t, 16, len(managerClusterRole.Rules))

 _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
@@ -189,6 +197,7 @@ func TestTemplate_ManagerClusterRoleBinding(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "serviceAccount.create": "true",
 },
@@ -201,9 +210,9 @@ func TestTemplate_ManagerClusterRoleBinding(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &managerClusterRoleBinding)

 assert.Empty(t, managerClusterRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace")
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-rolebinding", managerClusterRoleBinding.Name)
+assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRoleBinding.Name)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRoleBinding.RoleRef.Name)
+assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRoleBinding.RoleRef.Name)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerClusterRoleBinding.Subjects[0].Name)
+assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRoleBinding.Subjects[0].Name)
 assert.Equal(t, namespaceName, managerClusterRoleBinding.Subjects[0].Namespace)

 _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"})
@@ -224,6 +233,7 @@ func TestTemplate_CreateManagerListenerRole(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{},
 KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 }
@@ -234,7 +244,7 @@ func TestTemplate_CreateManagerListenerRole(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &managerListenerRole)

 assert.Equal(t, namespaceName, managerListenerRole.Namespace, "Role should have a namespace")
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRole.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-listener", managerListenerRole.Name)
 assert.Equal(t, 4, len(managerListenerRole.Rules))
 assert.Equal(t, "pods", managerListenerRole.Rules[0].Resources[0])
 assert.Equal(t, "pods/status", managerListenerRole.Rules[1].Resources[0])
@@ -253,6 +263,7 @@ func TestTemplate_ManagerListenerRoleBinding(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "serviceAccount.create": "true",
 },
@@ -265,9 +276,9 @@ func TestTemplate_ManagerListenerRoleBinding(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &managerListenerRoleBinding)

 assert.Equal(t, namespaceName, managerListenerRoleBinding.Namespace, "RoleBinding should have a namespace")
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-rolebinding", managerListenerRoleBinding.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-listener", managerListenerRoleBinding.Name)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRoleBinding.RoleRef.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-listener", managerListenerRoleBinding.RoleRef.Name)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerListenerRoleBinding.Subjects[0].Name)
+assert.Equal(t, "test-arc-gha-rs-controller", managerListenerRoleBinding.Subjects[0].Name)
 assert.Equal(t, namespaceName, managerListenerRoleBinding.Subjects[0].Namespace)
 }

@@ -289,6 +300,7 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "image.tag": "dev",
 },
@@ -301,29 +313,29 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &deployment)

 assert.Equal(t, namespaceName, deployment.Namespace)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
+assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name)
-assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
+assert.Equal(t, "gha-rs-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
-assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"])
+assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/name"])
 assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
 assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
 assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
 assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"])
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
+assert.Equal(t, "test-arc-gha-rs-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
 assert.NotContains(t, deployment.Labels, "actions.github.com/controller-watch-single-namespace")
-assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"])
+assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/part-of"])

 assert.Equal(t, int32(1), *deployment.Spec.Replicas)

-assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
+assert.Equal(t, "gha-rs-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
 assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])

-assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
+assert.Equal(t, "gha-rs-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
 assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])

 assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])

 assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName)
+assert.Equal(t, "test-arc-gha-rs-controller", deployment.Spec.Template.Spec.ServiceAccountName)
 assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext)
 assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName)
 assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
@@ -345,9 +357,16 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
 assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
 assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])

-assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2)
+expectedArgs := []string{
-assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
+"--auto-scaling-runner-set-only",
-assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
+"--log-level=debug",
+"--log-format=text",
+"--update-strategy=immediate",
+"--metrics-addr=0",
+"--listener-metrics-addr=0",
+"--listener-metrics-endpoint=",
+}
+assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)

 assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
 assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
@@ -384,6 +403,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "labels.foo": "bar",
 "labels.github": "actions",
@@ -391,11 +411,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 "image.pullPolicy": "Always",
 "image.tag": "dev",
 "imagePullSecrets[0].name": "dockerhub",
-"nameOverride": "gha-runner-scale-set-controller-override",
+"nameOverride": "gha-rs-controller-override",
-"fullnameOverride": "gha-runner-scale-set-controller-fullname-override",
+"fullnameOverride": "gha-rs-controller-fullname-override",
 "env[0].name": "ENV_VAR_NAME_1",
 "env[0].value": "ENV_VAR_VALUE_1",
-"serviceAccount.name": "gha-runner-scale-set-controller-sa",
+"serviceAccount.name": "gha-rs-controller-sa",
 "podAnnotations.foo": "bar",
 "podSecurityContext.fsGroup": "1000",
 "securityContext.runAsUser": "1000",
@@ -406,6 +426,9 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 "affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo",
 "affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar",
 "priorityClassName": "test-priority-class",
+"flags.updateStrategy": "eventual",
+"flags.logLevel": "info",
+"flags.logFormat": "json",
 },
 KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 }
@@ -416,22 +439,22 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &deployment)

 assert.Equal(t, namespaceName, deployment.Namespace)
-assert.Equal(t, "gha-runner-scale-set-controller-fullname-override", deployment.Name)
+assert.Equal(t, "gha-rs-controller-fullname-override", deployment.Name)
-assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
+assert.Equal(t, "gha-rs-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
-assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Labels["app.kubernetes.io/name"])
+assert.Equal(t, "gha-rs-controller-override", deployment.Labels["app.kubernetes.io/name"])
 assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
 assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
 assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
-assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"])
+assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/part-of"])
 assert.Equal(t, "bar", deployment.Labels["foo"])
 assert.Equal(t, "actions", deployment.Labels["github"])

 assert.Equal(t, int32(1), *deployment.Spec.Replicas)

-assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
+assert.Equal(t, "gha-rs-controller-override", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
 assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])

-assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
+assert.Equal(t, "gha-rs-controller-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
 assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])

 assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
@@ -442,7 +465,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {

 assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
 assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
-assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName)
+assert.Equal(t, "gha-rs-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName)
 assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup)
 assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName)
 assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
@@ -470,10 +493,18 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
 assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])

-assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
+expectArgs := []string{
-assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
+"--auto-scaling-runner-set-only",
-assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1])
+"--auto-scaler-image-pull-secrets=dockerhub",
-assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
+"--log-level=info",
+"--log-format=json",
+"--update-strategy=eventual",
+"--listener-metrics-addr=0",
+"--listener-metrics-endpoint=",
+"--metrics-addr=0",
+}

+assert.ElementsMatch(t, expectArgs, deployment.Spec.Template.Spec.Containers[0].Args)

 assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4)
 assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
@@ -508,6 +539,7 @@ func TestTemplate_EnableLeaderElectionRole(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "replicaCount": "2",
 },
@@ -519,7 +551,7 @@ func TestTemplate_EnableLeaderElectionRole(t *testing.T) {
 var leaderRole rbacv1.Role
 helm.UnmarshalK8SYaml(t, output, &leaderRole)

-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-role", leaderRole.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-leader-election", leaderRole.Name)
 assert.Equal(t, namespaceName, leaderRole.Namespace)
 }

@@ -534,6 +566,7 @@ func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "replicaCount": "2",
 },
@@ -545,10 +578,10 @@ func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) {
 var leaderRoleBinding rbacv1.RoleBinding
 helm.UnmarshalK8SYaml(t, output, &leaderRoleBinding)

-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-rolebinding", leaderRoleBinding.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-leader-election", leaderRoleBinding.Name)
 assert.Equal(t, namespaceName, leaderRoleBinding.Namespace)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-role", leaderRoleBinding.RoleRef.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-leader-election", leaderRoleBinding.RoleRef.Name)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", leaderRoleBinding.Subjects[0].Name)
+assert.Equal(t, "test-arc-gha-rs-controller", leaderRoleBinding.Subjects[0].Name)
 }

 func TestTemplate_EnableLeaderElection(t *testing.T) {
@@ -562,6 +595,7 @@ func TestTemplate_EnableLeaderElection(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "replicaCount": "2",
 "image.tag": "dev",
@@ -575,7 +609,7 @@ func TestTemplate_EnableLeaderElection(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &deployment)

 assert.Equal(t, namespaceName, deployment.Namespace)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
+assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name)

 assert.Equal(t, int32(2), *deployment.Spec.Replicas)

@@ -587,11 +621,19 @@ func TestTemplate_EnableLeaderElection(t *testing.T) {
 assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
 assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])

-assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4)
+expectedArgs := []string{
-assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
+"--auto-scaling-runner-set-only",
-assert.Equal(t, "--enable-leader-election", deployment.Spec.Template.Spec.Containers[0].Args[1])
+"--enable-leader-election",
-assert.Equal(t, "--leader-election-id=test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.Containers[0].Args[2])
+"--leader-election-id=test-arc-gha-rs-controller",
-assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[3])
+"--log-level=debug",
+"--log-format=text",
+"--update-strategy=immediate",
+"--listener-metrics-addr=0",
+"--listener-metrics-endpoint=",
+"--metrics-addr=0",
+}

+assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
 }

 func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
@@ -605,6 +647,7 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "imagePullSecrets[0].name": "dockerhub",
 "imagePullSecrets[1].name": "ghcr",
@@ -619,10 +662,18 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {

 assert.Equal(t, namespaceName, deployment.Namespace)

-assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
+expectedArgs := []string{
-assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
+"--auto-scaling-runner-set-only",
-assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1])
+"--auto-scaler-image-pull-secrets=dockerhub,ghcr",
-assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
+"--log-level=debug",
+"--log-format=text",
+"--update-strategy=immediate",
+"--listener-metrics-addr=0",
+"--listener-metrics-endpoint=",
+"--metrics-addr=0",
+}

+assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
 }

 func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
@@ -643,6 +694,7 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "image.tag": "dev",
 "flags.watchSingleNamespace": "demo",
@@ -656,28 +708,28 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &deployment)

 assert.Equal(t, namespaceName, deployment.Namespace)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
+assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name)
-assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
+assert.Equal(t, "gha-rs-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
-assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"])
+assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/name"])
 assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
 assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
 assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
 assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"])
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
+assert.Equal(t, "test-arc-gha-rs-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
 assert.Equal(t, "demo", deployment.Labels["actions.github.com/controller-watch-single-namespace"])

 assert.Equal(t, int32(1), *deployment.Spec.Replicas)

-assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
+assert.Equal(t, "gha-rs-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
 assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])

-assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
+assert.Equal(t, "gha-rs-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
 assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])

 assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])

 assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName)
+assert.Equal(t, "test-arc-gha-rs-controller", deployment.Spec.Template.Spec.ServiceAccountName)
 assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext)
 assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName)
 assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
@@ -699,10 +751,18 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
 assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
 assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])

-assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
+expectedArgs := []string{
-assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
+"--auto-scaling-runner-set-only",
-assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
+"--log-level=debug",
-assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2])
+"--log-format=text",
+"--watch-single-namespace=demo",
+"--update-strategy=immediate",
+"--listener-metrics-addr=0",
+"--listener-metrics-endpoint=",
+"--metrics-addr=0",
+}

+assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)

 assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
 assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
@@ -732,6 +792,7 @@ func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "env[0].Name": "ENV_VAR_NAME_1",
 "env[0].Value": "ENV_VAR_VALUE_1",
@@ -752,7 +813,7 @@ func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &deployment)

 assert.Equal(t, namespaceName, deployment.Namespace)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
+assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name)

 assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7)
 assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
@@ -778,6 +839,7 @@ func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T)
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "flags.watchSingleNamespace": "demo",
 },
@@ -799,6 +861,7 @@ func TestTemplate_WatchSingleNamespace_NotManagerClusterRoleBinding(t *testing.T
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "serviceAccount.create": "true",
 "flags.watchSingleNamespace": "demo",
@@ -821,6 +884,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "flags.watchSingleNamespace": "demo",
 },
@@ -832,7 +896,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
 var managerSingleNamespaceControllerRole rbacv1.Role
 helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRole)

-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRole.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-single-namespace", managerSingleNamespaceControllerRole.Name)
 assert.Equal(t, namespaceName, managerSingleNamespaceControllerRole.Namespace)
 assert.Equal(t, 10, len(managerSingleNamespaceControllerRole.Rules))

@@ -841,7 +905,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
 var managerSingleNamespaceWatchRole rbacv1.Role
 helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRole)

-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-single-namespace-watch", managerSingleNamespaceWatchRole.Name)
 assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace)
 assert.Equal(t, 14, len(managerSingleNamespaceWatchRole.Rules))
 }
@@ -857,6 +921,7 @@ func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
+Logger: logger.Discard,
 SetValues: map[string]string{
 "flags.watchSingleNamespace": "demo",
 },
@@ -868,10 +933,10 @@ func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
 var managerSingleNamespaceControllerRoleBinding rbacv1.RoleBinding
 helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRoleBinding)

-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceControllerRoleBinding.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-single-namespace", managerSingleNamespaceControllerRoleBinding.Name)
 assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Namespace)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRoleBinding.RoleRef.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-single-namespace", managerSingleNamespaceControllerRoleBinding.RoleRef.Name)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceControllerRoleBinding.Subjects[0].Name)
+assert.Equal(t, "test-arc-gha-rs-controller", managerSingleNamespaceControllerRoleBinding.Subjects[0].Name)
 assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Subjects[0].Namespace)

 output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role_binding.yaml"})
@@ -879,9 +944,81 @@ func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
 var managerSingleNamespaceWatchRoleBinding rbacv1.RoleBinding
 helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRoleBinding)

-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceWatchRoleBinding.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-single-namespace-watch", managerSingleNamespaceWatchRoleBinding.Name)
 assert.Equal(t, "demo", managerSingleNamespaceWatchRoleBinding.Namespace)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRoleBinding.RoleRef.Name)
+assert.Equal(t, "test-arc-gha-rs-controller-single-namespace-watch", managerSingleNamespaceWatchRoleBinding.RoleRef.Name)
-assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name)
+assert.Equal(t, "test-arc-gha-rs-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name)
 assert.Equal(t, namespaceName, managerSingleNamespaceWatchRoleBinding.Subjects[0].Namespace)
 }

+func TestControllerDeployment_MetricsPorts(t *testing.T) {
+t.Parallel()
+
+// Path to the helm chart we will test
+helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+require.NoError(t, err)
+
+chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
+require.NoError(t, err)
+
+chart := new(Chart)
+err = yaml.Unmarshal(chartContent, chart)
+require.NoError(t, err)
+
+releaseName := "test-arc"
+namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+options := &helm.Options{
+Logger: logger.Discard,
+SetValues: map[string]string{
+"image.tag": "dev",
+"metrics.controllerManagerAddr": ":8080",
+"metrics.listenerAddr": ":8081",
+"metrics.listenerEndpoint": "/metrics",
+},
+KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+}
+
+output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
+
+var deployment appsv1.Deployment
+helm.UnmarshalK8SYaml(t, output, &deployment)
+
+require.Len(t, deployment.Spec.Template.Spec.Containers, 1, "Expected one container")
+container := deployment.Spec.Template.Spec.Containers[0]
+assert.Len(t, container.Ports, 1)
+port := container.Ports[0]
+assert.Equal(t, corev1.Protocol("TCP"), port.Protocol)
+assert.Equal(t, int32(8080), port.ContainerPort)
+
+metricsFlags := map[string]*struct {
+expect string
+frequency int
+}{
+"--listener-metrics-addr": {
+expect: ":8081",
+},
+"--listener-metrics-endpoint": {
+expect: "/metrics",
+},
+"--metrics-addr": {
+expect: ":8080",
+},
+}
+for _, cmd := range container.Args {
+s := strings.Split(cmd, "=")
+if len(s) != 2 {
+continue
+}
+flag, ok := metricsFlags[s[0]]
+if !ok {
+continue
+}
+flag.frequency++
+assert.Equal(t, flag.expect, s[1])
+}
+
+for key, value := range metricsFlags {
+assert.Equal(t, value.frequency, 1, fmt.Sprintf("frequency of %q is not 1", key))
+}
+}
@@ -75,11 +75,41 @@ affinity: {}
 # PriorityClass: system-cluster-critical
 priorityClassName: ""

+## If `metrics:` object is not provided, or commented out, the following flags
+## will be applied the controller-manager and listener pods with empty values:
+## `--metrics-addr`, `--listener-metrics-addr`, `--listener-metrics-endpoint`.
+## This will disable metrics.
+##
+## To enable metrics, uncomment the following lines.
+# metrics:
+# controllerManagerAddr: ":8080"
+# listenerAddr: ":8080"
+# listenerEndpoint: "/metrics"
+
 flags:
-# Log level can be set here with one of the following values: "debug", "info", "warn", "error".
+## Log level can be set here with one of the following values: "debug", "info", "warn", "error".
-# Defaults to "debug".
+## Defaults to "debug".
 logLevel: "debug"
+## Log format can be set with one of the following values: "text", "json"
+## Defaults to "text"
+logFormat: "text"
+
 ## Restricts the controller to only watch resources in the desired namespace.
 ## Defaults to watch all namespaces when unset.
 # watchSingleNamespace: ""
+
+## Defines how the controller should handle upgrades while having running jobs.
+##
+## The srategies available are:
+## - "immediate": (default) The controller will immediately apply the change causing the
+## recreation of the listener and ephemeral runner set. This can lead to an
+## overprovisioning of runners, if there are pending / running jobs. This should not
+## be a problem at a small scale, but it could lead to a significant increase of
+## resources if you have a lot of jobs running concurrently.
+##
+## - "eventual": The controller will remove the listener and ephemeral runner set
+## immediately, but will not recreate them (to apply changes) until all
+## pending / running jobs have completed.
+## This can lead to a longer time to apply the change but it will ensure
+## that you don't have any overprovisioning of runners.
+updateStrategy: "immediate"
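The comments above document the new metrics, logFormat and updateStrategy knobs. The snippet below is only an illustrative override file (none of it is required; the defaults keep metrics disabled, text logs and the immediate strategy) showing what enabling all three could look like:

# example-values.yaml (illustrative): pass to helm with -f / --values at install or upgrade time.
metrics:
  controllerManagerAddr: ":8080"
  listenerAddr: ":8080"
  listenerEndpoint: "/metrics"
flags:
  logLevel: "info"
  logFormat: "json"
  updateStrategy: "eventual"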
@@ -15,13 +15,13 @@ type: application
|
|||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.4.0
|
version: 0.5.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application. Versions are not expected to
|
# incremented each time you make changes to the application. Versions are not expected to
|
||||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||||
# It is recommended to use it with quotes.
|
# It is recommended to use it with quotes.
|
||||||
appVersion: "0.4.0"
|
appVersion: "0.5.0"
|
||||||
|
|
||||||
home: https://github.com/actions/dev-arc
|
home: https://github.com/actions/dev-arc
|
||||||
|
|
||||||
|
@@ -1,8 +1,13 @@
{{/*
Expand the name of the chart.
*/}}

{{- define "gha-base-name" -}}
gha-rs
{{- end }}

{{- define "gha-runner-scale-set.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- default (include "gha-base-name" .) .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
@@ -11,7 +16,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
If release name contains chart name it will be used as a full name.
*/}}
{{- define "gha-runner-scale-set.fullname" -}}
{{- $name := default .Chart.Name }}
{{- $name := default (include "gha-base-name" .) }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}

@@ -19,7 +24,7 @@ If release name contains chart name it will be used as a full name.
Create chart name and version as used by the chart label.
*/}}
{{- define "gha-runner-scale-set.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- printf "%s-%s" (include "gha-base-name" .) .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
@@ -32,7 +37,7 @@ helm.sh/chart: {{ include "gha-runner-scale-set.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: gha-runner-scale-set
app.kubernetes.io/part-of: gha-rs
actions.github.com/scale-set-name: {{ .Release.Name }}
actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
{{- end }}

@@ -58,19 +63,19 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{- define "gha-runner-scale-set.noPermissionServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-no-permission-service-account
{{- include "gha-runner-scale-set.fullname" . }}-no-permission
{{- end }}

{{- define "gha-runner-scale-set.kubeModeRoleName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
{{- end }}

{{- define "gha-runner-scale-set.kubeModeRoleBindingName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role-binding
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
{{- end }}

{{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-service-account
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
{{- end }}

{{- define "gha-runner-scale-set.dind-init-container" -}}

@@ -428,11 +433,11 @@ volumeMounts:
{{- end }}

{{- define "gha-runner-scale-set.managerRoleName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-manager-role
{{- include "gha-runner-scale-set.fullname" . }}-manager
{{- end }}

{{- define "gha-runner-scale-set.managerRoleBindingName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-manager-role-binding
{{- include "gha-runner-scale-set.fullname" . }}-manager
{{- end }}

{{- define "gha-runner-scale-set.managerServiceAccountName" -}}

@@ -451,7 +456,7 @@ volumeMounts:
{{- $managerServiceAccountName := "" }}
{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
{{- if kindIs "map" $deployment.metadata.labels }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-rs-controller" }}
{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}

@@ -463,13 +468,13 @@ volumeMounts:
{{- end }}
{{- end }}
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
{{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
{{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if gt $multiNamespacesCounter 1 }}
{{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if eq $multiNamespacesCounter 1 }}
{{- with $controllerDeployment.metadata }}

@@ -482,11 +487,11 @@ volumeMounts:
{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
{{- end }}
{{- else }}
{{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- end }}
{{- if eq $managerServiceAccountName "" }}
{{- fail "No service account name found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No service account name found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- $managerServiceAccountName }}
{{- end }}

@@ -508,7 +513,7 @@ volumeMounts:
{{- $managerServiceAccountNamespace := "" }}
{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
{{- if kindIs "map" $deployment.metadata.labels }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-rs-controller" }}
{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}

@@ -520,13 +525,13 @@ volumeMounts:
{{- end }}
{{- end }}
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
{{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
{{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if gt $multiNamespacesCounter 1 }}
{{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if eq $multiNamespacesCounter 1 }}
{{- with $controllerDeployment.metadata }}

@@ -539,11 +544,11 @@ volumeMounts:
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
{{- end }}
{{- else }}
{{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- end }}
{{- if eq $managerServiceAccountNamespace "" }}
{{- fail "No service account namespace found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- $managerServiceAccountNamespace }}
{{- end }}
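Note: with the new gha-base-name helper the rendered resource names become noticeably shorter. For a release named test-runners, the chart tests further down expect names along these lines (listed here for orientation only):

# kube mode Role / RoleBinding / ServiceAccount: test-runners-gha-rs-kube-mode
# no-permission ServiceAccount: test-runners-gha-rs-no-permission
# manager Role / RoleBinding: test-runners-gha-rs-manager
# GitHub config Secret: test-runners-gha-rs-github-secret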
@@ -119,7 +119,7 @@ spec:
{{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
{{- end }}
{{- with .Values.template.spec.initContainers }}
{{- toYaml . | nindent 8 }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}
containers:
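Note: the nindent 8 to nindent 6 change only affects how user-supplied template.spec.initContainers from the values file are indented in the rendered AutoscalingRunnerSet, so the injected list items line up with the surrounding pod spec. A sketch of the values shape this block consumes (the name, image, and command are illustrative, not taken from the diff):

template:
  spec:
    initContainers:
      - name: setup
        image: busybox:latest
        command: ["sh", "-c", "echo init"]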
@@ -10,6 +10,7 @@ import (
  actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com"
  "github.com/gruntwork-io/terratest/modules/helm"
  "github.com/gruntwork-io/terratest/modules/k8s"
  "github.com/gruntwork-io/terratest/modules/logger"
  "github.com/gruntwork-io/terratest/modules/random"
  "github.com/stretchr/testify/assert"
  "github.com/stretchr/testify/require"
@@ -28,6 +29,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -43,7 +45,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
  helm.UnmarshalK8SYaml(t, output, &githubSecret)

  assert.Equal(t, namespaceName, githubSecret.Namespace)
  assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", githubSecret.Name)
  assert.Equal(t, "test-runners-gha-rs-github-secret", githubSecret.Name)
  assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"]))
  assert.Equal(t, "actions.github.com/cleanup-protection", githubSecret.Finalizers[0])
}

@@ -59,6 +61,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_app_id": "10",

@@ -92,6 +95,7 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAuthInput(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_app_id": "",

@@ -119,6 +123,7 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_app_id": "10",

@@ -145,6 +150,7 @@ func TestTemplateNotRenderedGitHubSecretWithPredefinedSecret(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret": "pre-defined-secret",

@@ -169,6 +175,7 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",
@@ -183,13 +190,13 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
  helm.UnmarshalK8SYaml(t, output, &serviceAccount)

  assert.Equal(t, namespaceName, serviceAccount.Namespace)
  assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", serviceAccount.Name)
  assert.Equal(t, "test-runners-gha-rs-no-permission", serviceAccount.Name)

  output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
  var ars v1alpha1.AutoscalingRunnerSet
  helm.UnmarshalK8SYaml(t, output, &ars)

  assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName)
  assert.Equal(t, "test-runners-gha-rs-no-permission", ars.Spec.Template.Spec.ServiceAccountName)
  assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) // no finalizer protections in place
}

@@ -204,6 +211,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -219,7 +227,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
  helm.UnmarshalK8SYaml(t, output, &serviceAccount)

  assert.Equal(t, namespaceName, serviceAccount.Namespace)
  assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name)
  assert.Equal(t, "test-runners-gha-rs-kube-mode", serviceAccount.Name)
  assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])

  output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})

@@ -227,7 +235,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
  helm.UnmarshalK8SYaml(t, output, &role)

  assert.Equal(t, namespaceName, role.Namespace)
  assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name)
  assert.Equal(t, "test-runners-gha-rs-kube-mode", role.Name)

  assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])

@@ -243,11 +251,11 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
  helm.UnmarshalK8SYaml(t, output, &roleBinding)

  assert.Equal(t, namespaceName, roleBinding.Namespace)
  assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role-binding", roleBinding.Name)
  assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Name)
  assert.Len(t, roleBinding.Subjects, 1)
  assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name)
  assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Subjects[0].Name)
  assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
  assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name)
  assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.RoleRef.Name)
  assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
  assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])

@@ -255,7 +263,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
  var ars v1alpha1.AutoscalingRunnerSet
  helm.UnmarshalK8SYaml(t, output, &ars)

  expectedServiceAccountName := "test-runners-gha-runner-scale-set-kube-mode-service-account"
  expectedServiceAccountName := "test-runners-gha-rs-kube-mode"
  assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
  assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
}
@@ -271,6 +279,7 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -303,6 +312,7 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -320,14 +330,14 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
  assert.Equal(t, namespaceName, ars.Namespace)
  assert.Equal(t, "test-runners", ars.Name)

  assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
  assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
  assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
  assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/part-of"])
  assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/part-of"])
  assert.Equal(t, "autoscaling-runner-set", ars.Labels["app.kubernetes.io/component"])
  assert.NotEmpty(t, ars.Labels["app.kubernetes.io/version"])

  assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
  assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)
  assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)

  assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")

@@ -354,6 +364,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -372,10 +383,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
  assert.Equal(t, namespaceName, ars.Namespace)
  assert.Equal(t, "test-runners", ars.Name)

  assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
  assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
  assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
  assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
  assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)
  assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
  assert.Equal(t, "test-runner-scale-set-name", ars.Spec.RunnerScaleSetName)

  assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")

@@ -403,6 +414,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -450,6 +462,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MaxRunnersValidationError(t *testi
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -477,6 +490,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinRunnersValidationError(t *testi
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -505,6 +519,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationError(t *te
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -533,6 +548,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationSameValue(t
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -564,6 +580,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMin(t
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -594,6 +611,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -627,6 +645,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunners_FromValuesFile(t *te
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    ValuesFiles: []string{testValuesPath},
    KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
  }

@@ -654,6 +673,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "controllerServiceAccount.name": "arc",
      "controllerServiceAccount.namespace": "arc-system",
@@ -674,6 +694,50 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) {
  assert.Equal(t, "/data", ars.Spec.Template.Spec.Volumes[2].HostPath.Path, "Volume host path should be /data")
}

func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testing.T) {
  t.Parallel()

  // Path to the helm chart we will test
  helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
  require.NoError(t, err)

  testValuesPath, err := filepath.Abs("../tests/values_dind_extra_init_containers.yaml")
  require.NoError(t, err)

  releaseName := "test-runners"
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "controllerServiceAccount.name": "arc",
      "controllerServiceAccount.namespace": "arc-system",
    },
    ValuesFiles: []string{testValuesPath},
    KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
  }

  output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})

  var ars v1alpha1.AutoscalingRunnerSet
  helm.UnmarshalK8SYaml(t, output, &ars)

  assert.Len(t, ars.Spec.Template.Spec.InitContainers, 3, "InitContainers should be 3")
  assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[1].Name, "InitContainers[1] Name should be kube-init")
  assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[1].Image, "InitContainers[1] Image should be runner-image:latest")
  assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[1].Command[0], "InitContainers[1] Command[0] should be sudo")
  assert.Equal(t, "chown", ars.Spec.Template.Spec.InitContainers[1].Command[1], "InitContainers[1] Command[1] should be chown")
  assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[1].Command[2], "InitContainers[1] Command[2] should be -R")
  assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[1].Command[3], "InitContainers[1] Command[3] should be 1001:123")
  assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].Command[4], "InitContainers[1] Command[4] should be /home/runner/_work")
  assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name, "InitContainers[1] VolumeMounts[0] Name should be work")
  assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath, "InitContainers[1] VolumeMounts[0] MountPath should be /home/runner/_work")

  assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[2] Name should be ls")
  assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[2] Image should be ubuntu:latest")
  assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls")
}

func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
  t.Parallel()

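Note: the new test reads its fixture from values_dind_extra_init_containers.yaml, which is not shown in this diff. A sketch of what that file would need to contain for the assertions above to pass, reconstructed from the test rather than copied from the repository (the githubConfigUrl, token, and containerMode entries are assumptions):

githubConfigUrl: https://github.com/actions
githubConfigSecret:
  github_token: gh_token12345
containerMode:
  type: dind
template:
  spec:
    initContainers:
      - name: kube-init
        image: runner-image:latest
        command: ["sudo", "chown", "-R", "1001:123", "/home/runner/_work"]
        volumeMounts:
          - name: work
            mountPath: /home/runner/_work
      - name: ls
        image: ubuntu:latest
        command: ["ls"]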
@@ -688,6 +752,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "controllerServiceAccount.name": "arc",
      "controllerServiceAccount.namespace": "arc-system",

@@ -724,6 +789,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_K8S_ExtraVolumes(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "controllerServiceAccount.name": "arc",
      "controllerServiceAccount.namespace": "arc-system",

@@ -755,6 +821,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -773,10 +840,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
  assert.Equal(t, namespaceName, ars.Namespace)
  assert.Equal(t, "test-runners", ars.Name)

  assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
  assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
  assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
  assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
  assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)
  assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)

  assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")

@@ -846,6 +913,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -864,10 +932,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
  assert.Equal(t, namespaceName, ars.Namespace)
  assert.Equal(t, "test-runners", ars.Name)

  assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
  assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
  assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
  assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
  assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)
  assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)

  assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
  assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil")

@@ -903,6 +971,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T)
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret": "pre-defined-secrets",

@@ -920,7 +989,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T)
  assert.Equal(t, namespaceName, ars.Namespace)
  assert.Equal(t, "test-runners", ars.Name)

  assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
  assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
  assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
  assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
  assert.Equal(t, "pre-defined-secrets", ars.Spec.GitHubConfigSecret)

@@ -937,6 +1006,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ErrorOnEmptyPredefinedSecret(t *te
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret": "",

@@ -963,6 +1033,7 @@ func TestTemplateRenderedWithProxy(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret": "pre-defined-secrets",
@@ -1026,6 +1097,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
  t.Run("providing githubServerTLS.runnerMountPath", func(t *testing.T) {
    t.Run("mode: default", func(t *testing.T) {
      options := &helm.Options{
        Logger: logger.Discard,
        SetValues: map[string]string{
          "githubConfigUrl": "https://github.com/actions",
          "githubConfigSecret": "pre-defined-secrets",

@@ -1084,6 +1156,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {

    t.Run("mode: dind", func(t *testing.T) {
      options := &helm.Options{
        Logger: logger.Discard,
        SetValues: map[string]string{
          "githubConfigUrl": "https://github.com/actions",
          "githubConfigSecret": "pre-defined-secrets",

@@ -1143,6 +1216,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {

    t.Run("mode: kubernetes", func(t *testing.T) {
      options := &helm.Options{
        Logger: logger.Discard,
        SetValues: map[string]string{
          "githubConfigUrl": "https://github.com/actions",
          "githubConfigSecret": "pre-defined-secrets",

@@ -1204,6 +1278,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
  t.Run("without providing githubServerTLS.runnerMountPath", func(t *testing.T) {
    t.Run("mode: default", func(t *testing.T) {
      options := &helm.Options{
        Logger: logger.Discard,
        SetValues: map[string]string{
          "githubConfigUrl": "https://github.com/actions",
          "githubConfigSecret": "pre-defined-secrets",

@@ -1258,6 +1333,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {

    t.Run("mode: dind", func(t *testing.T) {
      options := &helm.Options{
        Logger: logger.Discard,
        SetValues: map[string]string{
          "githubConfigUrl": "https://github.com/actions",
          "githubConfigSecret": "pre-defined-secrets",

@@ -1313,6 +1389,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {

    t.Run("mode: kubernetes", func(t *testing.T) {
      options := &helm.Options{
        Logger: logger.Discard,
        SetValues: map[string]string{
          "githubConfigUrl": "https://github.com/actions",
          "githubConfigSecret": "pre-defined-secrets",
@@ -1402,6 +1479,7 @@ func TestTemplateNamingConstraints(t *testing.T) {
  for name, tc := range tt {
    t.Run(name, func(t *testing.T) {
      options := &helm.Options{
        Logger: logger.Discard,
        SetValues: setValues,
        KubectlOptions: k8s.NewKubectlOptions("", "", tc.namespaceName),
      }

@@ -1423,6 +1501,7 @@ func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions/",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -1453,6 +1532,7 @@ func TestTemplate_CreateManagerRole(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -1468,7 +1548,7 @@ func TestTemplate_CreateManagerRole(t *testing.T) {
  helm.UnmarshalK8SYaml(t, output, &managerRole)

  assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
  assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
  assert.Equal(t, "test-runners-gha-rs-manager", managerRole.Name)
  assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
  assert.Equal(t, 6, len(managerRole.Rules))

@@ -1487,6 +1567,7 @@ func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",

@@ -1503,7 +1584,7 @@ func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
  helm.UnmarshalK8SYaml(t, output, &managerRole)

  assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
  assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
  assert.Equal(t, "test-runners-gha-rs-manager", managerRole.Name)
  assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
  assert.Equal(t, 7, len(managerRole.Rules))
  assert.Equal(t, "configmaps", managerRole.Rules[6].Resources[0])

@@ -1520,6 +1601,7 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
  namespaceName := "test-" + strings.ToLower(random.UniqueId())

  options := &helm.Options{
    Logger: logger.Discard,
    SetValues: map[string]string{
      "githubConfigUrl": "https://github.com/actions",
      "githubConfigSecret.github_token": "gh_token12345",
@@ -1535,8 +1617,8 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
|
|||||||
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
|
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
|
||||||
|
|
||||||
assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release")
|
assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release")
|
||||||
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name)
|
assert.Equal(t, "test-runners-gha-rs-manager", managerRoleBinding.Name)
|
||||||
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name)
|
assert.Equal(t, "test-runners-gha-rs-manager", managerRoleBinding.RoleRef.Name)
|
||||||
assert.Equal(t, "actions.github.com/cleanup-protection", managerRoleBinding.Finalizers[0])
|
assert.Equal(t, "actions.github.com/cleanup-protection", managerRoleBinding.Finalizers[0])
|
||||||
assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name)
|
assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name)
|
||||||
assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace)
|
assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace)
|
||||||
@@ -1556,6 +1638,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraContainers(t *testing.T) {
|
|||||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||||
|
|
||||||
options := &helm.Options{
|
options := &helm.Options{
|
||||||
|
Logger: logger.Discard,
|
||||||
SetValues: map[string]string{
|
SetValues: map[string]string{
|
||||||
"controllerServiceAccount.name": "arc",
|
"controllerServiceAccount.name": "arc",
|
||||||
"controllerServiceAccount.namespace": "arc-system",
|
"controllerServiceAccount.namespace": "arc-system",
|
||||||
@@ -1603,6 +1686,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraPodSpec(t *testing.T) {
|
|||||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||||
|
|
||||||
options := &helm.Options{
|
options := &helm.Options{
|
||||||
|
Logger: logger.Discard,
|
||||||
SetValues: map[string]string{
|
SetValues: map[string]string{
|
||||||
"controllerServiceAccount.name": "arc",
|
"controllerServiceAccount.name": "arc",
|
||||||
"controllerServiceAccount.namespace": "arc-system",
|
"controllerServiceAccount.namespace": "arc-system",
|
||||||
@@ -1636,6 +1720,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) {
|
|||||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||||
|
|
||||||
options := &helm.Options{
|
options := &helm.Options{
|
||||||
|
Logger: logger.Discard,
|
||||||
SetValues: map[string]string{
|
SetValues: map[string]string{
|
||||||
"controllerServiceAccount.name": "arc",
|
"controllerServiceAccount.name": "arc",
|
||||||
"controllerServiceAccount.namespace": "arc-system",
|
"controllerServiceAccount.namespace": "arc-system",
|
||||||
@@ -1681,6 +1766,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T)
|
|||||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||||
|
|
||||||
options := &helm.Options{
|
options := &helm.Options{
|
||||||
|
Logger: logger.Discard,
|
||||||
SetValues: map[string]string{
|
SetValues: map[string]string{
|
||||||
"controllerServiceAccount.name": "arc",
|
"controllerServiceAccount.name": "arc",
|
||||||
"controllerServiceAccount.namespace": "arc-system",
|
"controllerServiceAccount.namespace": "arc-system",
|
||||||
@@ -1722,6 +1808,7 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.
|
|||||||
|
|
||||||
annotationExpectedTests := map[string]*helm.Options{
|
annotationExpectedTests := map[string]*helm.Options{
|
||||||
"GitHub token": {
|
"GitHub token": {
|
||||||
|
Logger: logger.Discard,
|
||||||
SetValues: map[string]string{
|
SetValues: map[string]string{
|
||||||
"githubConfigUrl": "https://github.com/actions",
|
"githubConfigUrl": "https://github.com/actions",
|
||||||
"githubConfigSecret.github_token": "gh_token12345",
|
"githubConfigSecret.github_token": "gh_token12345",
|
||||||
@@ -1731,6 +1818,7 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.
|
|||||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||||
},
|
},
|
||||||
"GitHub app": {
|
"GitHub app": {
|
||||||
|
Logger: logger.Discard,
|
||||||
SetValues: map[string]string{
|
SetValues: map[string]string{
|
||||||
"githubConfigUrl": "https://github.com/actions",
|
"githubConfigUrl": "https://github.com/actions",
|
||||||
"githubConfigSecret.github_app_id": "10",
|
"githubConfigSecret.github_app_id": "10",
|
||||||
@@ -1755,6 +1843,7 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.
|
|||||||
|
|
||||||
t.Run("Annotation should not be set", func(t *testing.T) {
|
t.Run("Annotation should not be set", func(t *testing.T) {
|
||||||
options := &helm.Options{
|
options := &helm.Options{
|
||||||
|
Logger: logger.Discard,
|
||||||
SetValues: map[string]string{
|
SetValues: map[string]string{
|
||||||
"githubConfigUrl": "https://github.com/actions",
|
"githubConfigUrl": "https://github.com/actions",
|
||||||
"githubConfigSecret": "pre-defined-secret",
|
"githubConfigSecret": "pre-defined-secret",
|
||||||
@@ -1782,6 +1871,7 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t
|
|||||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||||
|
|
||||||
options := &helm.Options{
|
options := &helm.Options{
|
||||||
|
Logger: logger.Discard,
|
||||||
SetValues: map[string]string{
|
SetValues: map[string]string{
|
||||||
"githubConfigUrl": "https://github.com/actions",
|
"githubConfigUrl": "https://github.com/actions",
|
||||||
"githubConfigSecret.github_token": "gh_token12345",
|
"githubConfigSecret.github_token": "gh_token12345",
|
||||||
@@ -1797,12 +1887,12 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t
|
|||||||
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
|
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
|
||||||
|
|
||||||
annotationValues := map[string]string{
|
annotationValues := map[string]string{
|
||||||
actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-runner-scale-set-github-secret",
|
actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-rs-github-secret",
|
||||||
actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-runner-scale-set-manager-role",
|
actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-rs-manager",
|
||||||
actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-runner-scale-set-manager-role-binding",
|
actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-rs-manager",
|
||||||
actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-runner-scale-set-kube-mode-service-account",
|
actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-rs-kube-mode",
|
||||||
actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-runner-scale-set-kube-mode-role",
|
actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-rs-kube-mode",
|
||||||
actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-runner-scale-set-kube-mode-role-binding",
|
actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-rs-kube-mode",
|
||||||
}
|
}
|
||||||
|
|
||||||
for annotation, value := range annotationValues {
|
for annotation, value := range annotationValues {
|
||||||
|
|||||||
@@ -0,0 +1,17 @@
+githubConfigUrl: https://github.com/actions/actions-runner-controller
+githubConfigSecret:
+  github_token: test
+template:
+  spec:
+    initContainers:
+      - name: kube-init
+        image: runner-image:latest
+        command: ["sudo", "chown", "-R", "1001:123", "/home/runner/_work"]
+        volumeMounts:
+          - name: work
+            mountPath: /home/runner/_work
+      - name: ls
+        image: ubuntu:latest
+        command: ["ls"]
+containerMode:
+  type: dind
@@ -68,6 +68,12 @@ githubConfigSecret:
 #     key: ca.crt
 #   runnerMountPath: /usr/local/share/ca-certificates/
 
+## Container mode is an object that provides out-of-box configuration
+## for dind and kubernetes mode. Template will be modified as documented under the
+## template object.
+##
+## If any customization is required for dind or kubernetes mode, containerMode should remain
+## empty, and configuration should be applied to the template.
 # containerMode:
 #   type: "dind"  ## type can be set to dind or kubernetes
 #   ## the following is required when containerMode.type=kubernetes
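To illustrate the two configuration paths the comment above describes, here is a minimal values-file sketch (not part of the diff). It only uses keys that appear elsewhere in this changeset, and a real values file would also set githubConfigUrl and githubConfigSecret; the customized variant mirrors the new test fixture added earlier in this diff.

# Out-of-box: pick a containerMode and let the chart generate the dind or
# kubernetes plumbing for the runner pod.
containerMode:
  type: "dind"

# Customized: leave containerMode empty and shape the runner pod under the
# template object instead, e.g. with extra initContainers as in the fixture above.
template:
  spec:
    initContainers:
      - name: kube-init
        image: runner-image:latest
        command: ["sudo", "chown", "-R", "1001:123", "/home/runner/_work"]
        volumeMounts:
          - name: work
            mountPath: /home/runner/_work

These are alternatives for a single release: per the comment, any customization should go under template while containerMode stays empty.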
@@ -114,7 +114,14 @@ func createSession(ctx context.Context, logger *logr.Logger, client actions.Acti
     return runnerScaleSetSession, initialMessage, nil
   }
 
-  return runnerScaleSetSession, nil, nil
+  initialMessage := &actions.RunnerScaleSetMessage{
+    MessageId: 0,
+    MessageType: "RunnerScaleSetJobMessages",
+    Statistics: runnerScaleSetSession.Statistics,
+    Body: "",
+  }
+
+  return runnerScaleSetSession, initialMessage, nil
 }
 
 func (m *AutoScalerClient) Close() error {
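The change above means createSession always hands back a synthetic first message carrying only the scale set statistics (MessageId 0, empty Body); the processMessage change later in this diff treats exactly that shape as "scale to the assigned job count and nothing else". A small standalone sketch of that contract follows, using the message types from this repository; handleMessage is a hypothetical function written only to show the branch, not ARC code.

package main

import (
  "fmt"

  "github.com/actions/actions-runner-controller/github/actions"
)

// handleMessage mirrors the branch added to processMessage in this diff: an
// initial message (MessageId 0, empty Body) carries statistics only, so the
// caller just scales to TotalAssignedJobs instead of decoding a job batch.
func handleMessage(msg *actions.RunnerScaleSetMessage) {
  if msg.MessageId == 0 && msg.Body == "" { // initial message with statistics only
    fmt.Println("initial message: scale to", msg.Statistics.TotalAssignedJobs, "assigned jobs")
    return
  }
  fmt.Println("batched job messages in body, id", msg.MessageId)
}

func main() {
  // Shape of the synthetic message built by createSession above.
  initial := &actions.RunnerScaleSetMessage{
    MessageId:   0,
    MessageType: "RunnerScaleSetJobMessages",
    Statistics:  &actions.RunnerScaleSetStatistic{TotalAssignedJobs: 3},
    Body:        "",
  }
  handleMessage(initial)
}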
@@ -37,7 +37,7 @@ func TestCreateSession(t *testing.T) {
 
 require.NoError(t, err, "Error creating autoscaler client")
 assert.Equal(t, session, session, "Session is not correct")
-assert.Nil(t, asClient.initialMessage, "Initial message should be nil")
+assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
 assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
 assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
 }

@@ -188,7 +188,7 @@ func TestCreateSession_RetrySessionConflict(t *testing.T) {
 
 require.NoError(t, err, "Error creating autoscaler client")
 assert.Equal(t, session, session, "Session is not correct")
-assert.Nil(t, asClient.initialMessage, "Initial message should be nil")
+assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
 assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
 assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
 }

@@ -334,6 +334,14 @@ func TestGetRunnerScaleSetMessage(t *testing.T) {
   return nil
 })
 
+assert.NoError(t, err, "Error getting message")
+assert.Equal(t, int64(0), asClient.lastMessageId, "Initial message")
+
+err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
+  logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
+  return nil
+})
+
 assert.NoError(t, err, "Error getting message")
 assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
 assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")

@@ -371,13 +379,21 @@ func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) {
 })
 require.NoError(t, err, "Error creating autoscaler client")
 
+// read initial message
+err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
+  logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
+  return nil
+})
+
+assert.NoError(t, err, "Error getting message")
+
 err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
   logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
   return fmt.Errorf("error")
 })
 
 assert.ErrorContains(t, err, "handle message failed. error", "Error getting message")
-assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be updated")
+assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated")
 assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
 assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
 }

@@ -513,6 +529,12 @@ func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) {
 })
 require.NoError(t, err, "Error creating autoscaler client")
 
+err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
+  logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
+  return nil
+})
+assert.NoError(t, err, "Error getting initial message")
+
 err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
   logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
   return nil

@@ -550,6 +572,12 @@ func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) {
 })
 require.NoError(t, err, "Error creating autoscaler client")
 
+// process initial message
+err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
+  return nil
+})
+assert.NoError(t, err, "Error getting initial message")
+
 err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
   return fmt.Errorf("Should not be called")
 })

@@ -592,6 +620,12 @@ func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) {
 })
 require.NoError(t, err, "Error creating autoscaler client")
 
+err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
+  logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
+  return nil
+})
+assert.NoError(t, err, "Error getting initial message")
+
 err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
   logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
   return nil
@@ -3,6 +3,7 @@ package main
 import (
   "context"
   "encoding/json"
+  "errors"
   "fmt"
   "math"
   "strings"

@@ -25,6 +26,31 @@ type Service struct {
   kubeManager KubernetesManager
   settings *ScaleSettings
   currentRunnerCount int
+  metricsExporter metricsExporter
+  errs []error
+}
+
+func WithPrometheusMetrics(conf RunnerScaleSetListenerConfig) func(*Service) {
+  return func(svc *Service) {
+    parsedURL, err := actions.ParseGitHubConfigFromURL(conf.ConfigureUrl)
+    if err != nil {
+      svc.errs = append(svc.errs, err)
+    }
+
+    svc.metricsExporter.withBaseLabels(baseLabels{
+      scaleSetName: conf.EphemeralRunnerSetName,
+      scaleSetNamespace: conf.EphemeralRunnerSetNamespace,
+      enterprise: parsedURL.Enterprise,
+      organization: parsedURL.Organization,
+      repository: parsedURL.Repository,
+    })
+  }
+}
+
+func WithLogger(logger logr.Logger) func(*Service) {
+  return func(s *Service) {
+    s.logger = logger.WithName("service")
+  }
 }
 
 func NewService(

@@ -33,13 +59,13 @@ func NewService(
   manager KubernetesManager,
   settings *ScaleSettings,
   options ...func(*Service),
-) *Service {
+) (*Service, error) {
   s := &Service{
     ctx: ctx,
     rsClient: rsClient,
     kubeManager: manager,
     settings: settings,
-    currentRunnerCount: 0,
+    currentRunnerCount: -1, // force patch on startup
     logger: logr.FromContextOrDiscard(ctx),
   }
 

@@ -47,18 +73,14 @@ func NewService(
     option(s)
   }
 
-  return s
+  if len(s.errs) > 0 {
+    return nil, errors.Join(s.errs...)
+  }
+
+  return s, nil
 }
 
 func (s *Service) Start() error {
-  if s.settings.MinRunners > 0 {
-    s.logger.Info("scale to match minimal runners.")
-    err := s.scaleForAssignedJobCount(0)
-    if err != nil {
-      return fmt.Errorf("could not scale to match minimal runners. %w", err)
-    }
-  }
-
   for {
     s.logger.Info("waiting for message...")
     select {
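NewService now returns (*Service, error): functional options such as WithPrometheusMetrics record failures in s.errs instead of aborting, and the constructor joins them with errors.Join after all options have run. The following is a self-contained sketch of that option-with-collected-errors pattern; the widget/withName names are illustrative and not part of ARC, but the shape matches how the listener's tests below now call the constructor (value, err := New...; check err).

package main

import (
  "errors"
  "fmt"
)

// widget mirrors the pattern used by Service above: options mutate the struct
// and append any failures to errs; the constructor joins them at the end.
type widget struct {
  name string
  errs []error
}

type option func(*widget)

// withName is an illustrative option that records a validation error instead
// of failing immediately, the way WithPrometheusMetrics does with a bad URL.
func withName(name string) option {
  return func(w *widget) {
    if name == "" {
      w.errs = append(w.errs, errors.New("name must not be empty"))
      return
    }
    w.name = name
  }
}

func newWidget(opts ...option) (*widget, error) {
  w := &widget{}
  for _, opt := range opts {
    opt(w)
  }
  if len(w.errs) > 0 {
    return nil, errors.Join(w.errs...)
  }
  return w, nil
}

func main() {
  if _, err := newWidget(withName("")); err != nil {
    fmt.Println("constructor failed:", err) // prints the joined option errors
  }
  w, err := newWidget(withName("listener"))
  if err != nil {
    panic(err)
  }
  fmt.Println("constructed:", w.name)
}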
@@ -89,11 +111,17 @@ func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
   "busy runners", message.Statistics.TotalBusyRunners,
   "idle runners", message.Statistics.TotalIdleRunners)
 
+s.metricsExporter.publishStatistics(message.Statistics)
+
 if message.MessageType != "RunnerScaleSetJobMessages" {
   s.logger.Info("skip message with unknown message type.", "messageType", message.MessageType)
   return nil
 }
 
+if message.MessageId == 0 && message.Body == "" { // initial message with statistics only
+  return s.scaleForAssignedJobCount(message.Statistics.TotalAssignedJobs)
+}
+
 var batchedMessages []json.RawMessage
 if err := json.NewDecoder(strings.NewReader(message.Body)).Decode(&batchedMessages); err != nil {
   return fmt.Errorf("could not decode job messages. %w", err)

@@ -114,27 +142,54 @@ func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
 if err := json.Unmarshal(message, &jobAvailable); err != nil {
   return fmt.Errorf("could not decode job available message. %w", err)
 }
-s.logger.Info("job available message received.", "RequestId", jobAvailable.RunnerRequestId)
+s.logger.Info(
+  "job available message received.",
+  "RequestId",
+  jobAvailable.RunnerRequestId,
+)
 availableJobs = append(availableJobs, jobAvailable.RunnerRequestId)
 case "JobAssigned":
 var jobAssigned actions.JobAssigned
 if err := json.Unmarshal(message, &jobAssigned); err != nil {
   return fmt.Errorf("could not decode job assigned message. %w", err)
 }
-s.logger.Info("job assigned message received.", "RequestId", jobAssigned.RunnerRequestId)
+s.logger.Info(
+  "job assigned message received.",
+  "RequestId",
+  jobAssigned.RunnerRequestId,
+)
+// s.metricsExporter.publishJobAssigned(&jobAssigned)
 case "JobStarted":
 var jobStarted actions.JobStarted
 if err := json.Unmarshal(message, &jobStarted); err != nil {
   return fmt.Errorf("could not decode job started message. %w", err)
 }
-s.logger.Info("job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId)
+s.logger.Info(
+  "job started message received.",
+  "RequestId",
+  jobStarted.RunnerRequestId,
+  "RunnerId",
+  jobStarted.RunnerId,
+)
+s.metricsExporter.publishJobStarted(&jobStarted)
 s.updateJobInfoForRunner(jobStarted)
 case "JobCompleted":
 var jobCompleted actions.JobCompleted
 if err := json.Unmarshal(message, &jobCompleted); err != nil {
   return fmt.Errorf("could not decode job completed message. %w", err)
 }
-s.logger.Info("job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName)
+s.logger.Info(
+  "job completed message received.",
+  "RequestId",
+  jobCompleted.RunnerRequestId,
+  "Result",
+  jobCompleted.Result,
+  "RunnerId",
+  jobCompleted.RunnerId,
+  "RunnerName",
+  jobCompleted.RunnerName,
+)
+s.metricsExporter.publishJobCompleted(&jobCompleted)
 default:
 s.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
 }

@@ -150,13 +205,15 @@ func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
 
 func (s *Service) scaleForAssignedJobCount(count int) error {
   targetRunnerCount := int(math.Max(math.Min(float64(s.settings.MaxRunners), float64(count)), float64(s.settings.MinRunners)))
+  s.metricsExporter.publishDesiredRunners(targetRunnerCount)
   if targetRunnerCount != s.currentRunnerCount {
     s.logger.Info("try scale runner request up/down base on assigned job count",
       "assigned job", count,
       "decision", targetRunnerCount,
       "min", s.settings.MinRunners,
       "max", s.settings.MaxRunners,
-      "currentRunnerCount", s.currentRunnerCount)
+      "currentRunnerCount", s.currentRunnerCount,
+    )
     err := s.kubeManager.ScaleEphemeralRunnerSet(s.ctx, s.settings.Namespace, s.settings.ResourceName, targetRunnerCount)
     if err != nil {
       return fmt.Errorf("could not scale ephemeral runner set (%s/%s). %w", s.settings.Namespace, s.settings.ResourceName, err)

@@ -177,7 +234,8 @@ func (s *Service) updateJobInfoForRunner(jobInfo actions.JobStarted) {
   "workflowRef", jobInfo.JobWorkflowRef,
   "workflowRunId", jobInfo.WorkflowRunId,
   "jobDisplayName", jobInfo.JobDisplayName,
-  "requestId", jobInfo.RunnerRequestId)
+  "requestId", jobInfo.RunnerRequestId,
+)
 err := s.kubeManager.UpdateEphemeralRunnerWithJobInfo(s.ctx, s.settings.Namespace, jobInfo.RunnerName, jobInfo.OwnerName, jobInfo.RepositoryName, jobInfo.JobWorkflowRef, jobInfo.JobDisplayName, jobInfo.WorkflowRunId, jobInfo.RunnerRequestId)
 if err != nil {
   s.logger.Error(err, "could not update ephemeral runner with job info", "runnerName", jobInfo.RunnerName, "requestId", jobInfo.RunnerRequestId)
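For reference, scaleForAssignedJobCount clamps the assigned-job count into [MinRunners, MaxRunners] with max(min(maxRunners, count), minRunners). The expected calls of 1, 3 and 5 in TestScaleForAssignedJobCount_ScaleWithinMinMax below are consistent with min=1 and max=5, although the exact ScaleSettings used by that test are not visible in this excerpt. A tiny standalone check of the same arithmetic under that assumption:

package main

import (
  "fmt"
  "math"
)

// clamp mirrors the expression used in scaleForAssignedJobCount:
// target = max(min(maxRunners, count), minRunners).
func clamp(count, minRunners, maxRunners int) int {
  return int(math.Max(math.Min(float64(maxRunners), float64(count)), float64(minRunners)))
}

func main() {
  // Assuming min=1, max=5, assigned job counts of 0, 3 and 9 produce
  // scaling targets of 1, 3 and 5 respectively.
  for _, count := range []int{0, 3, 9} {
    fmt.Printf("assigned=%d -> target=%d\n", count, clamp(count, 1, 5))
  }
}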
@@ -21,7 +21,7 @@ func TestNewService(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -36,6 +36,7 @@ func TestNewService(t *testing.T) {
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
assert.Equal(t, logger, service.logger)
|
assert.Equal(t, logger, service.logger)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -47,7 +48,7 @@ func TestStart(t *testing.T) {
|
|||||||
require.NoError(t, log_err, "Error creating logger")
|
require.NoError(t, log_err, "Error creating logger")
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -61,9 +62,11 @@ func TestStart(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
||||||
|
|
||||||
err := service.Start()
|
err = service.Start()
|
||||||
|
|
||||||
assert.NoError(t, err, "Unexpected error")
|
assert.NoError(t, err, "Unexpected error")
|
||||||
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
||||||
@@ -72,13 +75,14 @@ func TestStart(t *testing.T) {
|
|||||||
|
|
||||||
func TestStart_ScaleToMinRunners(t *testing.T) {
|
func TestStart_ScaleToMinRunners(t *testing.T) {
|
||||||
mockRsClient := &MockRunnerScaleSetClient{}
|
mockRsClient := &MockRunnerScaleSetClient{}
|
||||||
|
|
||||||
mockKubeManager := &MockKubernetesManager{}
|
mockKubeManager := &MockKubernetesManager{}
|
||||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||||
logger = logger.WithName(t.Name())
|
logger = logger.WithName(t.Name())
|
||||||
require.NoError(t, log_err, "Error creating logger")
|
require.NoError(t, log_err, "Error creating logger")
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -92,11 +96,17 @@ func TestStart_ScaleToMinRunners(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything).Run(func(args mock.Arguments) {
|
||||||
|
_ = service.scaleForAssignedJobCount(5)
|
||||||
|
}).Return(nil)
|
||||||
|
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
||||||
|
|
||||||
err := service.Start()
|
err = service.Start()
|
||||||
|
|
||||||
assert.NoError(t, err, "Unexpected error")
|
assert.NoError(t, err, "Unexpected error")
|
||||||
|
|
||||||
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
||||||
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
|
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
|
||||||
}
|
}
|
||||||
@@ -110,7 +120,7 @@ func TestStart_ScaleToMinRunnersFailed(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -124,11 +134,16 @@ func TestStart_ScaleToMinRunnersFailed(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(fmt.Errorf("error")).Once()
|
require.NoError(t, err)
|
||||||
|
|
||||||
err := service.Start()
|
c := mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(fmt.Errorf("error")).Once()
|
||||||
|
mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything).Run(func(args mock.Arguments) {
|
||||||
|
_ = service.scaleForAssignedJobCount(5)
|
||||||
|
}).Return(c.ReturnArguments.Get(0))
|
||||||
|
|
||||||
assert.ErrorContains(t, err, "could not scale to match minimal runners", "Unexpected error")
|
err = service.Start()
|
||||||
|
|
||||||
|
assert.ErrorContains(t, err, "could not get and process message", "Unexpected error")
|
||||||
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
||||||
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
|
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
|
||||||
}
|
}
|
||||||
@@ -141,7 +156,7 @@ func TestStart_GetMultipleMessages(t *testing.T) {
|
|||||||
require.NoError(t, log_err, "Error creating logger")
|
require.NoError(t, log_err, "Error creating logger")
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -155,10 +170,12 @@ func TestStart_GetMultipleMessages(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(5)
|
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(5)
|
||||||
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
||||||
|
|
||||||
err := service.Start()
|
err = service.Start()
|
||||||
|
|
||||||
assert.NoError(t, err, "Unexpected error")
|
assert.NoError(t, err, "Unexpected error")
|
||||||
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
||||||
@@ -174,7 +191,7 @@ func TestStart_ErrorOnMessage(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -188,10 +205,12 @@ func TestStart_ErrorOnMessage(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(2)
|
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(2)
|
||||||
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(fmt.Errorf("error")).Once()
|
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(fmt.Errorf("error")).Once()
|
||||||
|
|
||||||
err := service.Start()
|
err = service.Start()
|
||||||
|
|
||||||
assert.ErrorContains(t, err, "could not get and process message. error", "Unexpected error")
|
assert.ErrorContains(t, err, "could not get and process message. error", "Unexpected error")
|
||||||
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
||||||
@@ -207,7 +226,7 @@ func TestProcessMessage_NoStatistic(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -221,8 +240,9 @@ func TestProcessMessage_NoStatistic(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
err := service.processMessage(&actions.RunnerScaleSetMessage{
|
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
||||||
MessageId: 1,
|
MessageId: 1,
|
||||||
MessageType: "test",
|
MessageType: "test",
|
||||||
Body: "test",
|
Body: "test",
|
||||||
@@ -242,7 +262,7 @@ func TestProcessMessage_IgnoreUnknownMessageType(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -256,8 +276,9 @@ func TestProcessMessage_IgnoreUnknownMessageType(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
err := service.processMessage(&actions.RunnerScaleSetMessage{
|
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
||||||
MessageId: 1,
|
MessageId: 1,
|
||||||
MessageType: "unknown",
|
MessageType: "unknown",
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{
|
Statistics: &actions.RunnerScaleSetStatistic{
|
||||||
@@ -280,7 +301,7 @@ func TestProcessMessage_InvalidBatchMessageJson(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -295,7 +316,9 @@ func TestProcessMessage_InvalidBatchMessageJson(t *testing.T) {
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
err := service.processMessage(&actions.RunnerScaleSetMessage{
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
||||||
MessageId: 1,
|
MessageId: 1,
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
MessageType: "RunnerScaleSetJobMessages",
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{
|
Statistics: &actions.RunnerScaleSetStatistic{
|
||||||
@@ -318,7 +341,7 @@ func TestProcessMessage_InvalidJobMessageJson(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -332,8 +355,9 @@ func TestProcessMessage_InvalidJobMessageJson(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
err := service.processMessage(&actions.RunnerScaleSetMessage{
|
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
||||||
MessageId: 1,
|
MessageId: 1,
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
MessageType: "RunnerScaleSetJobMessages",
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{
|
Statistics: &actions.RunnerScaleSetStatistic{
|
||||||
@@ -356,7 +380,7 @@ func TestProcessMessage_MultipleMessages(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -370,10 +394,12 @@ func TestProcessMessage_MultipleMessages(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once()
|
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
||||||
|
|
||||||
err := service.processMessage(&actions.RunnerScaleSetMessage{
|
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
||||||
MessageId: 1,
|
MessageId: 1,
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
MessageType: "RunnerScaleSetJobMessages",
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{
|
Statistics: &actions.RunnerScaleSetStatistic{
|
||||||
@@ -397,7 +423,7 @@ func TestProcessMessage_AcquireJobsFailed(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -411,9 +437,11 @@ func TestProcessMessage_AcquireJobsFailed(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 })).Return(fmt.Errorf("error")).Once()
|
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 })).Return(fmt.Errorf("error")).Once()
|
||||||
|
|
||||||
err := service.processMessage(&actions.RunnerScaleSetMessage{
|
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
||||||
MessageId: 1,
|
MessageId: 1,
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
MessageType: "RunnerScaleSetJobMessages",
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{
|
Statistics: &actions.RunnerScaleSetStatistic{
|
||||||
@@ -437,7 +465,7 @@ func TestScaleForAssignedJobCount_DeDupScale(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -451,9 +479,11 @@ func TestScaleForAssignedJobCount_DeDupScale(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()
|
||||||
|
|
||||||
err := service.scaleForAssignedJobCount(2)
|
err = service.scaleForAssignedJobCount(2)
|
||||||
require.NoError(t, err, "Unexpected error")
|
require.NoError(t, err, "Unexpected error")
|
||||||
err = service.scaleForAssignedJobCount(2)
|
err = service.scaleForAssignedJobCount(2)
|
||||||
require.NoError(t, err, "Unexpected error")
|
require.NoError(t, err, "Unexpected error")
|
||||||
@@ -476,7 +506,7 @@ func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -490,13 +520,15 @@ func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
|
||||||
|
|
||||||
err := service.scaleForAssignedJobCount(0)
|
err = service.scaleForAssignedJobCount(0)
|
||||||
require.NoError(t, err, "Unexpected error")
|
require.NoError(t, err, "Unexpected error")
|
||||||
err = service.scaleForAssignedJobCount(3)
|
err = service.scaleForAssignedJobCount(3)
|
||||||
require.NoError(t, err, "Unexpected error")
|
require.NoError(t, err, "Unexpected error")
|
||||||
@@ -521,7 +553,7 @@ func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -535,9 +567,11 @@ func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(fmt.Errorf("error"))
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(fmt.Errorf("error"))
|
||||||
|
|
||||||
err := service.scaleForAssignedJobCount(2)
|
err = service.scaleForAssignedJobCount(2)
|
||||||
|
|
||||||
assert.ErrorContains(t, err, "could not scale ephemeral runner set (namespace/resource). error", "Unexpected error")
|
assert.ErrorContains(t, err, "could not scale ephemeral runner set (namespace/resource). error", "Unexpected error")
|
||||||
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
|
||||||
@@ -553,7 +587,7 @@ func TestProcessMessage_JobStartedMessage(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -567,12 +601,14 @@ func TestProcessMessage_JobStartedMessage(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
service.currentRunnerCount = 1
|
service.currentRunnerCount = 1
|
||||||
|
|
||||||
mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
||||||
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
|
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
|
||||||
|
|
||||||
err := service.processMessage(&actions.RunnerScaleSetMessage{
|
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
||||||
MessageId: 1,
|
MessageId: 1,
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
MessageType: "RunnerScaleSetJobMessages",
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{
|
Statistics: &actions.RunnerScaleSetStatistic{
|
||||||
@@ -596,7 +632,7 @@ func TestProcessMessage_JobStartedMessageIgnoreRunnerUpdateError(t *testing.T) {
|
|||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
service := NewService(
|
service, err := NewService(
|
||||||
ctx,
|
ctx,
|
||||||
mockRsClient,
|
mockRsClient,
|
||||||
mockKubeManager,
|
mockKubeManager,
|
||||||
@@ -610,12 +646,14 @@ func TestProcessMessage_JobStartedMessageIgnoreRunnerUpdateError(t *testing.T) {
|
|||||||
s.logger = logger
|
s.logger = logger
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
service.currentRunnerCount = 1
|
service.currentRunnerCount = 1
|
||||||
|
|
||||||
mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(fmt.Errorf("error")).Once()
|
mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(fmt.Errorf("error")).Once()
|
||||||
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
|
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
|
||||||
|
|
||||||
err := service.processMessage(&actions.RunnerScaleSetMessage{
|
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
||||||
MessageId: 1,
|
MessageId: 1,
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
MessageType: "RunnerScaleSetJobMessages",
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{
|
Statistics: &actions.RunnerScaleSetStatistic{
|
||||||
|
|||||||
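The hunks above track a signature change: NewService now returns an error alongside the service, so the tests bind both values and assert construction succeeded before feeding messages. A minimal sketch of that call shape, using a hypothetical constructor rather than the listener's real Service type:

package main

import (
	"errors"
	"fmt"
)

// listener stands in for the real Service; this constructor is hypothetical
// and only mirrors the (value, error) shape the updated tests expect.
type listener struct{ minRunners int }

func newListener(minRunners int) (*listener, error) {
	if minRunners < 0 {
		return nil, errors.New("minRunners must be non-negative")
	}
	return &listener{minRunners: minRunners}, nil
}

func main() {
	l, err := newListener(1)
	if err != nil {
		// callers can no longer ignore construction failures
		fmt.Println("failed to create service:", err)
		return
	}
	fmt.Println("created listener with minRunners =", l.minRunners)
}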
@@ -25,13 +25,17 @@ import (
 	"os"
 	"os/signal"
 	"syscall"
+	"time"

 	"github.com/actions/actions-runner-controller/build"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/actions/actions-runner-controller/logging"
 	"github.com/go-logr/logr"
 	"github.com/kelseyhightower/envconfig"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"golang.org/x/net/http/httpproxy"
+	"golang.org/x/sync/errgroup"
 )

 type RunnerScaleSetListenerConfig struct {
@@ -45,19 +49,34 @@ type RunnerScaleSetListenerConfig struct {
 	MaxRunners         int    `split_words:"true"`
 	MinRunners         int    `split_words:"true"`
 	RunnerScaleSetId   int    `split_words:"true"`
+	RunnerScaleSetName string `split_words:"true"`
 	ServerRootCA       string `split_words:"true"`
+	LogLevel           string `split_words:"true"`
+	LogFormat          string `split_words:"true"`
+	MetricsAddr        string `split_words:"true"`
+	MetricsEndpoint    string `split_words:"true"`
 }

 func main() {
-	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err)
+	var rc RunnerScaleSetListenerConfig
+	if err := envconfig.Process("github", &rc); err != nil {
+		fmt.Fprintf(os.Stderr, "Error: processing environment variables for RunnerScaleSetListenerConfig: %v\n", err)
 		os.Exit(1)
 	}

-	var rc RunnerScaleSetListenerConfig
-	if err := envconfig.Process("github", &rc); err != nil {
-		logger.Error(err, "Error: processing environment variables for RunnerScaleSetListenerConfig")
+	logLevel := string(logging.LogLevelDebug)
+	if rc.LogLevel != "" {
+		logLevel = rc.LogLevel
+	}
+
+	logFormat := string(logging.LogFormatText)
+	if rc.LogFormat != "" {
+		logFormat = rc.LogFormat
+	}
+
+	logger, err := logging.NewLogger(logLevel, logFormat)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err)
 		os.Exit(1)
 	}

@@ -67,17 +86,95 @@ func main() {
 		os.Exit(1)
 	}

-	if err := run(rc, logger); err != nil {
-		logger.Error(err, "Run error")
+	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+	defer stop()
+
+	g, ctx := errgroup.WithContext(ctx)
+
+	g.Go(func() error {
+		opts := runOptions{
+			serviceOptions: []func(*Service){
+				WithLogger(logger),
+			},
+		}
+		opts.serviceOptions = append(opts.serviceOptions, WithPrometheusMetrics(rc))
+
+		return run(ctx, rc, logger, opts)
+	})
+
+	if len(rc.MetricsAddr) != 0 {
+		g.Go(func() error {
+			metricsServer := metricsServer{
+				rc:     rc,
+				logger: logger,
+			}
+			g.Go(func() error {
+				<-ctx.Done()
+				return metricsServer.shutdown()
+			})
+			return metricsServer.listenAndServe()
+		})
+	}
+
+	if err := g.Wait(); err != nil {
+		logger.Error(err, "Error encountered")
 		os.Exit(1)
 	}
 }

-func run(rc RunnerScaleSetListenerConfig, logger logr.Logger) error {
-	// Create root context and hook with sigint and sigterm
-	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
-	defer stop()
+type metricsServer struct {
+	rc     RunnerScaleSetListenerConfig
+	logger logr.Logger
+	srv    *http.Server
+}
+
+func (s *metricsServer) shutdown() error {
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+	return s.srv.Shutdown(ctx)
+}
+
+func (s *metricsServer) listenAndServe() error {
+	reg := prometheus.NewRegistry()
+	reg.MustRegister(
+		// availableJobs,
+		// acquiredJobs,
+		assignedJobs,
+		runningJobs,
+		registeredRunners,
+		busyRunners,
+		minRunners,
+		maxRunners,
+		desiredRunners,
+		idleRunners,
+		startedJobsTotal,
+		completedJobsTotal,
+		// jobQueueDurationSeconds,
+		jobStartupDurationSeconds,
+		jobExecutionDurationSeconds,
+	)
+
+	mux := http.NewServeMux()
+	mux.Handle(
+		s.rc.MetricsEndpoint,
+		promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}),
+	)
+
+	s.srv = &http.Server{
+		Addr:    s.rc.MetricsAddr,
+		Handler: mux,
+	}
+
+	s.logger.Info("Starting metrics server", "address", s.srv.Addr)
+	return s.srv.ListenAndServe()
+}
+
+type runOptions struct {
+	serviceOptions []func(*Service)
+}
+
+func run(ctx context.Context, rc RunnerScaleSetListenerConfig, logger logr.Logger, opts runOptions) error {
+	// Create root context and hook with sigint and sigterm
 	creds := &actions.ActionsAuth{}
 	if rc.Token != "" {
 		creds.Token = rc.Token
@@ -119,9 +216,10 @@ func run(rc RunnerScaleSetListenerConfig, logger logr.Logger) error {
 		MinRunners: rc.MinRunners,
 	}

-	service := NewService(ctx, autoScalerClient, kubeManager, scaleSettings, func(s *Service) {
-		s.logger = logger.WithName("service")
-	})
+	service, err := NewService(ctx, autoScalerClient, kubeManager, scaleSettings, opts.serviceOptions...)
+	if err != nil {
+		return fmt.Errorf("failed to create new service: %v", err)
+	}

 	// Start listening for messages
 	if err = service.Start(); err != nil {
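The rewritten main() runs the scale-set listener and an optional Prometheus endpoint as sibling goroutines in an errgroup and shuts the HTTP server down once the signal-derived context is cancelled. A self-contained sketch of that coordination pattern, assuming a placeholder :8080 address and an empty mux rather than the listener's real configuration:

package main

import (
	"context"
	"net/http"
	"os/signal"
	"syscall"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	// Cancelled on SIGINT/SIGTERM, just like the listener's root context.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	g, ctx := errgroup.WithContext(ctx)

	srv := &http.Server{Addr: ":8080", Handler: http.NewServeMux()}

	// Serve until the server is shut down or fails.
	g.Go(func() error {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			return err
		}
		return nil
	})

	// Shut the server down once the shared context is cancelled.
	g.Go(func() error {
		<-ctx.Done()
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		return srv.Shutdown(shutdownCtx)
	})

	if err := g.Wait(); err != nil {
		panic(err)
	}
}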
330  cmd/githubrunnerscalesetlistener/metrics.go  Normal file
@@ -0,0 +1,330 @@
package main

import (
	"strconv"

	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/prometheus/client_golang/prometheus"
)

// label names
const (
	labelKeyRunnerScaleSetName      = "name"
	labelKeyRunnerScaleSetNamespace = "namespace"
	labelKeyEnterprise              = "enterprise"
	labelKeyOrganization            = "organization"
	labelKeyRepository              = "repository"
	labelKeyJobName                 = "job_name"
	labelKeyJobWorkflowRef          = "job_workflow_ref"
	labelKeyEventName               = "event_name"
	labelKeyJobResult               = "job_result"
	labelKeyRunnerID                = "runner_id"
	labelKeyRunnerName              = "runner_name"
)

const githubScaleSetSubsystem = "gha"

// labels
var (
	scaleSetLabels = []string{
		labelKeyRunnerScaleSetName,
		labelKeyRepository,
		labelKeyOrganization,
		labelKeyEnterprise,
		labelKeyRunnerScaleSetNamespace,
	}

	jobLabels = []string{
		labelKeyRepository,
		labelKeyOrganization,
		labelKeyEnterprise,
		labelKeyJobName,
		labelKeyJobWorkflowRef,
		labelKeyEventName,
	}

	completedJobsTotalLabels   = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
	jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
	startedJobsTotalLabels     = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
	jobStartupDurationLabels   = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
)

// metrics
var (
	// availableJobs = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetSubsystem, Name: "available_jobs", Help: "Number of jobs with `runs-on` matching the runner scale set name. Jobs are not yet assigned to the runner scale set."}, scaleSetLabels)
	// acquiredJobs = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetSubsystem, Name: "acquired_jobs", Help: "Number of jobs acquired by the scale set."}, scaleSetLabels)

	assignedJobs      = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetSubsystem, Name: "assigned_jobs", Help: "Number of jobs assigned to this scale set."}, scaleSetLabels)
	runningJobs       = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetSubsystem, Name: "running_jobs", Help: "Number of jobs running (or about to be run)."}, scaleSetLabels)
	registeredRunners = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetSubsystem, Name: "registered_runners", Help: "Number of runners registered by the scale set."}, scaleSetLabels)
	busyRunners       = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetSubsystem, Name: "busy_runners", Help: "Number of registered runners running a job."}, scaleSetLabels)
	minRunners        = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetSubsystem, Name: "min_runners", Help: "Minimum number of runners."}, scaleSetLabels)
	maxRunners        = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetSubsystem, Name: "max_runners", Help: "Maximum number of runners."}, scaleSetLabels)
	desiredRunners    = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetSubsystem, Name: "desired_runners", Help: "Number of runners desired by the scale set."}, scaleSetLabels)
	idleRunners       = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetSubsystem, Name: "idle_runners", Help: "Number of registered runners not running a job."}, scaleSetLabels)

	startedJobsTotal   = prometheus.NewCounterVec(prometheus.CounterOpts{Subsystem: githubScaleSetSubsystem, Name: "started_jobs_total", Help: "Total number of jobs started."}, startedJobsTotalLabels)
	completedJobsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{Subsystem: githubScaleSetSubsystem, Name: "completed_jobs_total", Help: "Total number of jobs completed."}, completedJobsTotalLabels)

	// jobQueueDurationSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{Subsystem: githubScaleSetSubsystem, Name: "job_queue_duration_seconds", Help: "Time spent waiting for workflow jobs to get assigned to the scale set after queueing (in seconds).", Buckets: runtimeBuckets}, jobLabels)

	jobStartupDurationSeconds   = prometheus.NewHistogramVec(prometheus.HistogramOpts{Subsystem: githubScaleSetSubsystem, Name: "job_startup_duration_seconds", Help: "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).", Buckets: runtimeBuckets}, jobStartupDurationLabels)
	jobExecutionDurationSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{Subsystem: githubScaleSetSubsystem, Name: "job_execution_duration_seconds", Help: "Time spent executing workflow jobs by the scale set (in seconds).", Buckets: runtimeBuckets}, jobExecutionDurationLabels)
)

var runtimeBuckets []float64 = []float64{
	0.01, 0.05, 0.1, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	12, 15, 18, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120,
	150, 180, 210, 240, 300, 360, 420, 480, 540, 600,
	900, 1200, 1800, 2400, 3000, 3600,
}

type metricsExporter struct {
	// Initialized during creation.
	baseLabels
}

type baseLabels struct {
	scaleSetName      string
	scaleSetNamespace string
	enterprise        string
	organization      string
	repository        string
}

func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
	return prometheus.Labels{
		labelKeyEnterprise:     b.enterprise,
		labelKeyOrganization:   b.organization,
		labelKeyRepository:     b.repository,
		labelKeyJobName:        jobBase.JobDisplayName,
		labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
		labelKeyEventName:      jobBase.EventName,
	}
}

func (b *baseLabels) scaleSetLabels() prometheus.Labels {
	return prometheus.Labels{
		labelKeyRunnerScaleSetName:      b.scaleSetName,
		labelKeyRunnerScaleSetNamespace: b.scaleSetNamespace,
		labelKeyEnterprise:              b.enterprise,
		labelKeyOrganization:            b.organization,
		labelKeyRepository:              b.repository,
	}
}

func (b *baseLabels) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
	l := b.jobLabels(&msg.JobMessageBase)
	l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
	l[labelKeyJobResult] = msg.Result
	l[labelKeyRunnerName] = msg.RunnerName
	return l
}

func (b *baseLabels) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
	l := b.jobLabels(&msg.JobMessageBase)
	l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
	l[labelKeyRunnerName] = msg.RunnerName
	return l
}

func (m *metricsExporter) withBaseLabels(base baseLabels) {
	m.baseLabels = base
}

func (m *metricsExporter) publishStatistics(stats *actions.RunnerScaleSetStatistic) {
	l := m.scaleSetLabels()

	// availableJobs.With(l).Set(float64(stats.TotalAvailableJobs))
	// acquiredJobs.With(l).Set(float64(stats.TotalAcquiredJobs))
	assignedJobs.With(l).Set(float64(stats.TotalAssignedJobs))
	runningJobs.With(l).Set(float64(stats.TotalRunningJobs))
	registeredRunners.With(l).Set(float64(stats.TotalRegisteredRunners))
	busyRunners.With(l).Set(float64(stats.TotalBusyRunners))
	idleRunners.With(l).Set(float64(stats.TotalIdleRunners))
}

func (m *metricsExporter) publishJobStarted(msg *actions.JobStarted) {
	l := m.startedJobLabels(msg)
	startedJobsTotal.With(l).Inc()

	startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
	jobStartupDurationSeconds.With(l).Observe(float64(startupDuration))
}

// func (m *metricsExporter) publishJobAssigned(msg *actions.JobAssigned) {
// 	l := m.jobLabels(&msg.JobMessageBase)
// 	queueDuration := msg.JobMessageBase.ScaleSetAssignTime.Unix() - msg.JobMessageBase.QueueTime.Unix()
// 	jobQueueDurationSeconds.With(l).Observe(float64(queueDuration))
// }

func (m *metricsExporter) publishJobCompleted(msg *actions.JobCompleted) {
	l := m.completedJobLabels(msg)
	completedJobsTotal.With(l).Inc()

	executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
	jobExecutionDurationSeconds.With(l).Observe(float64(executionDuration))
}

func (m *metricsExporter) publishDesiredRunners(count int) {
	desiredRunners.With(m.scaleSetLabels()).Set(float64(count))
}
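The histograms above are fed by subtracting Unix timestamps carried on the job messages (scale-set assign time, runner assign time, finish time). A small sketch of that arithmetic against a cut-down histogram; the metric name and buckets here are illustrative, not the listener's real ones:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// Illustrative histogram in the same style as the listener's runtime buckets.
var startupSeconds = prometheus.NewHistogram(prometheus.HistogramOpts{
	Subsystem: "gha",
	Name:      "example_job_startup_duration_seconds",
	Help:      "Time between a job being assigned and a runner picking it up (seconds).",
	Buckets:   []float64{1, 5, 10, 30, 60, 120, 300},
})

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(startupSeconds)

	assigned := time.Now().Add(-42 * time.Second)
	started := time.Now()

	// Same arithmetic as the exporter: difference of Unix timestamps, in seconds.
	startupSeconds.Observe(float64(started.Unix() - assigned.Unix()))
	fmt.Println("recorded a startup duration of roughly 42s")
}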
@@ -124,7 +124,7 @@ func main() {
 	if watchNamespace == "" {
 		logger.Info("-watch-namespace is empty. HorizontalRunnerAutoscalers in all the namespaces are watched, cached, and considered as scale targets.")
 	} else {
-		logger.Info("-watch-namespace is %q. Only HorizontalRunnerAutoscalers in %q are watched, cached, and considered as scale targets.", watchNamespace, watchNamespace)
+		logger.Info(fmt.Sprintf("-watch-namespace is %q. Only HorizontalRunnerAutoscalers in %q are watched, cached, and considered as scale targets.", watchNamespace, watchNamespace))
 	}

 	ctrl.SetLogger(logger)
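The fix above exists because logr loggers take a message plus key/value pairs rather than printf verbs, so a formatted string has to be built with fmt.Sprintf first. A short sketch of both forms, using the stdr adapter purely for demonstration:

package main

import (
	"fmt"
	stdlog "log"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/stdr"
)

func main() {
	var logger logr.Logger = stdr.New(stdlog.New(os.Stdout, "", stdlog.LstdFlags))
	ns := "runners"

	// logr has no printf-style Info; extra arguments are key/value pairs,
	// so formatting has to happen before the call.
	logger.Info(fmt.Sprintf("-watch-namespace is %q", ns))

	// The structured alternative keeps the value queryable as a field.
	logger.Info("watching a single namespace", "namespace", ns)
}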
@@ -113,7 +113,7 @@ spec:
               description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
               type: integer
             scaleTargetRef:
-              description: ScaleTargetRef sis the reference to scaled resource like RunnerDeployment
+              description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
               properties:
                 kind:
                   description: Kind is the type of resource being referenced
@@ -1497,6 +1497,12 @@ spec:
                 type: integer
               dockerRegistryMirror:
                 type: string
+              dockerVarRunVolumeSizeLimit:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
               dockerVolumeMounts:
                 items:
                   description: VolumeMount describes a mounting of a Volume within a container.
@@ -1479,6 +1479,12 @@ spec:
                 type: integer
               dockerRegistryMirror:
                 type: string
+              dockerVarRunVolumeSizeLimit:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
               dockerVolumeMounts:
                 items:
                   description: VolumeMount describes a mounting of a Volume within a container.
@@ -1432,6 +1432,12 @@ spec:
                 type: integer
               dockerRegistryMirror:
                 type: string
+              dockerVarRunVolumeSizeLimit:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
               dockerVolumeMounts:
                 items:
                   description: VolumeMount describes a mounting of a Volume within a container.
@@ -55,6 +55,12 @@ spec:
                 type: integer
               dockerRegistryMirror:
                 type: string
+              dockerVarRunVolumeSizeLimit:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
               dockerdWithinRunnerContainer:
                 type: boolean
               effectiveTime:
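The new dockerVarRunVolumeSizeLimit field uses the standard Kubernetes int-or-string quantity pattern shown in the regex above. A hedged sketch of how such a value parses and could back an emptyDir size limit; the volume name and the exact wiring inside the runner pod are assumptions, not taken from the diff:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// The CRD pattern matches the usual Kubernetes quantity syntax,
	// so values like "1Gi" or "500Mi" are accepted.
	limit := resource.MustParse("1Gi")

	// Plausible use: capping the emptyDir that backs /var/run for dockerd.
	vol := corev1.Volume{
		Name: "var-run",
		VolumeSource: corev1.VolumeSource{
			EmptyDir: &corev1.EmptyDirVolumeSource{SizeLimit: &limit},
		},
	}
	fmt.Println(vol.Name, vol.EmptyDir.SizeLimit.String())
}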
@@ -33,6 +33,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"

 	v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
+	"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
+	"github.com/actions/actions-runner-controller/github/actions"
 	hash "github.com/actions/actions-runner-controller/hash"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
@@ -49,6 +51,10 @@ type AutoscalingListenerReconciler struct {
 	client.Client
 	Log    logr.Logger
 	Scheme *runtime.Scheme
+	// ListenerMetricsAddr is address that the metrics endpoint binds to.
+	// If it is set to "0", the metrics server is not started.
+	ListenerMetricsAddr     string
+	ListenerMetricsEndpoint string

 	resourceBuilder resourceBuilder
 }
@@ -227,6 +233,11 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 			return ctrl.Result{}, err
 		}

+		if err := r.publishRunningListener(autoscalingListener, false); err != nil {
+			// If publish fails, URL is incorrect which means the listener pod would never be able to start
+			return ctrl.Result{}, nil
+		}
+
 		// Create a listener pod in the controller namespace
 		log.Info("Creating a listener pod")
 		return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log)
@@ -242,6 +253,16 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 		}
 	}

+	if listenerPod.Status.Phase == corev1.PodRunning {
+		if err := r.publishRunningListener(autoscalingListener, true); err != nil {
+			log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+			// stop reconciling. We should never get to this point but if we do,
+			// listener won't be able to start up, and the crash from the pod should
+			// notify the reconciler again.
+			return ctrl.Result{}, nil
+		}
+	}
+
 	return ctrl.Result{}, nil
 }

@@ -260,6 +281,9 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
 		return false, nil
 	case err != nil && !kerrors.IsNotFound(err):
 		return false, fmt.Errorf("failed to get listener pods: %v", err)
+
+	default: // NOT FOUND
+		_ = r.publishRunningListener(autoscalingListener, false) // If error is returned, we never published metrics so it is safe to ignore
 	}
 	logger.Info("Listener pod is deleted")

@@ -371,9 +395,22 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
 		envs = append(envs, env)
 	}

-	newPod := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret, envs...)
+	var metricsConfig *listenerMetricsServerConfig
+	if r.ListenerMetricsAddr != "0" {
+		metricsConfig = &listenerMetricsServerConfig{
+			addr:     r.ListenerMetricsAddr,
+			endpoint: r.ListenerMetricsEndpoint,
+		}
+	}
+
+	newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret, metricsConfig, envs...)
+	if err != nil {
+		logger.Error(err, "Failed to build listener pod")
+		return ctrl.Result{}, err
+	}

 	if err := ctrl.SetControllerReference(autoscalingListener, newPod, r.Scheme); err != nil {
+		logger.Error(err, "Failed to set controller reference")
 		return ctrl.Result{}, err
 	}

@@ -556,6 +593,30 @@ func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
 	return ctrl.Result{Requeue: true}, nil
 }

+func (r *AutoscalingListenerReconciler) publishRunningListener(autoscalingListener *v1alpha1.AutoscalingListener, isUp bool) error {
+	githubConfigURL := autoscalingListener.Spec.GitHubConfigUrl
+	parsedURL, err := actions.ParseGitHubConfigFromURL(githubConfigURL)
+	if err != nil {
+		return err
+	}
+
+	commonLabels := metrics.CommonLabels{
+		Name:         autoscalingListener.Name,
+		Namespace:    autoscalingListener.Namespace,
+		Repository:   parsedURL.Repository,
+		Organization: parsedURL.Organization,
+		Enterprise:   parsedURL.Enterprise,
+	}
+
+	if isUp {
+		metrics.AddRunningListener(commonLabels)
+	} else {
+		metrics.SubRunningListener(commonLabels)
+	}
+
+	return nil
+}
+
 // SetupWithManager sets up the controller with the Manager.
 func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	groupVersionIndexer := func(rawObj client.Object) []string {
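publishRunningListener derives its metric labels by parsing the scale set's GitHub config URL. A small usage sketch that leans on ParseGitHubConfigFromURL exactly as the reconciler above does; the example URL and the printed fields are illustrative, and the function's behaviour beyond the fields used in the diff is assumed:

package main

import (
	"fmt"

	"github.com/actions/actions-runner-controller/github/actions"
)

func main() {
	// For a repository-scoped config URL, Organization and Repository are
	// expected to be populated and Enterprise left empty.
	parsed, err := actions.ParseGitHubConfigFromURL("https://github.com/my-org/my-repo")
	if err != nil {
		panic(err)
	}
	fmt.Println("org:", parsed.Organization, "repo:", parsed.Repository, "enterprise:", parsed.Enterprise)
}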
@@ -49,6 +49,24 @@ const (
 	runnerScaleSetNameAnnotationKey = "runner-scale-set-name"
 )

+type UpdateStrategy string
+
+// Defines how the controller should handle upgrades while having running jobs.
+const (
+	// "immediate": (default) The controller will immediately apply the change causing the
+	// recreation of the listener and ephemeral runner set. This can lead to an
+	// overprovisioning of runners, if there are pending / running jobs. This should not
+	// be a problem at a small scale, but it could lead to a significant increase of
+	// resources if you have a lot of jobs running concurrently.
+	UpdateStrategyImmediate = UpdateStrategy("immediate")
+	// "eventual": The controller will remove the listener and ephemeral runner set
+	// immediately, but will not recreate them (to apply changes) until all
+	// pending / running jobs have completed.
+	// This can lead to a longer time to apply the change but it will ensure
+	// that you don't have any overprovisioning of runners.
+	UpdateStrategyEventual = UpdateStrategy("eventual")
+)
+
 // AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object
 type AutoscalingRunnerSetReconciler struct {
 	client.Client
@@ -57,6 +75,7 @@ type AutoscalingRunnerSetReconciler struct {
 	ControllerNamespace                           string
 	DefaultRunnerScaleSetListenerImage            string
 	DefaultRunnerScaleSetListenerImagePullSecrets []string
+	UpdateStrategy                                UpdateStrategy
 	ActionsClient                                 actions.MultiClient

 	resourceBuilder resourceBuilder
@@ -218,7 +237,48 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 		log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash])
 	}

+	// Make sure the AutoscalingListener is up and running in the controller namespace
+	listener := new(v1alpha1.AutoscalingListener)
+	listenerFound := true
+	if err := r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, listener); err != nil {
+		if !kerrors.IsNotFound(err) {
+			log.Error(err, "Failed to get AutoscalingListener resource")
+			return ctrl.Result{}, err
+		}
+
+		listenerFound = false
+		log.Info("AutoscalingListener does not exist.")
+	}
+
+	// Our listener pod is out of date, so we need to delete it to get a new recreate.
+	if listenerFound && (listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()) {
+		log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
+		if err := r.Delete(ctx, listener); err != nil {
+			if kerrors.IsNotFound(err) {
+				return ctrl.Result{}, nil
+			}
+			log.Error(err, "Failed to delete AutoscalingListener resource")
+			return ctrl.Result{}, err
+		}
+
+		log.Info("Deleted RunnerScaleSetListener since existing one is out of date")
+		return ctrl.Result{}, nil
+	}
+
 	if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] {
+		if r.drainingJobs(&latestRunnerSet.Status) {
+			log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners)
+			log.Info("Scaling down the number of desired replicas to 0")
+			// We are in the process of draining the jobs. The listener has been deleted and the ephemeral runner set replicas
+			// need to scale down to 0
+			err := patch(ctx, r.Client, latestRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
+				obj.Spec.Replicas = 0
+			})
+			if err != nil {
+				log.Error(err, "Failed to patch runner set to set desired count to 0")
+			}
+			return ctrl.Result{}, err
+		}
 		log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set")
 		return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
 	}
@@ -234,31 +294,14 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	}

 	// Make sure the AutoscalingListener is up and running in the controller namespace
-	listener := new(v1alpha1.AutoscalingListener)
-	if err := r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, listener); err != nil {
-		if kerrors.IsNotFound(err) {
-			// We don't have a listener
+	if !listenerFound {
+		if r.drainingJobs(&latestRunnerSet.Status) {
+			log.Info("Creating a new AutoscalingListener is waiting for the running and pending runners to finish. Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners)
+			return ctrl.Result{}, nil
+		}
 		log.Info("Creating a new AutoscalingListener for the runner set", "ephemeralRunnerSetName", latestRunnerSet.Name)
 		return r.createAutoScalingListenerForRunnerSet(ctx, autoscalingRunnerSet, latestRunnerSet, log)
 	}
-		log.Error(err, "Failed to get AutoscalingListener resource")
-		return ctrl.Result{}, err
-	}
-
-	// Our listener pod is out of date, so we need to delete it to get a new recreate.
-	if listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
-		log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
-		if err := r.Delete(ctx, listener); err != nil {
-			if kerrors.IsNotFound(err) {
-				return ctrl.Result{}, nil
-			}
-			log.Error(err, "Failed to delete AutoscalingListener resource")
-			return ctrl.Result{}, err
-		}
-
-		log.Info("Deleted RunnerScaleSetListener since existing one is out of date")
-		return ctrl.Result{}, nil
-	}
-
 	// Update the status of autoscaling runner set.
 	if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners {
@@ -276,6 +319,16 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	return ctrl.Result{}, nil
 }

+// Prevents overprovisioning of runners.
+// We reach this code path when runner scale set has been patched with a new runner spec but there are still running ephemeral runners.
+// The safest approach is to wait for the running ephemeral runners to finish before creating a new runner set.
+func (r *AutoscalingRunnerSetReconciler) drainingJobs(latestRunnerSetStatus *v1alpha1.EphemeralRunnerSetStatus) bool {
+	if r.UpdateStrategy == UpdateStrategyEventual && ((latestRunnerSetStatus.RunningEphemeralRunners + latestRunnerSetStatus.PendingEphemeralRunners) > 0) {
+		return true
+	}
+	return false
+}
+
 func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
 	logger.Info("Cleaning up the listener")
 	var listener v1alpha1.AutoscalingListener
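The eventual strategy only defers recreation while the old ephemeral runner set still reports running or pending runners, which is what drainingJobs checks above. A stripped-down, self-contained model of that gate; the real reconciler reads these counts from the EphemeralRunnerSet status rather than a local struct:

package main

import "fmt"

type runnerSetStatus struct {
	RunningEphemeralRunners int
	PendingEphemeralRunners int
}

type strategy string

const (
	immediate strategy = "immediate"
	eventual  strategy = "eventual"
)

// drainingJobs mirrors the predicate above: only the eventual strategy waits,
// and only while work is still in flight.
func drainingJobs(s strategy, st runnerSetStatus) bool {
	return s == eventual && st.RunningEphemeralRunners+st.PendingEphemeralRunners > 0
}

func main() {
	st := runnerSetStatus{RunningEphemeralRunners: 2, PendingEphemeralRunners: 3}
	fmt.Println("immediate drains?", drainingJobs(immediate, st)) // false: apply the update right away
	fmt.Println("eventual drains?", drainingJobs(eventual, st))   // true: wait for the 5 in-flight runners
}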
@@ -42,6 +42,7 @@ const (
 var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 	var ctx context.Context
 	var mgr ctrl.Manager
+	var controller *AutoscalingRunnerSetReconciler
 	var autoscalingNS *corev1.Namespace
 	var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
 	var configSecret *corev1.Secret
@@ -63,7 +64,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 		autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
 		configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

-		controller := &AutoscalingRunnerSetReconciler{
+		controller = &AutoscalingRunnerSetReconciler{
 			Client: mgr.GetClient(),
 			Scheme: mgr.GetScheme(),
 			Log:    logf.Log,
@@ -424,6 +425,110 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 		})
 	})

+	Context("When updating an AutoscalingRunnerSet with running or pending jobs", func() {
+		It("It should wait for running and pending jobs to finish before applying the update. Update Strategy is set to eventual.", func() {
+			// Switch update strategy to eventual (drain jobs)
+			controller.UpdateStrategy = UpdateStrategyEventual
+			// Wait till the listener is created
+			listener := new(v1alpha1.AutoscalingListener)
+			Eventually(
+				func() error {
+					return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
+				},
+				autoscalingRunnerSetTestTimeout,
+				autoscalingRunnerSetTestInterval,
+			).Should(Succeed(), "Listener should be created")
+
+			// Wait till the ephemeral runner set is created
+			Eventually(
+				func() (int, error) {
+					runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
+					err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
+					if err != nil {
+						return 0, err
+					}
+
+					return len(runnerSetList.Items), nil
+				},
+				autoscalingRunnerSetTestTimeout,
+				autoscalingRunnerSetTestInterval,
+			).Should(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created")
+
+			runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
+			err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
+			Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")
+
+			// Emulate running and pending jobs
+			runnerSet := runnerSetList.Items[0]
+			activeRunnerSet := runnerSet.DeepCopy()
+			activeRunnerSet.Status.CurrentReplicas = 6
+			activeRunnerSet.Status.FailedEphemeralRunners = 1
+			activeRunnerSet.Status.RunningEphemeralRunners = 2
+			activeRunnerSet.Status.PendingEphemeralRunners = 3
+
+			desiredStatus := v1alpha1.AutoscalingRunnerSetStatus{
+				CurrentRunners:          activeRunnerSet.Status.CurrentReplicas,
+				State:                   "",
+				PendingEphemeralRunners: activeRunnerSet.Status.PendingEphemeralRunners,
+				RunningEphemeralRunners: activeRunnerSet.Status.RunningEphemeralRunners,
+				FailedEphemeralRunners:  activeRunnerSet.Status.FailedEphemeralRunners,
+			}
+
+			err = k8sClient.Status().Patch(ctx, activeRunnerSet, client.MergeFrom(&runnerSet))
+			Expect(err).NotTo(HaveOccurred(), "Failed to patch runner set status")
+
+			Eventually(
+				func() (v1alpha1.AutoscalingRunnerSetStatus, error) {
+					updated := new(v1alpha1.AutoscalingRunnerSet)
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated)
+					if err != nil {
+						return v1alpha1.AutoscalingRunnerSetStatus{}, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err)
+					}
+					return updated.Status, nil
+				},
+				autoscalingRunnerSetTestTimeout,
+				autoscalingRunnerSetTestInterval,
+			).Should(BeEquivalentTo(desiredStatus), "AutoScalingRunnerSet status should be updated")
+
+			// Patch the AutoScalingRunnerSet image which should trigger
+			// the recreation of the Listener and EphemeralRunnerSet
+			patched := autoscalingRunnerSet.DeepCopy()
+			patched.Spec.Template.Spec = corev1.PodSpec{
+				Containers: []corev1.Container{
+					{
+						Name:  "runner",
+						Image: "ghcr.io/actions/abcd:1.1.1",
+					},
+				},
+			}
+			// patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
+			err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
+			Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
+			autoscalingRunnerSet = patched.DeepCopy()
+
+			// The EphemeralRunnerSet should not be recreated
+			Consistently(
+				func() (string, error) {
+					runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
+					err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
+					Expect(err).NotTo(HaveOccurred(), "failed to fetch AutoScalingRunnerSet")
+					return runnerSetList.Items[0].Name, nil
+				},
+				autoscalingRunnerSetTestTimeout,
+				autoscalingRunnerSetTestInterval,
+			).Should(Equal(activeRunnerSet.Name), "The EphemeralRunnerSet should not be recreated")
+
+			// The listener should not be recreated
+			Consistently(
+				func() error {
+					return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
+				},
+				autoscalingRunnerSetTestTimeout,
+				autoscalingRunnerSetTestInterval,
+			).ShouldNot(Succeed(), "Listener should not be recreated")
+		})
+	})
+
 	It("Should update Status on EphemeralRunnerSet status Update", func() {
 		ars := new(v1alpha1.AutoscalingRunnerSet)
 		Eventually(
@@ -1617,10 +1722,14 @@ var _ = Describe("Test resource version and build version mismatch", func() {

 	startManagers(GinkgoT(), mgr)

-	Eventually(func() bool {
+	Eventually(
+		func() bool {
 			ars := new(v1alpha1.AutoscalingRunnerSet)
 			err := k8sClient.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Name}, ars)
 			return errors.IsNotFound(err)
-	}).Should(BeTrue())
+		},
+		autoscalingRunnerSetTestTimeout,
+		autoscalingRunnerSetTestInterval,
+	).Should(BeTrue())
 	})
 })
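The last hunk above passes an explicit timeout and polling interval to Eventually instead of relying on Gomega's defaults. A tiny standalone sketch of that call shape, assuming current Gomega's NewGomega constructor; the fail handler and the condition being polled are placeholders:

package main

import (
	"time"

	. "github.com/onsi/gomega"
)

func main() {
	g := NewGomega(func(message string, _ ...int) { panic(message) })

	start := time.Now()
	// Timeout and polling interval bound how long the assertion keeps retrying.
	g.Eventually(func() bool {
		return time.Since(start) > 2*time.Second
	}, 5*time.Second, 250*time.Millisecond).Should(BeTrue())
}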
@@ -1,6 +1,9 @@
 package actionsgithubcom

-import corev1 "k8s.io/api/core/v1"
+import (
+	"github.com/actions/actions-runner-controller/logging"
+	corev1 "k8s.io/api/core/v1"
+)

 const (
 	LabelKeyRunnerTemplateHash = "runner-template-hash"
@@ -60,5 +63,11 @@ const (
 // to the listener when ImagePullPolicy is not specified
 const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent

+// DefaultScaleSetListenerLogLevel is the default log level applied
+const DefaultScaleSetListenerLogLevel = string(logging.LogLevelDebug)
+
+// DefaultScaleSetListenerLogFormat is the default log format applied
+const DefaultScaleSetListenerLogFormat = string(logging.LogFormatText)
+
 // ownerKey is field selector matching the owner name of a particular resource
 const resourceOwnerKey = ".metadata.controller"
@@ -25,6 +25,7 @@ import (
 	"strings"

 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
+	"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
 	"go.uber.org/multierr"
@@ -50,6 +51,8 @@ type EphemeralRunnerSetReconciler struct {
 	Scheme        *runtime.Scheme
 	ActionsClient actions.MultiClient

+	PublishMetrics bool
+
 	resourceBuilder resourceBuilder
 }

@@ -163,6 +166,29 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 		"deleting", len(deletingEphemeralRunners),
 	)

+	if r.PublishMetrics {
+		githubConfigURL := ephemeralRunnerSet.Spec.EphemeralRunnerSpec.GitHubConfigUrl
+		parsedURL, err := actions.ParseGitHubConfigFromURL(githubConfigURL)
+		if err != nil {
+			log.Error(err, "Github Config URL is invalid", "URL", githubConfigURL)
+			// stop reconciling on this object
+			return ctrl.Result{}, nil
+		}
+
+		metrics.SetEphemeralRunnerCountsByStatus(
+			metrics.CommonLabels{
+				Name:         ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName],
+				Namespace:    ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace],
+				Repository:   parsedURL.Repository,
+				Organization: parsedURL.Organization,
+				Enterprise:   parsedURL.Enterprise,
+			},
+			len(pendingEphemeralRunners),
+			len(runningEphemeralRunners),
+			len(failedEphemeralRunners),
+		)
+	}
+
 	// cleanup finished runners and proceed
 	var errs []error
 	for i := range finishedEphemeralRunners {
92  controllers/actions.github.com/metrics/metrics.go  Normal file
@@ -0,0 +1,92 @@
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"sigs.k8s.io/controller-runtime/pkg/metrics"
)

var githubScaleSetControllerSubsystem = "gha_controller"

var labels = []string{
	"name",
	"namespace",
	"repository",
	"organization",
	"enterprise",
}

type CommonLabels struct {
	Name         string
	Namespace    string
	Repository   string
	Organization string
	Enterprise   string
}

func (l *CommonLabels) labels() prometheus.Labels {
	return prometheus.Labels{
		"name":         l.Name,
		"namespace":    l.Namespace,
		"repository":   l.Repository,
		"organization": l.Organization,
		"enterprise":   l.Enterprise,
	}
}

var (
	pendingEphemeralRunners = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetControllerSubsystem, Name: "pending_ephemeral_runners", Help: "Number of ephemeral runners in a pending state."}, labels)
	runningEphemeralRunners = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetControllerSubsystem, Name: "running_ephemeral_runners", Help: "Number of ephemeral runners in a running state."}, labels)
	failedEphemeralRunners  = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetControllerSubsystem, Name: "failed_ephemeral_runners", Help: "Number of ephemeral runners in a failed state."}, labels)
	runningListeners        = prometheus.NewGaugeVec(prometheus.GaugeOpts{Subsystem: githubScaleSetControllerSubsystem, Name: "running_listeners", Help: "Number of listeners in a running state."}, labels)
)

func RegisterMetrics() {
	metrics.Registry.MustRegister(
		pendingEphemeralRunners,
		runningEphemeralRunners,
		failedEphemeralRunners,
		runningListeners,
	)
}

func SetEphemeralRunnerCountsByStatus(commonLabels CommonLabels, pending, running, failed int) {
	pendingEphemeralRunners.With(commonLabels.labels()).Set(float64(pending))
	runningEphemeralRunners.With(commonLabels.labels()).Set(float64(running))
	failedEphemeralRunners.With(commonLabels.labels()).Set(float64(failed))
}

func AddRunningListener(commonLabels CommonLabels) {
	runningListeners.With(commonLabels.labels()).Set(1)
}

func SubRunningListener(commonLabels CommonLabels) {
	runningListeners.With(commonLabels.labels()).Set(0)
}
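Because RegisterMetrics adds these gauges to controller-runtime's shared registry, they are served from the manager's /metrics endpoint without extra plumbing. A short usage sketch built only from the functions in the file above; the label values and counts are illustrative, and where RegisterMetrics is actually invoked in the controller binary is an assumption:

package main

import (
	"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
)

func main() {
	// Register once at startup; the controller-runtime manager exposes the
	// shared metrics.Registry on its metrics endpoint.
	metrics.RegisterMetrics()

	labels := metrics.CommonLabels{
		Name:         "my-scale-set",
		Namespace:    "arc-runners",
		Organization: "my-org",
		Repository:   "my-repo",
	}

	// In the reconciler these counts come from the EphemeralRunnerSet status.
	metrics.SetEphemeralRunnerCountsByStatus(labels, 3, 2, 1)
	metrics.AddRunningListener(labels)
}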
@@ -4,12 +4,14 @@ import (
 	"context"
 	"fmt"
 	"math"
+	"net"
 	"strconv"

 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
 	"github.com/actions/actions-runner-controller/build"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/actions/actions-runner-controller/hash"
+	"github.com/actions/actions-runner-controller/logging"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -46,6 +48,27 @@ func SetListenerImagePullPolicy(pullPolicy string) bool {
 	}
 }
+
+var scaleSetListenerLogLevel = DefaultScaleSetListenerLogLevel
+var scaleSetListenerLogFormat = DefaultScaleSetListenerLogFormat
+
+func SetListenerLoggingParameters(level string, format string) bool {
+	switch level {
+	case logging.LogLevelDebug, logging.LogLevelInfo, logging.LogLevelWarn, logging.LogLevelError:
+	default:
+		return false
+	}
+
+	switch format {
+	case logging.LogFormatJSON, logging.LogFormatText:
+	default:
+		return false
+	}
+
+	scaleSetListenerLogLevel = level
+	scaleSetListenerLogFormat = format
+	return true
+}
+
 type resourceBuilder struct{}

 func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
@@ -63,26 +86,24 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
 		effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
 	}

-	githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse github config from url: %v", err)
+	labels := map[string]string{
+		LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
+		LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
+		LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
+		LabelKeyKubernetesComponent:     "runner-scale-set-listener",
+		LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
+		labelKeyRunnerSpecHash:          autoscalingRunnerSet.ListenerSpecHash(),
+	}
+
+	if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil {
+		return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
 	}

 	autoscalingListener := &v1alpha1.AutoscalingListener{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      scaleSetListenerName(autoscalingRunnerSet),
 			Namespace: namespace,
-			Labels: map[string]string{
-				LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
-				LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
-				LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
-				LabelKeyKubernetesComponent:     "runner-scale-set-listener",
-				LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
-				LabelKeyGitHubEnterprise:        githubConfig.Enterprise,
-				LabelKeyGitHubOrganization:      githubConfig.Organization,
-				LabelKeyGitHubRepository:        githubConfig.Repository,
-				labelKeyRunnerSpecHash:          autoscalingRunnerSet.ListenerSpecHash(),
-			},
+			Labels:    labels,
 		},
 		Spec: v1alpha1.AutoscalingListenerSpec{
 			GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
@@ -104,7 +125,12 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
 	return autoscalingListener, nil
 }

-func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
+type listenerMetricsServerConfig struct {
+	addr     string
+	endpoint string
+}
+
+func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
 	listenerEnv := []corev1.EnvVar{
 		{
 			Name:  "GITHUB_CONFIGURE_URL",
@@ -130,6 +156,18 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
 			Name:  "GITHUB_RUNNER_SCALE_SET_ID",
 			Value: strconv.Itoa(autoscalingListener.Spec.RunnerScaleSetId),
 		},
+		{
+			Name:  "GITHUB_RUNNER_SCALE_SET_NAME",
+			Value: autoscalingListener.Spec.AutoscalingRunnerSetName,
+		},
+		{
+			Name:  "GITHUB_RUNNER_LOG_LEVEL",
+			Value: scaleSetListenerLogLevel,
+		},
+		{
+			Name:  "GITHUB_RUNNER_LOG_FORMAT",
+			Value: scaleSetListenerLogFormat,
+		},
 	}
 	listenerEnv = append(listenerEnv, envs...)

@@ -189,6 +227,38 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
 		})
 	}

+	var ports []corev1.ContainerPort
+	if metricsConfig != nil && len(metricsConfig.addr) != 0 {
+		listenerEnv = append(
+			listenerEnv,
+			corev1.EnvVar{
+				Name:  "GITHUB_METRICS_ADDR",
+				Value: metricsConfig.addr,
+			},
+			corev1.EnvVar{
+				Name:  "GITHUB_METRICS_ENDPOINT",
+				Value: metricsConfig.endpoint,
+			},
+		)
+
+		_, portStr, err := net.SplitHostPort(metricsConfig.addr)
+		if err != nil {
+			return nil, fmt.Errorf("failed to split host:port for metrics address: %v", err)
+		}
+		port, err := strconv.ParseInt(portStr, 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("failed to convert port %q to int32: %v", portStr, err)
+		}
+		ports = append(
+			ports,
+			corev1.ContainerPort{
+				ContainerPort: int32(port),
+				Protocol:      corev1.ProtocolTCP,
+				Name:          "metrics",
+			},
+		)
+	}
+
 	podSpec := corev1.PodSpec{
 		ServiceAccountName: serviceAccount.Name,
 		Containers: []corev1.Container{
@@ -200,6 +270,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
 				Command: []string{
 					"/github-runnerscaleset-listener",
 				},
+				Ports: ports,
 			},
 		},
 		ImagePullSecrets: autoscalingListener.Spec.ImagePullSecrets,
@@ -224,7 +295,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
 		Spec: podSpec,
 	}

-	return newRunnerScaleSetListenerPod
+	return newRunnerScaleSetListenerPod, nil
 }

 func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
@@ -323,7 +394,7 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
 	}
 	runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()

-	newLabels := map[string]string{
+	labels := map[string]string{
 		labelKeyRunnerSpecHash:      runnerSpecHash,
 		LabelKeyKubernetesPartOf:    labelValueKubernetesPartOf,
 		LabelKeyKubernetesComponent: "runner-set",
@@ -332,7 +403,7 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
 		LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
 	}

-	if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
+	if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil {
 		return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
 	}

@@ -345,7 +416,7 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
 			Namespace:    autoscalingRunnerSet.ObjectMeta.Namespace,
-			Labels:       newLabels,
+			Labels:       labels,
 			Annotations:  newAnnotations,
 		},
 		Spec: v1alpha1.EphemeralRunnerSetSpec{
@@ -545,14 +616,23 @@ func applyGitHubURLLabels(url string, labels map[string]string) error {
 	}

 	if len(githubConfig.Enterprise) > 0 {
-		labels[LabelKeyGitHubEnterprise] = githubConfig.Enterprise
+		labels[LabelKeyGitHubEnterprise] = trimLabelValue(githubConfig.Enterprise)
 	}
 	if len(githubConfig.Organization) > 0 {
-		labels[LabelKeyGitHubOrganization] = githubConfig.Organization
+		labels[LabelKeyGitHubOrganization] = trimLabelValue(githubConfig.Organization)
 	}
 	if len(githubConfig.Repository) > 0 {
-		labels[LabelKeyGitHubRepository] = githubConfig.Repository
+		labels[LabelKeyGitHubRepository] = trimLabelValue(githubConfig.Repository)
 	}

 	return nil
 }
+
+const trimLabelVauleSuffix = "-trim"
+
+func trimLabelValue(val string) string {
+	if len(val) > 63 {
+		return val[:63-len(trimLabelVauleSuffix)] + trimLabelVauleSuffix
+	}
+	return val
+}
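The "-trim" suffix exists because Kubernetes caps label values at 63 characters, and enterprise, organization, or repository slugs taken from the GitHub config URL can exceed that. A standalone sketch of the same rule, mirroring trimLabelValue above (constant name simplified, not the repository's identifier):

	package main

	import (
		"fmt"
		"strings"
	)

	const trimSuffix = "-trim"

	// trimLabelValue cuts anything longer than 63 characters and marks it with
	// the suffix so over-long slugs still produce valid Kubernetes label values.
	func trimLabelValue(val string) string {
		if len(val) > 63 {
			return val[:63-len(trimSuffix)] + trimSuffix
		}
		return val
	}

	func main() {
		long := strings.Repeat("a", 64)
		fmt.Println(len(trimLabelValue(long)), trimLabelValue("short-org"))
		// Output: 63 short-org
	}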
@@ -2,6 +2,8 @@ package actionsgithubcom

 import (
 	"context"
+	"fmt"
+	"strings"
 	"testing"

 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -66,7 +68,8 @@ func TestLabelPropagation(t *testing.T) {
 			Name: "test",
 		},
 	}
-	listenerPod := b.newScaleSetListenerPod(listener, listenerServiceAccount, listenerSecret)
+	listenerPod, err := b.newScaleSetListenerPod(listener, listenerServiceAccount, listenerSecret, nil)
+	require.NoError(t, err)
 	assert.Equal(t, listenerPod.Labels, listener.Labels)

 	ephemeralRunner := b.newEphemeralRunner(ephemeralRunnerSet)
@@ -91,3 +94,70 @@ func TestLabelPropagation(t *testing.T) {
 		assert.Equal(t, ephemeralRunner.Labels[key], pod.Labels[key])
 	}
 }
+
+func TestGitHubURLTrimLabelValues(t *testing.T) {
+	enterprise := strings.Repeat("a", 64)
+	organization := strings.Repeat("b", 64)
+	repository := strings.Repeat("c", 64)
+
+	autoscalingRunnerSet := v1alpha1.AutoscalingRunnerSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-scale-set",
+			Namespace: "test-ns",
+			Labels: map[string]string{
+				LabelKeyKubernetesPartOf:  labelValueKubernetesPartOf,
+				LabelKeyKubernetesVersion: "0.2.0",
+			},
+			Annotations: map[string]string{
+				runnerScaleSetIdAnnotationKey:      "1",
+				AnnotationKeyGitHubRunnerGroupName: "test-group",
+			},
+		},
+	}
+
+	t.Run("org/repo", func(t *testing.T) {
+		autoscalingRunnerSet := autoscalingRunnerSet.DeepCopy()
+		autoscalingRunnerSet.Spec = v1alpha1.AutoscalingRunnerSetSpec{
+			GitHubConfigUrl: fmt.Sprintf("https://github.com/%s/%s", organization, repository),
+		}
+
+		var b resourceBuilder
+		ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
+		require.NoError(t, err)
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 0)
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], 63)
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], 63)
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], trimLabelVauleSuffix))
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], trimLabelVauleSuffix))
+
+		listener, err := b.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
+		require.NoError(t, err)
+		assert.Len(t, listener.Labels[LabelKeyGitHubEnterprise], 0)
+		assert.Len(t, listener.Labels[LabelKeyGitHubOrganization], 63)
+		assert.Len(t, listener.Labels[LabelKeyGitHubRepository], 63)
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], trimLabelVauleSuffix))
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], trimLabelVauleSuffix))
+	})
+
+	t.Run("enterprise", func(t *testing.T) {
+		autoscalingRunnerSet := autoscalingRunnerSet.DeepCopy()
+		autoscalingRunnerSet.Spec = v1alpha1.AutoscalingRunnerSetSpec{
+			GitHubConfigUrl: fmt.Sprintf("https://github.com/enterprises/%s", enterprise),
+		}
+
+		var b resourceBuilder
+		ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
+		require.NoError(t, err)
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 63)
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], trimLabelVauleSuffix))
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], 0)
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], 0)
+
+		listener, err := b.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
+		require.NoError(t, err)
+		assert.Len(t, listener.Labels[LabelKeyGitHubEnterprise], 63)
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], trimLabelVauleSuffix))
+		assert.Len(t, listener.Labels[LabelKeyGitHubOrganization], 0)
+		assert.Len(t, listener.Labels[LabelKeyGitHubRepository], 0)
+	})
+}
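For reference, the 63-character ceiling these tests assert against is the Kubernetes label-value limit. A small companion sketch (not part of the test suite) checking a raw and a trimmed value with the apimachinery validator:

	package main

	import (
		"fmt"
		"strings"

		"k8s.io/apimachinery/pkg/util/validation"
	)

	func main() {
		raw := strings.Repeat("c", 64)
		trimmed := raw[:63-len("-trim")] + "-trim"

		// A 64-character slug fails validation; the trimmed 63-character form passes.
		fmt.Println(validation.IsValidLabelValue(raw))     // non-empty slice: value too long
		fmt.Println(validation.IsValidLabelValue(trimmed)) // empty slice: valid
	}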
@@ -11,7 +11,7 @@ import (
 	"github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1"
 	prometheus_metrics "github.com/actions/actions-runner-controller/controllers/actions.summerwind.net/metrics"
 	arcgithub "github.com/actions/actions-runner-controller/github"
-	"github.com/google/go-github/v47/github"
+	"github.com/google/go-github/v52/github"
 	corev1 "k8s.io/api/core/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -118,10 +118,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
 	}

 	var total, inProgress, queued, completed, unknown int
-	type callback func()
-	listWorkflowJobs := func(user string, repoName string, runID int64, fallback_cb callback) {
+	listWorkflowJobs := func(user string, repoName string, runID int64) {
 		if runID == 0 {
-			fallback_cb()
+			// should not happen in reality
+			r.Log.Info("Detected run with no runID of 0, ignoring the case and not scaling.", "repo_name", repoName, "run_id", runID)
 			return
 		}
 		opt := github.ListWorkflowJobsOptions{ListOptions: github.ListOptions{PerPage: 50}}
@@ -139,7 +139,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
 			opt.Page = resp.NextPage
 		}
 		if len(allJobs) == 0 {
-			fallback_cb()
+			// GitHub API can return run with empty job array - should be ignored
+			r.Log.Info("Detected run with no jobs, ignoring the case and not scaling.", "repo_name", repoName, "run_id", runID)
 		} else {
 		JOB:
 			for _, job := range allJobs {
@@ -201,9 +202,9 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
 		case "completed":
 			completed++
 		case "in_progress":
-			listWorkflowJobs(user, repoName, run.GetID(), func() { inProgress++ })
+			listWorkflowJobs(user, repoName, run.GetID())
 		case "queued":
-			listWorkflowJobs(user, repoName, run.GetID(), func() { queued++ })
+			listWorkflowJobs(user, repoName, run.GetID())
 		default:
 			unknown++
 		}
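The reworked listWorkflowJobs closure pages through a run's jobs and counts them by status instead of falling back to a callback when data is missing. A minimal, self-contained sketch of that counting loop against go-github v52 (placeholder owner/repo, no label matching or retries; unauthenticated, so the example call will simply print an error):

	package main

	import (
		"context"
		"fmt"

		"github.com/google/go-github/v52/github"
	)

	// countJobs pages through one workflow run and tallies queued and in-progress jobs.
	func countJobs(ctx context.Context, client *github.Client, owner, repo string, runID int64) (queued, inProgress int, err error) {
		opt := &github.ListWorkflowJobsOptions{ListOptions: github.ListOptions{PerPage: 50}}
		for {
			jobs, resp, err := client.Actions.ListWorkflowJobs(ctx, owner, repo, runID, opt)
			if err != nil {
				return 0, 0, err
			}
			for _, job := range jobs.Jobs {
				switch job.GetStatus() {
				case "queued":
					queued++
				case "in_progress":
					inProgress++
				}
			}
			if resp.NextPage == 0 {
				break
			}
			opt.Page = resp.NextPage
		}
		return queued, inProgress, nil
	}

	func main() {
		q, p, err := countJobs(context.Background(), github.NewClient(nil), "test", "valid", 1)
		fmt.Println(q, p, err)
	}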
@@ -61,8 +61,9 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 		want                     int
 		err                      string
 	}{
+		// case_0
 		// Legacy functionality
-		// 3 demanded, max at 3
+		// 0 demanded due to zero runID, min at 2
 		{
 			repo: "test/valid",
 			min:  intPtr(2),
@@ -70,9 +71,10 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
 			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
 			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
-			want:                     3,
+			want:                     2,
 		},
-		// Explicitly speified the default `self-hosted` label which is ignored by the simulator,
+		// case_1
+		// Explicitly specified the default `self-hosted` label which is ignored by the simulator,
 		// as we assume that GitHub Actions automatically associates the `self-hosted` label to every self-hosted runner.
 		// 3 demanded, max at 3
 		{
@@ -80,11 +82,17 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			labels: []string{"self-hosted"},
 			min:    intPtr(2),
 			max:    intPtr(3),
-			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
-			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
+			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}]}"`,
+			workflowJobs: map[int]string{
+				1: `{"jobs": [{"status": "queued", "labels":["self-hosted"]}]}`,
+				2: `{"jobs": [{"status": "in_progress", "labels":["self-hosted"]}]}`,
+				3: `{"jobs": [{"status": "in_progress", "labels":["self-hosted"]}]}`,
+			},
 			want: 3,
 		},
+		// case_2
 		// 2 demanded, max at 3, currently 3, delay scaling down due to grace period
 		{
 			repo: "test/valid",
@@ -97,6 +105,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want:                     3,
 		},
+		// case_3
 		// 3 demanded, max at 2
 		{
 			repo: "test/valid",
@@ -107,6 +116,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
 			want:                     2,
 		},
+		// case_4
 		// 2 demanded, min at 2
 		{
 			repo: "test/valid",
@@ -117,6 +127,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want:                     2,
 		},
+		// case_5
 		// 1 demanded, min at 2
 		{
 			repo: "test/valid",
@@ -127,6 +138,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
 			want:                     2,
 		},
+		// case_6
 		// 1 demanded, min at 2
 		{
 			repo: "test/valid",
@@ -137,6 +149,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want:                     2,
 		},
+		// case_7
 		// 1 demanded, min at 1
 		{
 			repo: "test/valid",
@@ -147,6 +160,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
 			want:                     1,
 		},
+		// case_8
 		// 1 demanded, min at 1
 		{
 			repo: "test/valid",
@@ -157,6 +171,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want:                     1,
 		},
+		// case_9
 		// fixed at 3
 		{
 			repo: "test/valid",
@@ -166,9 +181,36 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
 			workflowRuns_queued:      `{"total_count": 0, "workflow_runs":[]}"`,
 			workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}]}"`,
-			want:                     3,
+			want:                     1,
+		},
+		// Case for empty GitHub Actions reponse - should not trigger scale up
+		{
+			description:              "GitHub Actions Jobs Array is empty - no scale up",
+			repo:                     "test/valid",
+			min:                      intPtr(0),
+			max:                      intPtr(3),
+			workflowRuns:             `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
+			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
+			workflowJobs: map[int]string{
+				1: `{"jobs": []}`,
+			},
+			want: 0,
+		},
+		// Case for hosted GitHub Actions run
+		{
+			description:              "Hosted GitHub Actions run - no scale up",
+			repo:                     "test/valid",
+			min:                      intPtr(0),
+			max:                      intPtr(3),
+			workflowRuns:             `{"total_count": 2, "workflow_runs":[{"id": 1, "status":"queued"}, {"status":"completed"}]}"`,
+			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
+			workflowJobs: map[int]string{
+				1: `{"jobs": [{"status":"queued"}]}`,
+			},
+			want: 0,
 		},

 		{
 			description: "Job-level autoscaling with no explicit runner label (runners have implicit self-hosted, requested self-hosted, 5 jobs from 3 workflows)",
 			repo:        "test/valid",
@@ -422,7 +464,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 		want  int
 		err   string
 	}{
-		// 3 demanded, max at 3
+		// case_0
+		// 0 demanded due to zero runID, min at 2
 		{
 			org:   "test",
 			repos: []string{"valid"},
@@ -431,8 +474,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
 			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
 			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
-			want:                     3,
+			want:                     2,
 		},
+		// case_1
 		// 2 demanded, max at 3, currently 3, delay scaling down due to grace period
 		{
 			org: "test",
@@ -446,6 +490,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want:                     3,
 		},
+		// case_2
 		// 3 demanded, max at 2
 		{
 			org: "test",
@@ -457,6 +502,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
 			want:                     2,
 		},
+		// case_3
 		// 2 demanded, min at 2
 		{
 			org: "test",
@@ -468,6 +514,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want:                     2,
 		},
+		// case_4
 		// 1 demanded, min at 2
 		{
 			org: "test",
@@ -479,6 +526,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
 			want:                     2,
 		},
+		// case_5
 		// 1 demanded, min at 2
 		{
 			org: "test",
@@ -512,6 +560,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want:                     1,
 		},
+		// case_6
 		// fixed at 3
 		{
 			org: "test",
@@ -522,8 +571,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
 			workflowRuns_queued:      `{"total_count": 0, "workflow_runs":[]}"`,
 			workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
-			want:                     3,
+			want:                     1,
 		},
+		// case_7
 		// org runner, fixed at 3
 		{
 			org: "test",
@@ -534,8 +584,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
 			workflowRuns_queued:      `{"total_count": 0, "workflow_runs":[]}"`,
 			workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
-			want:                     3,
+			want:                     1,
 		},
+		// case_8
 		// org runner, 1 demanded, min at 1, no repos
 		{
 			org: "test",
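The expectations in these cases encode a simple rule: the suggested replica count is the number of queued plus in-progress jobs, clamped to the HRA's min/max. A tiny sketch of that clamp (illustrative names, not the controller's actual identifiers):

	package main

	import "fmt"

	// desiredReplicas mirrors the min/max clamping the cases above exercise.
	func desiredReplicas(queued, inProgress, min, max int) int {
		suggested := queued + inProgress
		if suggested < min {
			return min
		}
		if suggested > max {
			return max
		}
		return suggested
	}

	func main() {
		fmt.Println(desiredReplicas(1, 2, 2, 3)) // 3 demanded, max at 3 -> 3
		fmt.Println(desiredReplicas(1, 2, 2, 2)) // 3 demanded, max at 2 -> 2
		fmt.Println(desiredReplicas(0, 1, 2, 3)) // 1 demanded, min at 2 -> 2
	}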
@@ -44,8 +44,8 @@ type scaleOperation struct {

 // Add the scale target to the unbounded queue, blocking until the target is successfully added to the queue.
 // All the targets in the queue are dequeued every 3 seconds, grouped by the HRA, and applied.
-// In a happy path, batchScaler update each HRA only once, even though the HRA had two or more associated webhook events in the 3 seconds interval,
-// which results in less K8s API calls and less HRA update conflicts in case your ARC installation receives a lot of webhook events
+// In a happy path, batchScaler updates each HRA only once, even though the HRA had two or more associated webhook events in the 3 seconds interval,
+// which results in fewer K8s API calls and fewer HRA update conflicts in case your ARC installation receives a lot of webhook events
 func (s *batchScaler) Add(st *ScaleTarget) {
 	if st == nil {
 		return
@@ -142,87 +142,130 @@ func (s *batchScaler) batchScale(ctx context.Context, batch batchScaleOperation)
 		return err
 	}

+	now := time.Now()
+
+	copy, err := s.planBatchScale(ctx, batch, &hra, now)
+	if err != nil {
+		return err
+	}
+
+	if err := s.Client.Patch(ctx, copy, client.MergeFrom(&hra)); err != nil {
+		return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err)
+	}
+
+	return nil
+}
+
+func (s *batchScaler) planBatchScale(ctx context.Context, batch batchScaleOperation, hra *v1alpha1.HorizontalRunnerAutoscaler, now time.Time) (*v1alpha1.HorizontalRunnerAutoscaler, error) {
 	copy := hra.DeepCopy()
+
+	if hra.Spec.MaxReplicas != nil && len(copy.Spec.CapacityReservations) > *copy.Spec.MaxReplicas {
+		// We have more reservations than MaxReplicas, meaning that we previously
+		// could not scale up to meet a capacity demand because we had hit MaxReplicas.
+		// Therefore, there are reservations that are starved for capacity. We extend the
+		// expiration time on these starved reservations because the "duration" is meant
+		// to apply to reservations that have launched replicas, not replicas in the backlog.
+		// Of course, if MaxReplicas is nil, then there is no max to hit, and we do not need this adjustment.
+		// See https://github.com/actions/actions-runner-controller/issues/2254 for more context.
+
+		// Extend the expiration time of all the reservations not yet assigned to replicas.
+		//
+		// Note that we assume that the two scenarios equivalent here.
+		// The first case is where the number of reservations become greater than MaxReplicas.
+		// The second case is where MaxReplicas become greater than the number of reservations equivalent.
+		// Presuming the HRA.spec.scaleTriggers[].duration as "the duration until the reservation expires after a corresponding runner was deployed",
+		// it's correct.
+		//
+		// In other words, we settle on a capacity reservation's ExpirationTime only after the corresponding runner is "about to be" deployed.
+		// It's "about to be deployed" not "deployed" because we have no way to correlate a capacity reservation and the runner;
+		// the best we can do here is to simulate the desired behavior by reading MaxReplicas and assuming it will be equal to the number of active runners soon.
+		//
+		// Perhaps we could use RunnerDeployment.Status.Replicas or RunnerSet.Status.Replicas instead of the MaxReplicas as a better source of "the number of active runners".
+		// However, note that the status is not guaranteed to be up-to-date.
+		// It might not be that easy to decide which is better to use.
+		for i := *hra.Spec.MaxReplicas; i < len(copy.Spec.CapacityReservations); i++ {
+			// Let's say maxReplicas=3 and the workflow job of status=completed result in deleting the first capacity reservation
+			// copy.Spec.CapacityReservations[i] where i=0.
+			// We are interested in at least four reservations and runners:
+			// i=0 - already included in the current desired replicas, but may be about to be deleted
+			// i=1-2 - already included in the current desired replicas
+			// i=3 - not yet included in the current desired replicas, might have been expired while waiting in the queue
+			//
+			// i=3 is especially important here- If we didn't reset the expiration time of this reservation,
+			// it might expire before it is assigned to a runner, due to the delay between the time the
+			// expiration timer starts and the time a runner becomes available.
+			//
+			// Why is there such delay? Because ARC implements the scale duration and expiration as such.
+			// The expiration timer starts when the reservation is created, while the runner is created only after
+			// the corresponding reservation fits within maxReplicas.
+			//
+			// We address that, by resetting the expiration time for fourth(i=3 in the above example)
+			// and subsequent reservations whenever a batch is run (which is when expired reservations get deleted).

+			// There is no guarantee that all the reservations have the same duration, and even if there were,
+			// at this point we have lost the reference to the duration that was intended.
+			// However, we can compute the intended duration from the existing interval.
+			//
+			// In other words, updating HRA.spec.scaleTriggers[].duration does not result in delaying capacity reservations expiration any longer
+			// than the "intended" duration, which is the duration of the trigger when the reservation was created.
+			duration := copy.Spec.CapacityReservations[i].ExpirationTime.Time.Sub(copy.Spec.CapacityReservations[i].EffectiveTime.Time)
+			copy.Spec.CapacityReservations[i].EffectiveTime = metav1.Time{Time: now}
+			copy.Spec.CapacityReservations[i].ExpirationTime = metav1.Time{Time: now.Add(duration)}
+		}
+	}
+
+	// Now we can filter out any expired reservations from consideration.
+	// This could leave us with 0 reservations left.
 	copy.Spec.CapacityReservations = getValidCapacityReservations(copy)
+	before := len(hra.Spec.CapacityReservations)
+	expired := before - len(copy.Spec.CapacityReservations)

 	var added, completed int

 	for _, scale := range batch.scaleOps {
-		amount := 1
-
-		if scale.trigger.Amount != 0 {
-			amount = scale.trigger.Amount
-		}
+		amount := scale.trigger.Amount

+		// We do not track if a webhook-based scale-down event matches an expired capacity reservation
+		// or a job for which the scale-up event was never received. This means that scale-down
+		// events could drive capacity reservations into the negative numbers if we let it.
+		// We ensure capacity never falls below zero, but that also means that the
+		// final number of capacity reservations depends on the order in which events come in.
+		// If capacity is at zero and we get a scale-down followed by a scale-up,
+		// the scale-down will be ignored and we will end up with a desired capacity of 1.
+		// However, if we get the scale-up first, the scale-down will drive desired capacity back to zero.
+		// This could be fixed by matching events' `workflow_job.run_id` with capacity reservations,
+		// but that would be a lot of work. So for now we allow for some slop, and hope that
+		// GitHub provides a better autoscaling solution soon.
+		if amount > 0 {
 			scale.log.V(2).Info("Adding capacity reservation", "amount", amount)

-		now := time.Now()
-		if amount > 0 {
+			// Parts of this function require that Spec.CapacityReservations.Replicas always equals 1.
+			// Enforce that rule no matter what the `amount` value is
+			for i := 0; i < amount; i++ {
 				copy.Spec.CapacityReservations = append(copy.Spec.CapacityReservations, v1alpha1.CapacityReservation{
 					EffectiveTime:  metav1.Time{Time: now},
 					ExpirationTime: metav1.Time{Time: now.Add(scale.trigger.Duration.Duration)},
-				Replicas:       amount,
+					Replicas:       1,
 				})
+			}
 			added += amount
 		} else if amount < 0 {
-			var reservations []v1alpha1.CapacityReservation
-
-			var (
-				found    bool
-				foundIdx int
-			)
-
-			for i, r := range copy.Spec.CapacityReservations {
-				r := r
-				if !found && r.Replicas+amount == 0 {
-					found = true
-					foundIdx = i
-				} else {
-					// Note that we nil-check max replicas because this "fix" is needed only when there is the upper limit of runners.
-					// In other words, you don't need to reset effective time and expiration time when there is no max replicas.
-					// That's because the desired replicas would already contain the reservation since it's creation.
-					if found && copy.Spec.MaxReplicas != nil && i > foundIdx+*copy.Spec.MaxReplicas {
-						// Update newer CapacityReservations' time to now to trigger reconcile
-						// Without this, we might stuck in minReplicas unnecessarily long.
-						// That is, we might not scale up after an ephemeral runner has been deleted
-						// until a new scale up, all runners finish, or after DefaultRunnerPodRecreationDelayAfterWebhookScale
-						// See https://github.com/actions/actions-runner-controller/issues/2254 for more context.
-						r.EffectiveTime = metav1.Time{Time: now}
-
-						// We also reset the scale trigger expiration time, so that you don't need to tweak
-						// scale trigger duratoin depending on maxReplicas.
-						// A detailed explanation follows.
-						//
-						// Let's say maxReplicas=3 and the workflow job of status=canceled result in deleting the first capacity reservation hence i=0.
-						// We are interested in at least four reservations and runners:
-						// i=0 - already included in the current desired replicas, but just got deleted
-						// i=1-2 - already included in the current desired replicas
-						// i=3 - not yet included in the current desired replicas, might have been expired while waiting in the queue
-						//
-						// i=3 is especially important here- If we didn't reset the expiration time of 3rd reservation,
-						// it might expire before a corresponding runner is created, due to the delay between the expiration timer starts and the runner is created.
-						//
-						// Why is there such delay? Because ARC implements the scale duration and expiration as such...
-						// The expiration timer starts when the reservation is created, while the runner is created only after the corresponding reservation fits within maxReplicas.
-						//
-						// We address that, by resetting the expiration time for fourth(i=3 in the above example) and subsequent reservations when the first reservation gets cancelled.
-						r.ExpirationTime = metav1.Time{Time: now.Add(scale.trigger.Duration.Duration)}
-					}
-					reservations = append(reservations, r)
-				}
-			}
-
-			copy.Spec.CapacityReservations = reservations
-
-			completed += amount
+			scale.log.V(2).Info("Removing capacity reservation", "amount", -amount)
+
+			// Remove the requested number of reservations unless there are not that many left
+			if len(copy.Spec.CapacityReservations) > -amount {
+				copy.Spec.CapacityReservations = copy.Spec.CapacityReservations[-amount:]
+			} else {
+				copy.Spec.CapacityReservations = nil
+			}
+			// This "completed" represents the number of completed and therefore removed runners in this batch,
+			// which is logged later.
+			// As the amount is negative for a scale-down trigger, we make the "completed" amount positive by negating the amount.
+			// That way, the user can see the number of removed runners(like 3), rather than the delta (like -3) in the number of runners.
+			completed -= amount
 		}
 	}

-	before := len(hra.Spec.CapacityReservations)
-	expired := before - len(copy.Spec.CapacityReservations)
 	after := len(copy.Spec.CapacityReservations)

 	s.Log.V(1).Info(
@@ -234,9 +277,5 @@ func (s *batchScaler) batchScale(ctx context.Context, batch batchScaleOperation)
 		"after", after,
 	)

-	if err := s.Client.Patch(ctx, copy, client.MergeFrom(&hra)); err != nil {
-		return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err)
-	}
-
-	return nil
+	return copy, nil
 }
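planBatchScale keeps a starved reservation's original duration but restarts its clock at "now". A standalone sketch of just that computation, with simplified types rather than the controller's structs:

	package main

	import (
		"fmt"
		"time"
	)

	type reservation struct {
		effective  time.Time
		expiration time.Time
	}

	// extendStarved restarts the expiry clock for reservations beyond maxReplicas,
	// i.e. the ones that have no runner yet, preserving each one's intended duration.
	func extendStarved(reservations []reservation, maxReplicas int, now time.Time) {
		for i := maxReplicas; i < len(reservations); i++ {
			duration := reservations[i].expiration.Sub(reservations[i].effective)
			reservations[i].effective = now
			reservations[i].expiration = now.Add(duration)
		}
	}

	func main() {
		t0 := time.Now().Add(-time.Minute)
		rs := []reservation{
			{effective: t0, expiration: t0.Add(10 * time.Minute)}, // within maxReplicas, untouched
			{effective: t0, expiration: t0.Add(10 * time.Minute)}, // starved, clock restarted
		}
		extendStarved(rs, 1, time.Now())
		fmt.Println(rs[0].expiration.After(rs[1].expiration)) // false: the starved one now expires later
	}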
@@ -0,0 +1,166 @@
package actionssummerwindnet

import (
	"context"
	"testing"
	"time"

	"github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1"
	"github.com/go-logr/logr"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestPlanBatchScale(t *testing.T) {
	s := &batchScaler{Log: logr.Discard()}

	var (
		expiry   = 10 * time.Second
		interval = 3 * time.Second

		t0 = time.Now()
		t1 = t0.Add(interval)
		t2 = t1.Add(interval)
	)

	check := func(t *testing.T, amount int, newExpiry time.Duration, wantReservations []v1alpha1.CapacityReservation) {
		t.Helper()

		var (
			op = batchScaleOperation{
				scaleOps: []scaleOperation{
					{
						log: logr.Discard(),
						trigger: v1alpha1.ScaleUpTrigger{
							Amount:   amount,
							Duration: metav1.Duration{Duration: newExpiry},
						},
					},
				},
			}

			hra = &v1alpha1.HorizontalRunnerAutoscaler{
				Spec: v1alpha1.HorizontalRunnerAutoscalerSpec{
					MaxReplicas: intPtr(1),
					ScaleUpTriggers: []v1alpha1.ScaleUpTrigger{
						{
							Amount:   1,
							Duration: metav1.Duration{Duration: newExpiry},
						},
					},
					CapacityReservations: []v1alpha1.CapacityReservation{
						{
							EffectiveTime:  metav1.NewTime(t0),
							ExpirationTime: metav1.NewTime(t0.Add(expiry)),
							Replicas:       1,
						},
						{
							EffectiveTime:  metav1.NewTime(t1),
							ExpirationTime: metav1.NewTime(t1.Add(expiry)),
							Replicas:       1,
						},
					},
				},
			}
		)

		want := hra.DeepCopy()
		want.Spec.CapacityReservations = wantReservations

		got, err := s.planBatchScale(context.Background(), op, hra, t2)
		require.NoError(t, err)
		require.Equal(t, want, got)
	}

	t.Run("scale up", func(t *testing.T) {
		check(t, 1, expiry, []v1alpha1.CapacityReservation{
			{
				// This is kept based on t0 because it falls within maxReplicas
				// i.e. the corresponding runner has assumbed to be already deployed.
				EffectiveTime:  metav1.NewTime(t0),
				ExpirationTime: metav1.NewTime(t0.Add(expiry)),
				Replicas:       1,
			},
			{
				// Updated from t1 to t2 due to this exceeded maxReplicas
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
			{
				// This is based on t2(=now) because it has been added just now.
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
		})
	})

	t.Run("scale up reuses previous scale trigger duration for extension", func(t *testing.T) {
		newExpiry := expiry + time.Second
		check(t, 1, newExpiry, []v1alpha1.CapacityReservation{
			{
				// This is kept based on t0 because it falls within maxReplicas
				// i.e. the corresponding runner has assumbed to be already deployed.
				EffectiveTime:  metav1.NewTime(t0),
				ExpirationTime: metav1.NewTime(t0.Add(expiry)),
				Replicas:       1,
			},
			{
				// Updated from t1 to t2 due to this exceeded maxReplicas
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
			{
				// This is based on t2(=now) because it has been added just now.
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(newExpiry)),
				Replicas:       1,
			},
		})
	})

	t.Run("scale down", func(t *testing.T) {
		check(t, -1, expiry, []v1alpha1.CapacityReservation{
			{
				// Updated from t1 to t2 due to this exceeded maxReplicas
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
		})
	})

	t.Run("scale down is not affected by new scale trigger duration", func(t *testing.T) {
		check(t, -1, expiry+time.Second, []v1alpha1.CapacityReservation{
			{
				// Updated from t1 to t2 due to this exceeded maxReplicas
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
		})
	})

	// TODO: Keep refreshing the expiry date even when there are no other scale down/up triggers before the expiration
	t.Run("extension", func(t *testing.T) {
		check(t, 0, expiry, []v1alpha1.CapacityReservation{
			{
				// This is kept based on t0 because it falls within maxReplicas
				// i.e. the corresponding runner has assumbed to be already deployed.
				EffectiveTime:  metav1.NewTime(t0),
				ExpirationTime: metav1.NewTime(t0.Add(expiry)),
				Replicas:       1,
			},
			{
				// Updated from t1 to t2 due to this exceeded maxReplicas
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
		})
	})
}
@@ -30,7 +30,7 @@ import (
|
|||||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
gogithub "github.com/google/go-github/v47/github"
|
gogithub "github.com/google/go-github/v52/github"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/client-go/tools/record"
|
"k8s.io/client-go/tools/record"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ import (
	actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1"
	"github.com/go-logr/logr"
-	"github.com/google/go-github/v47/github"
+	"github.com/google/go-github/v52/github"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -376,19 +376,24 @@ func TestGetRequest(t *testing.T) {

 func TestGetValidCapacityReservations(t *testing.T) {
	now := time.Now()
+	duration, _ := time.ParseDuration("10m")
+	effectiveTime := now.Add(-duration)
+
	hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
		Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
			CapacityReservations: []actionsv1alpha1.CapacityReservation{
				{
+					EffectiveTime:  metav1.Time{Time: effectiveTime.Add(-time.Second)},
					ExpirationTime: metav1.Time{Time: now.Add(-time.Second)},
					Replicas:       1,
				},
				{
+					EffectiveTime:  metav1.Time{Time: effectiveTime},
					ExpirationTime: metav1.Time{Time: now},
					Replicas:       2,
				},
				{
+					EffectiveTime:  metav1.Time{Time: effectiveTime.Add(time.Second)},
					ExpirationTime: metav1.Time{Time: now.Add(time.Second)},
					Replicas:       3,
				},
@@ -8,7 +8,7 @@ import (
	"time"

	github2 "github.com/actions/actions-runner-controller/github"
-	"github.com/google/go-github/v47/github"
+	"github.com/google/go-github/v52/github"

	"github.com/actions/actions-runner-controller/github/fake"
@@ -20,6 +20,7 @@ import (
	"context"
	"errors"
	"fmt"
+	"k8s.io/apimachinery/pkg/api/resource"
	"reflect"
	"strconv"
	"strings"
@@ -30,7 +31,6 @@ import (
	"github.com/go-logr/logr"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
@@ -607,11 +607,14 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
		if runnerSpec.ContainerMode == "kubernetes" {
			return pod, errors.New("volume mount \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes")
		}
+
+		podSpecIsPresent, index := workVolumeMountPresent(pod.Spec.Containers[0].VolumeMounts)
+		if podSpecIsPresent {
			// remove work volume since it will be provided from runnerSpec.Volumes
			// if we don't remove it here we would get a duplicate key error, i.e. two volumes named work
-		_, index := workVolumeMountPresent(pod.Spec.Containers[0].VolumeMounts)
			pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts[:index], pod.Spec.Containers[0].VolumeMounts[index+1:]...)
		}
+	}

	pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, runnerSpec.VolumeMounts...)
 }
@@ -623,12 +626,14 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
		if runnerSpec.ContainerMode == "kubernetes" {
			return pod, errors.New("volume \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes")
		}
-		_, index := workVolumePresent(pod.Spec.Volumes)
+
+		podSpecIsPresent, index := workVolumePresent(pod.Spec.Volumes)
+		if podSpecIsPresent {
			// remove work volume since it will be provided from runnerSpec.Volumes
			// if we don't remove it here we would get a duplicate key error, i.e. two volumes named work
			pod.Spec.Volumes = append(pod.Spec.Volumes[:index], pod.Spec.Volumes[index+1:]...)
		}
+	}

	pod.Spec.Volumes = append(pod.Spec.Volumes, runnerSpec.Volumes...)
 }
@@ -810,6 +815,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
		dockerRegistryMirror = *runnerSpec.DockerRegistryMirror
	}

+	if runnerSpec.DockerVarRunVolumeSizeLimit == nil {
+		runnerSpec.DockerVarRunVolumeSizeLimit = resource.NewScaledQuantity(1, resource.Mega)
+	}
+
	// Be aware some of the environment variables are used
	// in the runner entrypoint script
	env := []corev1.EnvVar{
@@ -1080,7 +1090,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
			VolumeSource: corev1.VolumeSource{
				EmptyDir: &corev1.EmptyDirVolumeSource{
					Medium:    corev1.StorageMediumMemory,
-					SizeLimit: resource.NewScaledQuantity(1, resource.Mega),
+					SizeLimit: runnerSpec.DockerVarRunVolumeSizeLimit,
				},
			},
		},
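Taken together, the two hunks above make the size limit of the in-memory docker `/run` emptyDir configurable instead of hard-coding 1M. A minimal sketch of how the new field might be set on a runner spec, assuming the Go field `DockerVarRunVolumeSizeLimit` is exposed in the CRD as `dockerVarRunVolumeSizeLimit` (the YAML field name and the values below are illustrative, not confirmed):

```yaml
# Hypothetical example: raise the size limit of the tmpfs-backed /run volume
# used by the docker sidecar. The field name is assumed from the Go spec field
# DockerVarRunVolumeSizeLimit; verify it against the generated CRD before use.
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy
spec:
  replicas: 1
  template:
    spec:
      repository: my-org/my-repo
      dockerVarRunVolumeSizeLimit: 1M   # the default is assumed to remain 1M when omitted
```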
@@ -9,7 +9,7 @@ import (

	"github.com/actions/actions-runner-controller/github"
	"github.com/go-logr/logr"
-	gogithub "github.com/google/go-github/v47/github"
+	gogithub "github.com/google/go-github/v52/github"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
@@ -10,7 +10,7 @@ When a new [runner](https://github.com/actions/runner) version is released, new
images need to be built in
[actions-runner-controller/releases](https://github.com/actions-runner-controller/releases).
This is currently started by the
-[release-runners](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/release-runners.yaml)
+[release-runners](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/arc-release-runners.yaml)
workflow, although this only starts when the set of files containing the runner
version is updated (and this is currently done manually).

@@ -19,7 +19,7 @@ version is updated (and this is currently done manually).
We can have another workflow running on a cadence (hourly seems sensible) and checking for new runner
releases, creating a PR updating `RUNNER_VERSION` in:

-- `.github/workflows/release-runners.yaml`
+- `.github/workflows/arc-release-runners.yaml`
- `Makefile`
- `runner/Makefile`
- `test/e2e/e2e_test.go`
@@ -26,7 +26,7 @@ At the moment we have three workflows that validate Go code:
- [Validate ARC](https://github.com/actions/actions-runner-controller/blob/01e9dd3/.github/workflows/validate-arc.yaml):
  this is a bit of a catch-all workflow, other than Go tests this also validates
  Kubernetes manifests, runs `go generate`, `go fmt` and `go vet`
-- [Run CodeQL](https://github.com/actions/actions-runner-controller/blob/a095f0b66aad5fbc8aa8d7032f3299233e4c84d2/.github/workflows/run-codeql.yaml)
+- [Run CodeQL](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/global-run-codeql.yaml)

### Proposal
213
docs/adrs/2023-05-08-exposing-metrics.md
Normal file
@@ -0,0 +1,213 @@
# Exposing metrics

Date: 2023-05-08

**Status**: Proposed

## Context

Prometheus metrics are a common way to monitor the cluster. Providing metrics
can be a helpful way to monitor scale sets and the health of the ephemeral runners.

## Proposal

Two main components are driving the behavior of the scale set:

1. ARC controllers responsible for managing Kubernetes resources.
2. The `AutoscalingListener`, driver of the autoscaling solution responsible for
   describing the desired state.

We can approach publishing those metrics in 3 different ways.

### Option 1: Expose a metrics endpoint for the controller-manager and every instance of the listener

To expose metrics, we would need to create 3 additional resources:

1. `ServiceMonitor` - a resource used by Prometheus to match namespaces and
   services from where it needs to gather metrics
2. `Service` for the `gha-runner-scale-set-controller` - service that will
   target the ARC controller `Deployment`
3. `Service` for each `gha-runner-scale-set` listener - service that will target
   a single listener pod for each `AutoscalingRunnerSet`

#### Pros

- Easy to control which scale set exposes metrics and which does not.
- Easy to implement using helm charts in case they are enabled per chart
  installation.

#### Cons

- With a cluster running many scale sets, we are going to create a lot of
  resources.
- In case metrics are enabled on the controller manager level, and they should
  be applied across all `AutoscalingRunnerSets`, it is difficult to inherit this
  configuration by applying helm charts.

### Option 2: Create a single metrics aggregator service

To create an aggregator service, we can create a simple web application
responsible for publishing and gathering metrics. All listeners would be
responsible for communicating the metrics on each message, and controllers are
responsible for communicating the metrics on each reconciliation.

The application can be executed as a single pod, or as a side container next to
the manager.

#### Running the aggregator as a container in the controller-manager pod

**Pros**

- It exists side by side and follows the life cycle of the controller
  manager
- We don't need to introduce another controller managing the state of the pod

**Cons**

- Crashes of the aggregator can influence the controller manager execution
- The controller manager pod needs more resources to run

#### Running the aggregator in a separate pod

**Pros**

- Does not influence the controller manager pod
- The life cycle of the metrics can be controlled by the controller manager (by
  implementing another controller)

**Cons**

- We need to implement the controller that can spin up the aggregator in case of
  a crash.
- If we choose not to implement the controller, a resource like `Deployment`
  can be used to manage the aggregator, but we lose control over its life cycle.

#### Metrics webserver requirements

1. Create a web server with a single `/metrics` endpoint. The endpoint will have
   `POST` and `GET` methods registered. The `GET` is used by Prometheus to
   fetch the metrics, while the `POST` is going to be used by controllers and
   listeners to publish their metrics.
2. `ServiceMonitor` - to target the metrics aggregator service
3. `Service` sitting in front of the web server.

**Pros**

- This implementation requires only a few additional resources to be created
  in a cluster.
- The web server is easy to implement and easy to document - all metrics are aggregated in a
  single package, and the web server only needs to apply them to its state on
  `POST`. The `GET` handler is simple.
- We can avoid Pushgateway from Prometheus.

**Cons**

- Another image that we need to publish on release.
- A change in metric configuration (on manager update) would require re-creation
  of all listeners. This is not a big problem but is something to point out.
- Managing requests/limits can be tricky.

### Option 3: Use a Prometheus Pushgateway

#### Pros

- Using a supported way of pushing the metrics.
- Easy to implement using their library.

#### Cons

- In the Prometheus docs, they specify that: "Usually, the only valid use case
  for Pushgateway is for capturing the outcome of a service-level batch job".
  The listener does not really fit this criterion.
- Pushgateway is a single point of failure and potential bottleneck.
- You lose Prometheus's automatic instance health monitoring via the up metric (generated on every scrape).
- The Pushgateway never forgets series pushed to it and will expose them to Prometheus forever unless those series are manually deleted via the Pushgateway's API.

## Decision

Since there are many ways in which you can collect metrics, we have decided not
to apply `prometheus-operator` resources nor a `Service`.

The responsibility of the controller and the autoscaling listener is
only to expose metrics. It is up to the user to decide how to collect them.

When installing ARC, the configuration for both the controller manager
and autoscaling listeners' metric servers is established.

### Controller metrics

By default, the metrics server listens on `0.0.0.0:8080`.
You can control the address of the metrics server using the `--metrics-addr` flag.

Metrics can be collected from the `/metrics` endpoint.

If the value of `--metrics-addr` is an empty string, the metrics server won't be
started.

### Autoscaling listeners

By default, the metrics server listens on `0.0.0.0:8080`.
The endpoint used to expose metrics is `/metrics`.

You can control both the address and the endpoint using the `--listener-metrics-addr` and `--listener-metrics-endpoint` flags.

If the value of `--listener-metrics-addr` is an empty string, the metrics server won't be
started.

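Because this decision ships neither a `Service` nor any prometheus-operator resources, collection is left entirely to the cluster operator. As a minimal sketch of one way to scrape the controller-manager endpoint described above, assuming the default `0.0.0.0:8080` listen address, an `arc-systems` installation namespace, and a declared container port (all of these are assumptions that may differ in your installation):

```yaml
# Hypothetical Prometheus scrape job for the ARC controller-manager metrics endpoint.
# Namespace, port, and selection criteria below are illustrative; adjust to your setup.
scrape_configs:
  - job_name: arc-controller-manager
    kubernetes_sd_configs:
      - role: pod
        namespaces:
          names: ["arc-systems"]
    relabel_configs:
      # keep only pods that declare the assumed metrics port
      - source_labels: [__meta_kubernetes_pod_container_port_number]
        regex: "8080"
        action: keep
```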
### Metrics exposed by the controller

To get a better understanding of the health and workings of the cluster
resources, we need to expose the following metrics:

- `pending_ephemeral_runners` - Number of ephemeral runners in a pending state.
  This information can show the latency between creating an `EphemeralRunner`
  resource, and having an ephemeral runner pod started and ready to receive a
  job.
- `running_ephemeral_runners` - Number of ephemeral runners currently running.
  This information is helpful to see how many ephemeral runner pods are running
  at any given time.
- `failed_ephemeral_runners` - Number of ephemeral runners in a `Failed` state.
  This information is helpful to catch a faulty image, or some underlying
  problem. When the ephemeral runner controller is not able to start the
  ephemeral runner pod after multiple retries, it will set the state of the
  `EphemeralRunner` to failed. Since the controller can not recover from this
  state, it can be useful to set Prometheus alerts to catch this issue quickly
  (a sketch of such an alert rule follows this list).

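A minimal sketch of a Prometheus alerting rule built on the `failed_ephemeral_runners` metric described above. The fully prefixed metric name and the thresholds are assumptions based on the naming scheme stated later in this ADR; check the `/metrics` output of your controller-manager before relying on it:

```yaml
# Hypothetical Prometheus rule file; metric name and labels are assumed, not confirmed.
groups:
  - name: arc-ephemeral-runners
    rules:
      - alert: EphemeralRunnersFailed
        expr: github_runner_scale_set_controller_failed_ephemeral_runners > 0
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "One or more ephemeral runners are stuck in a Failed state"
```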
### Metrics exposed by the `AutoscalingListener`

Since the listener is responsible for communicating the state with the actions
service, it can expose actions service related data through metrics. In
particular:

- `available_jobs` - Number of jobs with `runs-on` matching the runner scale set name. Jobs are not yet assigned but are acquired by the runner scale set.
- `acquired_jobs` - Number of jobs acquired by the scale set.
- `assigned_jobs` - Number of jobs assigned to this scale set.
- `running_jobs` - Number of jobs running (or about to be run).
- `registered_runners` - Number of registered runners.
- `busy_runners` - Number of registered runners running a job.
- `min_runners` - Minimum number of runners configured for the scale set.
- `max_runners` - Maximum number of runners configured for the scale set.
- `desired_runners` - Number of runners desired by the scale set.
- `idle_runners` - Number of registered runners not running a job.
- `available_jobs_total` - Total number of jobs available for the scale set (runs-on matches and the scale set passes all the runner group permission checks).
- `acquired_jobs_total` - Total number of jobs acquired by the scale set.
- `assigned_jobs_total` - Total number of jobs assigned to the scale set.
- `started_jobs_total` - Total number of jobs started.
- `completed_jobs_total` - Total number of jobs completed.
- `job_queue_duration_seconds` - Time spent waiting for workflow jobs to get assigned to the scale set after queueing (in seconds).
- `job_startup_duration_seconds` - Time spent waiting for a workflow job to get started on the runner owned by the scale set (in seconds).
- `job_execution_duration_seconds` - Time spent executing workflow jobs by the scale set (in seconds).

### Metric names

Listener metrics belong to the `github_runner_scale_set` subsystem, so the names
are going to have the `github_runner_scale_set_` prefix.

Controller metrics belong to the `github_runner_scale_set_controller` subsystem,
so the names are going to have the `github_runner_scale_set_controller_` prefix.

## Consequences

Users can define alerts and monitor the behavior of both the actions-based metrics
(gathered from the listener) and the Kubernetes resource-based metrics
(gathered from the controller manager).

54
docs/adrs/2023-07-18-customize-listener-pod.md
Normal file
@@ -0,0 +1,54 @@
# Customize listener pod

**Status**: Proposed

## Context

The Autoscaling listener is a critical component of the autoscaling solution, as it monitors the workload and communicates with the ephemeral runner set to adjust the number of runners as needed.

A problem can arise when cluster policies are configured to disallow pods with (or without) certain fields. Since the Autoscaling listener pod is an internal detail that is not currently customizable, it can be a blocker for users with Kubernetes clusters that enforce such policies.

## Decision

Expose a field on the `AutoscalingRunnerSetSpec` resource called `ListenerTemplate` of type `PodTemplateSpec`.

Expose a field on the `AutoscalingListenerSpec` resource called `Template` of type `PodTemplateSpec`.

The `AutoscalingRunnerSetController` is responsible for creating the `AutoscalingListener` with the `ListenerTemplate`.

The `AutoscalingListenerController` then creates the listener pod based on the default spec, and the customized spec.

List of fields that are going to be ignored by the merge:

- `spec.serviceAccountName`: Created by the AutoscalingListener.
- reserved `metadata.labels`: Labels that collide with reserved labels used by the system are ignored.
- reserved `spec.containers[0].env`: Environment variables used by the listener application
- `metadata.name`: Name of the listener pod
- `metadata.namespace`: Namespace of the listener pod

The change extends the gha-runner-scale-set template. We will extend the `values.yaml` file to add an optional `listenerTemplate` object (a sketch follows the merge rules below).

If not provided, the listener will be created the same way it was created before. Otherwise, the `listenerTemplate.metadata` and the `listenerTemplate.spec` are going to be merged with the default listener specification.

The way the merge will work is:

1. Create a default spec used for the listener
2. All non-reserved fields are going to be applied from the provided `listenerTemplate` if they are not empty. If empty, the default configuration is used.
3. For the containers:
   1. If the container name is "listener", values specified for that container are going to be merged with the default listener container spec. The name "listener" serves just as an indicator that the container spec should be merged with the listener container. The name will be overwritten by the controller. All fields are optional, and non-null fields are merged as described above.
   2. If the container name is **not** "listener", the spec provided for that container will be appended to `pod.spec.containers` without any modifications. The fields that must be specified are the required fields of the Kubernetes container spec.

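A minimal sketch of what a `listenerTemplate` block in the scale set's `values.yaml` could look like under this proposal. The container name "listener" is the merge indicator described above; the labels, resource values, and the extra sidecar are purely illustrative assumptions:

```yaml
# Hypothetical values.yaml fragment for gha-runner-scale-set under this ADR
listenerTemplate:
  metadata:
    labels:
      team: platform             # non-reserved labels would be merged
  spec:
    containers:
      - name: listener           # merged with the default listener container spec
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
      - name: sidecar            # any other name would be appended as an extra container
        image: busybox:1.36
        command: ["sh", "-c", "sleep infinity"]
```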
### Pros:

- Env `CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY` can be removed as a global configuration for building Autoscaling listener resources
- Ability to customize securityContext, requests, limits, and other fields.
- Avoid re-creating CRDs whenever a new field requirement occurs. Fields that are not reserved by the controller are applied if specified.

### Cons:

- The documentation must be kept updated whenever new reserved fields are introduced.
- Since the listener spec can be customized, debugging possible problems with a customized spec can be harder.

## Consequences

With the listener pod spec exposed, we can provide a way to run ARC for users whose cluster policies currently prohibit them from doing so.
@@ -22,7 +22,7 @@ _Note: Links are provided further down to create an app for your logged in user

* Actions (read)
* Administration (read / write)
-* Checks (read) (if you are going to use [Webhook Driven Scaling](#webhook-driven-scaling))
+* Checks (read) (if you are going to use [Webhook Driven Scaling](automatically-scaling-runners.md#webhook-driven-scaling))
* Metadata (read)

**Required Permissions for Organization Runners:**<br />
@@ -39,7 +39,7 @@ _Note: All API routes mapped to their permissions can be found [here](https://do

**Subscribe to events**

-At this point you have a choice of configuring a webhook, a webhook is needed if you are going to use [webhook driven scaling](#webhook-driven-scaling). The webhook can be configured centrally in the GitHub app itself or separately. In either case you need to subscribe to the `Workflow Job` event.
+At this point you have a choice of configuring a webhook; a webhook is needed if you are going to use [webhook driven scaling](automatically-scaling-runners.md#webhook-driven-scaling). The webhook can be configured centrally in the GitHub app itself or separately. In either case you need to subscribe to the `Workflow Job` event.

---

@@ -133,7 +133,7 @@ The `HorizontalRunnerAutoscaler` will poll GitHub for the number of runners in t
**Benefits of this metric**
1. Supports named repositories server-side the same as the `TotalNumberOfQueuedAndInProgressWorkflowRuns` metric [#313](https://github.com/actions/actions-runner-controller/pull/313)
2. Supports GitHub organization wide scaling without maintaining an explicit list of repositories, this is especially useful for those that are working at a larger scale. [#223](https://github.com/actions/actions-runner-controller/pull/223)
-3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [GitHub labels](#runner-labels)
+3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [GitHub labels](using-arc-runners-in-a-workflow.md#runner-labels)
4. Supports scaling desired runner count on both a percentage increase / decrease basis as well as on a fixed increase / decrease count basis [#223](https://github.com/actions/actions-runner-controller/pull/223) [#315](https://github.com/actions/actions-runner-controller/pull/315)

**Drawbacks of this metric**
@@ -186,6 +186,38 @@ spec:
  scaleDownAdjustment: 1 # The scale down runner count subtracted from the desired count
```

+**Combining Pull Driven Scaling Metrics**
+
+If a HorizontalRunnerAutoscaler is configured with a secondary metric of `TotalNumberOfQueuedAndInProgressWorkflowRuns`, then be aware that the controller will check the primary metric of `PercentageRunnersBusy` first and will only use the secondary metric to calculate the desired replica count if the primary metric returns 0 desired replicas.
+
+`PercentageRunnersBusy` metrics must appear before `TotalNumberOfQueuedAndInProgressWorkflowRuns`; otherwise, the controller will fail to process the `HorizontalRunnerAutoscaler`. A valid configuration follows.
+
+```yaml
+---
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: HorizontalRunnerAutoscaler
+metadata:
+  name: example-runner-deployment-autoscaler
+spec:
+  scaleTargetRef:
+    kind: RunnerDeployment
+    # # In case the scale target is RunnerSet:
+    # kind: RunnerSet
+    name: example-runner-deployment
+  minReplicas: 1
+  maxReplicas: 5
+  metrics:
+  - type: PercentageRunnersBusy
+    scaleUpThreshold: '0.75' # The percentage of busy runners at which the number of desired runners are re-evaluated to scale up
+    scaleDownThreshold: '0.3' # The percentage of busy runners at which the number of desired runners are re-evaluated to scale down
+    scaleUpAdjustment: 2 # The scale up runner count added to desired count
+    scaleDownAdjustment: 1 # The scale down runner count subtracted from the desired count
+  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
+    repositoryNames:
+    # A repository name is the REPO part of `github.com/OWNER/REPO`
+    - myrepo
+```
+
## Webhook Driven Scaling

> This feature requires controller version => [v0.20.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.20.0)
@@ -224,34 +256,54 @@ spec:
      duration: "30m"
```

-The lifecycle of a runner provisioned from a webhook is different to a runner provisioned from the pull based scaling method:
+With the `workflowJob` trigger, each event adds or subtracts a single runner. The `scaleUpTriggers.amount` field is ignored.
+
+The `duration` field is there because event delivery is not guaranteed. If a scale-up event is received, but the corresponding
+scale-down event is not, then the extra runner would be left running forever if there were not some clean-up mechanism.
+The `duration` field sets the maximum amount of time to wait for a scale-down event. Scale-down happens at the
+earlier of receiving the scale-down event or the expiration of `duration` after the scale-up event is processed and
+the scale-up itself is initiated.
+
+The lifecycle of a runner provisioned from a webhook is different from that of a runner provisioned from the pull based scaling method:

1. GitHub sends a `workflow_job` event to ARC with `status=queued`
-2. ARC finds a HRA with a `workflow_job` webhook scale trigger that backs a RunnerDeployment / RunnerSet with matching runner labels
+2. ARC finds the HRA with a `workflow_job` webhook scale trigger that backs a RunnerDeployment / RunnerSet with matching runner labels. (If it finds more than one match, the event is ignored.)
-3. The matched HRA adds a unit to its `capacityReservations` list
+3. The matched HRA adds a `capacityReservation` to its list and sets it to expire at current time + `HRA.spec.scaleUpTriggers[].duration`
-4. ARC adds a replica and sets the EffectiveTime of that replica to current + `HRA.spec.scaleUpTriggers[].duration`
+4. If there are fewer replicas running than `maxReplicas`, HRA adds a replica and sets the EffectiveTime of that replica to the current time

-At this point there are a few things that can happen, either the job gets allocated to the runner or the runner is left dangling due to it not being used, if the runner gets assigned the job that triggered the scale up the lifecycle looks like this:
+At this point there are a few things that can happen:
+
+1. Due to idle runners already being available, the job is assigned to one of them and the new runner is left dangling due to it not being used
+2. The job gets allocated to the runner just launched
+3. If there are already `maxReplicas` replicas running, the job waits for its `capacityReservation` to be assigned to one of them
+
+If the runner gets assigned the job that triggered the scale up, the lifecycle looks like this:

1. The new runner gets allocated the job and processes it
2. Upon the job ending GitHub sends another `workflow_job` event to ARC but with `status=completed`
3. The HRA removes the oldest capacity reservation from its `capacityReservations` and picks a runner to terminate ensuring it isn't busy via the GitHub API beforehand
+
+If the job has to wait for a runner because there are already `maxReplicas` replicas running, the lifecycle looks like this:
+
+1. A `capacityReservation` is added to the list, but no scale-up happens because that would exceed `maxReplicas`
+2. When one of the existing runners finishes a job, GitHub sends another `workflow_job` event to ARC but with `status=completed` (or `status=canceled` if the job was cancelled)
+3. The HRA removes the oldest capacity reservation from its `capacityReservations`, the oldest waiting `capacityReservation` becomes active, and its `duration` timer starts
+4. GitHub assigns a waiting job to the newly available runner

If the job is cancelled before it is allocated to a runner then the lifecycle looks like this:

1. Upon the job cancellation GitHub sends another `workflow_job` event to ARC but with `status=cancelled`
2. The HRA removes the oldest capacity reservation from its `capacityReservations` and picks a runner to terminate ensuring it isn't busy via the GitHub API beforehand

-If runner is never used due to other runners matching needed runner group and required runner labels are allocated the job then the lifecycle looks like this:
+If the `status=completed` or `status=cancelled` event is never delivered to ARC (which happens occasionally) then the lifecycle looks like this:

1. The scale trigger duration specified via `HRA.spec.scaleUpTriggers[].duration` elapses
-2. The HRA thinks the capacity reservation is expired, removes it from HRA's `capacityReservations` and terminates the expired runner ensuring it isn't busy via the GitHub API beforehand
+2. The HRA notices that the capacity reservation has expired, removes it from HRA's `capacityReservation` list and (unless there are `maxReplicas` running and jobs waiting) terminates the expired runner ensuring it isn't busy via the GitHub API beforehand

Your `HRA.spec.scaleUpTriggers[].duration` value should be set long enough to account for the following things:

-1. the potential amount of time it could take for a pod to become `Running` e.g. you need to scale horizontally because there isn't a node avaliable
+1. The potential amount of time it could take for a pod to become `Running` e.g. you need to scale horizontally because there isn't a node available +
-2. the amount of time it takes for GitHub to allocate a job to that runner
+2. The amount of time it takes for GitHub to allocate a job to that runner +
-3. the amount of time it takes for the runner to notice the allocated job and starts running it
+3. The amount of time it takes for the runner to notice the allocated job and start running it +
+4. The length of time it takes for the runner to complete the job

### Install with Helm

@@ -411,8 +463,6 @@ The main use case for scaling from 0 is with the `HorizontalRunnerAutoscaler` ki

`PercentageRunnersBusy` can't be used alone for scale-from-zero as, by its definition, it needs one or more GitHub runners to become `busy` to be able to scale. If there isn't a runner to pick up a job and enter a `busy` state then the controller will never know to provision a runner to begin with as this metric has no knowledge of the job queue and is relying on using the number of busy runners as a means for calculating the desired replica count.

-If a HorizontalRunnerAutoscaler is configured with a secondary metric of `TotalNumberOfQueuedAndInProgressWorkflowRuns` then be aware that the controller will check the primary metric of `PercentageRunnersBusy` first and will only use the secondary metric to calculate the desired replica count if the primary metric returns 0 desired replicas.
-
Webhook-based autoscaling is the best option as it is relatively easy to configure and also it can scale quickly.

## Scheduled Overrides
@@ -504,7 +554,7 @@ This can be problematic in two scenarios:

> RunnerDeployment is not affected by Scenario 1 as RunnerDeployment-managed runners can already tolerate an arbitrarily long in-progress job while being replaced, because their graceful termination process is handled outside of the entrypoint and the Kubernetes pod termination process.

-To make it more reliable, please set `spec.template.spec.terminationGracePeriodSeconds` field and the `RUNNER_GRACEFUL_STOP_TIMEOUT` environment variable appropriately.
+To make it more reliable, please set the `spec.template.spec.terminationGracePeriodSeconds` field and the `RUNNER_GRACEFUL_STOP_TIMEOUT` environment variable appropriately. **NOTE:** if you are using the default configuration of running DinD as a sidecar, you'll need to set this environment variable in both `spec.template.spec.env` as well as `spec.template.spec.dockerEnv` for RunnerDeployment objects, otherwise the `docker` container will receive the same termination signal and exit while the remainder of the build runs. A sketch of these settings follows.

If you want the pod to terminate in approximately 110 seconds at the latest since the termination request, try a `terminationGracePeriodSeconds` of `110` and a `RUNNER_GRACEFUL_STOP_TIMEOUT` of around `90`.

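A minimal sketch of the settings described above for a RunnerDeployment running DinD as a sidecar; the 110/90 second values mirror the example in the text and should be tuned to how long your jobs actually run:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy
spec:
  template:
    spec:
      repository: my-org/my-repo
      terminationGracePeriodSeconds: 110
      env:
        - name: RUNNER_GRACEFUL_STOP_TIMEOUT
          value: "90"
      dockerEnv:
        # the docker sidecar receives the same termination signal,
        # so it needs the same graceful-stop timeout
        - name: RUNNER_GRACEFUL_STOP_TIMEOUT
          value: "90"
```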
121
docs/gha-runner-scale-set-controller/README.md
Normal file
@@ -0,0 +1,121 @@
# Autoscaling Runner Scale Sets mode

This new autoscaling mode brings numerous enhancements (described in the following sections) that will make your experience more reliable and secure.

## How it works



1. ARC is installed using the supplied Helm charts, and the controller manager pod is deployed in the specified namespace. A new `AutoScalingRunnerSet` resource is deployed via the supplied Helm charts or a customized manifest file. The `AutoScalingRunnerSet` controller calls GitHub's APIs to fetch the runner group ID that the runner scale set will belong to.
2. The `AutoScalingRunnerSet` controller calls the APIs one more time to either fetch or create a runner scale set in the `Actions Service` before creating the `Runner ScaleSet Listener` resource.
3. A `Runner ScaleSet Listener` pod is deployed by the `AutoScaling Listener Controller`. In this pod, the listener application connects to the `Actions Service` to authenticate and establish a long poll HTTPS connection. The listener stays idle until it receives a `Job Available` message from the `Actions Service`.
4. When a workflow run is triggered from a repository, the `Actions Service` dispatches individual job runs to the runners or runner scale sets where the `runs-on` property matches the name of the runner scale set or the labels of self-hosted runners (see the example workflow after this list).
5. When the `Runner ScaleSet Listener` receives the `Job Available` message, it checks whether it can scale up to the desired count. If it can, the `Runner ScaleSet Listener` acknowledges the message.
6. The `Runner ScaleSet Listener` uses a `Service Account` and a `Role` bound to that account to make an HTTPS call through the Kubernetes APIs to patch the `EphemeralRunner Set` resource with the desired replica count.
7. The `EphemeralRunner Set` attempts to create new runners and the `EphemeralRunner Controller` requests a JIT configuration token to register these runners. The controller attempts to create runner pods. If the pod's status is `failed`, the controller retries up to 5 times. After 24 hours the `Actions Service` unassigns the job if no runner accepts it.
8. Once the runner pod is created, the runner application in the pod uses the JIT configuration token to register itself with the `Actions Service`. It then establishes another HTTPS long poll connection to receive the job details it needs to execute.
9. The `Actions Service` acknowledges the runner registration and dispatches the job run details.
10. Throughout the job run execution, the runner continuously communicates the logs and job run status back to the `Actions Service`.
11. When the runner completes its job successfully, the `EphemeralRunner Controller` checks with the `Actions Service` to see if the runner can be deleted. If it can, the `Ephemeral RunnerSet` deletes the runner.

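As a concrete illustration of step 4 above, a job targets a runner scale set simply by naming it in `runs-on`. The installation name below is an assumption; it is whatever name you gave the gha-runner-scale-set Helm release:

```yaml
# .github/workflows/example.yml (illustrative)
name: example
on: workflow_dispatch
jobs:
  build:
    runs-on: arc-runner-set   # must match the scale set installation name
    steps:
      - run: echo "Hello from an ARC ephemeral runner"
```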
In addition to the increased reliability of the automatic scaling, we have worked on these improvements:

- No longer require cert-manager as a prerequisite for installing actions-runner-controller
- Reliable scale-up based on job demands and scale-down to zero runner pods
- Reduce API requests to `api.github.com`, no more API rate-limiting problems
- The GitHub Personal Access Token (PAT) or the GitHub App installation token is no longer passed to the runner pod for runner registration
- Maximum flexibility for customizing your runner pod template

### Demo

[](https://youtu.be/wQ0k5k6KW5Y)

> Will take you to YouTube for a short walkthrough of the Autoscaling Runner Scale Sets mode.

## Setup

You can follow [this quickstart guide](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller) for installation steps.

## Troubleshooting

You can follow [this troubleshooting guide](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/troubleshooting-actions-runner-controller-errors) for troubleshooting steps.

## Changelog

### v0.5.0

1. Provide scale-set listener metrics [#2559](https://github.com/actions/actions-runner-controller/pull/2559)
1. Add DrainJobsMode [#2569](https://github.com/actions/actions-runner-controller/pull/2569)
1. Trim gha-runner-scale-set to gha-rs in names and remove role type suffixes [#2706](https://github.com/actions/actions-runner-controller/pull/2706)
1. Adapt role name to prevent namespace collision [#2617](https://github.com/actions/actions-runner-controller/pull/2617)
1. Add status check before deserializing runner-registration response [#2699](https://github.com/actions/actions-runner-controller/pull/2699)
1. Add configurable log format to values.yaml and propagate it to listener [#2686](https://github.com/actions/actions-runner-controller/pull/2686)
1. Extend manager roles to accept ephemeralrunnerset/finalizers [#2493](https://github.com/actions/actions-runner-controller/pull/2493)
1. Trim repo/org/enterprise to 63 characters in label values [#2657](https://github.com/actions/actions-runner-controller/pull/2657)
1. Revert back chart renaming [#2824](https://github.com/actions/actions-runner-controller/pull/2824)
1. Discard logs on helm chart tests [#2607](https://github.com/actions/actions-runner-controller/pull/2607)
1. Use build.Version to check if resource version is a mismatch [#2521](https://github.com/actions/actions-runner-controller/pull/2521)
1. Reordering methods and constants so it is easier to look it up [#2501](https://github.com/actions/actions-runner-controller/pull/2501)
1. chore: Set build version on make-runscaleset [#2713](https://github.com/actions/actions-runner-controller/pull/2713)
1. Fix scaling back to 0 after min runners were set to number > 0 [#2742](https://github.com/actions/actions-runner-controller/pull/2742)
1. Document customization for containerModes [#2777](https://github.com/actions/actions-runner-controller/pull/2777)
1. Bump github.com/cloudflare/circl from 1.1.0 to 1.3.3 [#2628](https://github.com/actions/actions-runner-controller/pull/2628)
1. chore(deps): bump github.com/stretchr/testify from 1.8.2 to 1.8.4 [#2716](https://github.com/actions/actions-runner-controller/pull/2716)
1. Move gha-* docs out of preview [#2779](https://github.com/actions/actions-runner-controller/pull/2779)
1. Prepare 0.5.0 release [#2783](https://github.com/actions/actions-runner-controller/pull/2783)
1. Security fix [#2676](https://github.com/actions/actions-runner-controller/pull/2676)

### v0.4.0

#### ⚠️ Warning

This release contains a major change related to the way permissions are
applied to the manager ([#2276](https://github.com/actions/actions-runner-controller/pull/2276) and [#2363](https://github.com/actions/actions-runner-controller/pull/2363)).

Please evaluate these changes carefully before upgrading.

#### Major changes

1. Surface EphemeralRunnerSet stats to AutoscalingRunnerSet [#2382](https://github.com/actions/actions-runner-controller/pull/2382)
1. Improved security posture by removing list/watch secrets permission from manager cluster role
   [#2276](https://github.com/actions/actions-runner-controller/pull/2276)
1. Improved security posture by delaying role/rolebinding creation to gha-runner-scale-set during installation
   [#2363](https://github.com/actions/actions-runner-controller/pull/2363)
1. Improved security posture by supporting watching a single namespace from the controller
   [#2374](https://github.com/actions/actions-runner-controller/pull/2374)
1. Added labels to AutoscalingRunnerSet subresources to allow easier inspection [#2391](https://github.com/actions/actions-runner-controller/pull/2391)
1. Fixed bug preventing env variables from being specified
   [#2450](https://github.com/actions/actions-runner-controller/pull/2450)
1. Enhance quickstart troubleshooting guides
   [#2435](https://github.com/actions/actions-runner-controller/pull/2435)
1. Fixed ignore extra dind container when container mode type is "dind"
   [#2418](https://github.com/actions/actions-runner-controller/pull/2418)
1. Added additional cleanup finalizers [#2433](https://github.com/actions/actions-runner-controller/pull/2433)
1. gha-runner-scale-set listener pod inherits the ImagePullPolicy from the manager pod [#2477](https://github.com/actions/actions-runner-controller/pull/2477)
1. Treat `.ghe.com` domain as hosted environment [#2480](https://github.com/actions/actions-runner-controller/pull/2480)

### v0.3.0

#### Major changes

1. Runner pods are more similar to hosted runners [#2348](https://github.com/actions/actions-runner-controller/pull/2348)
1. Add support for self-signed CA certificates [#2268](https://github.com/actions/actions-runner-controller/pull/2268)
1. Fixed trailing slashes in config URLs breaking installations [#2381](https://github.com/actions/actions-runner-controller/pull/2381)
1. Fixed a bug where the listener pod would ignore proxy settings from env [#2366](https://github.com/actions/actions-runner-controller/pull/2366)
1. Added runner set name field making it optionally configurable [#2279](https://github.com/actions/actions-runner-controller/pull/2279)
1. Name and namespace labels of listener pod have been split [#2341](https://github.com/actions/actions-runner-controller/pull/2341)
1. Added chart name constraints validation on AutoscalingRunnerSet install [#2347](https://github.com/actions/actions-runner-controller/pull/2347)

### v0.2.0

#### Major changes

1. Added proxy support for the controller and the runner pods, see the new helm chart fields [#2286](https://github.com/actions/actions-runner-controller/pull/2286)
1. Added the ability to provide a pre-defined kubernetes secret for the auto scaling runner set helm chart [#2234](https://github.com/actions/actions-runner-controller/pull/2234)
1. Enhanced security posture by removing un-required permissions for the manager-role [#2260](https://github.com/actions/actions-runner-controller/pull/2260)
1. Enhanced our logging by returning an error when a runner group is defined in the values file but it's not created in GitHub [#2215](https://github.com/actions/actions-runner-controller/pull/2215)
1. Fixed helm charts issues that were preventing the use of DinD [#2291](https://github.com/actions/actions-runner-controller/pull/2291)
1. Fixed a bug that was preventing runner scale sets from being removed from the backend when they were deleted from the cluster [#2255](https://github.com/actions/actions-runner-controller/pull/2255) [#2223](https://github.com/actions/actions-runner-controller/pull/2223)
1. Fixed bugs with the helm chart definitions preventing certain values from being set [#2222](https://github.com/actions/actions-runner-controller/pull/2222)
1. Fixed a bug that prevented the configuration of a runner group for a runner scale set [#2216](https://github.com/actions/actions-runner-controller/pull/2216)

BIN
docs/gha-runner-scale-set-controller/arc-diagram-dark.png
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
docs/gha-runner-scale-set-controller/arc-diagram-light.png
(Stored with Git LFS)
Normal file
Binary file not shown.
File diff suppressed because it is too large
@@ -0,0 +1,15 @@
# Visualizing Autoscaling Runner Scale Set metrics with Grafana

With metrics introduced in [gha-runner-scale-set-0.5.0](https://github.com/actions/actions-runner-controller/releases/tag/gha-runner-scale-set-0.5.0), you can now visualize the autoscaling behavior of your runner scale set with your tool of choice. This sample shows how to visualize the metrics with [Grafana](https://grafana.com/).

## Demo



## Setup

We do not intend to provide a supported ARC dashboard. This is simply a reference and a demonstration of how you could leverage the metrics emitted by the controller-manager and listeners to visualize the autoscaling behavior of your runner scale set. We offer no promises of future upgrades to this sample.

1. Make sure to have [Grafana](https://grafana.com/docs/grafana/latest/installation/) and [Prometheus](https://prometheus.io/docs/prometheus/latest/installation/) running in your cluster.
2. Make sure that Prometheus is properly scraping the metrics endpoints of the controller-manager and listeners (see the sketch after this list).
3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json.json) into Grafana.
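For step 2, a minimal sketch of a scrape job for the listener pods. The namespace, pod label, and port below are assumptions for illustration only; compare them against the labels and ports actually present on your listener pods before use:

```yaml
# Hypothetical Prometheus scrape job for runner scale set listener pods.
scrape_configs:
  - job_name: arc-listeners
    kubernetes_sd_configs:
      - role: pod
        namespaces:
          names: ["arc-systems"]   # assumed controller/listener namespace
    relabel_configs:
      # keep only pods carrying an assumed listener component label
      - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_component]
        regex: runner-scale-set-listener
        action: keep
```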
BIN
docs/gha-runner-scale-set-controller/samples/grafana-dashboard/grafana-sample.png
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
docs/gha-runner-scale-set-controller/thumbnail.png
(Stored with Git LFS)
Normal file
Binary file not shown.
@@ -1 +0,0 @@
301 - MOVED TO [../gha-runner-scale-set-controller/README.md](../gha-runner-scale-set-controller/README.md)
@@ -1,291 +0,0 @@
|
|||||||
# Autoscaling Runner Scale Sets mode
|
|
||||||
|
|
||||||
This new autoscaling mode brings numerous enhancements (described in the following sections) that will make your experience more reliable and secure.
|
|
||||||
|
|
||||||
## How it works
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
In addition to the increased reliability of the automatic scaling, we have worked on these improvements:
|
|
||||||
|
|
||||||
- No longer require cert-manager as a prerequisite for installing actions-runner-controller
|
|
||||||
- Reliable scale-up based on job demands and scale-down to zero runner pods
|
|
||||||
- Reduce API requests to `api.github.com`, no more API rate-limiting problems
|
|
||||||
- The GitHub Personal Access Token (PAT) or the GitHub App installation token is no longer passed to the runner pod for runner registration
|
|
||||||
- Maximum flexibility for customizing your runner pod template
|
|
||||||
|
|
||||||
### Demo
|
|
||||||
|
|
||||||
[](https://youtu.be/wQ0k5k6KW5Y)
|
|
||||||
|
|
||||||
## Setup

### Prerequisites

1. Create a K8s cluster, if not available.
   - If you don't have a K8s cluster, you can install a local environment using minikube. See [installing minikube](https://minikube.sigs.k8s.io/docs/start/). A minimal sketch follows this list.
1. Install helm 3, if not available. See [installing Helm](https://helm.sh/docs/intro/install/).
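For readers starting from scratch, here is a small, hedged sketch of those prerequisites on a local machine; the cluster shape and tool versions are assumptions, not requirements:

```bash
# Sketch: spin up a throwaway local cluster with minikube and confirm helm 3 is available.
minikube start
kubectl get nodes        # the minikube node should report Ready
helm version --short     # should print a v3.x client version
```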
### Install actions-runner-controller

1. Install actions-runner-controller using helm 3. For additional configuration options, see [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/gha-runner-scale-set-controller/values.yaml)

   ```bash
   NAMESPACE="arc-systems"
   helm install arc \
       --namespace "${NAMESPACE}" \
       --create-namespace \
       oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \
       --version 0.4.0
   ```

1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app).
   - ℹ For the list of required permissions, see [Authenticating to the GitHub API](https://github.com/actions/actions-runner-controller/blob/master/docs/authenticating-to-the-github-api.md#authenticating-to-the-github-api).
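   As an optional aside (not part of the official steps), you could keep the token out of your shell history by pre-creating a Kubernetes secret; the secret name and the `githubConfigSecret` reference below are assumptions based on the chart's values, so double-check them against values.yaml:

   ```bash
   # Sketch: pre-create the runner namespace and a secret holding the PAT.
   kubectl create namespace arc-runners
   kubectl create secret generic arc-runner-set-github-secret \
     --namespace arc-runners \
     --from-literal=github_token='<PAT>'
   # The runner scale set chart could then reference it, e.g.
   #   --set githubConfigSecret=arc-runner-set-github-secret
   ```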
1. You're ready to install the autoscaling runner set. For additional configuration options, see [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/gha-runner-scale-set/values.yaml)
   - ℹ **Choose your installation name carefully**; you will use it as the value of `runs-on` in your workflow.
   - ℹ **We recommend you choose a unique namespace in the following steps**. As a good security measure, it's best to have your runner pods created in a different namespace than the one containing the manager and listener pods.

   ```bash
   # Using a Personal Access Token (PAT)
   INSTALLATION_NAME="arc-runner-set"
   NAMESPACE="arc-runners"
   GITHUB_CONFIG_URL="https://github.com/<your_enterprise/org/repo>"
   GITHUB_PAT="<PAT>"
   helm install "${INSTALLATION_NAME}" \
       --namespace "${NAMESPACE}" \
       --create-namespace \
       --set githubConfigUrl="${GITHUB_CONFIG_URL}" \
       --set githubConfigSecret.github_token="${GITHUB_PAT}" \
       oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0
   ```

   ```bash
   # Using a GitHub App
   INSTALLATION_NAME="arc-runner-set"
   NAMESPACE="arc-runners"
   GITHUB_CONFIG_URL="https://github.com/<your_enterprise/org/repo>"
   GITHUB_APP_ID="<GITHUB_APP_ID>"
   GITHUB_APP_INSTALLATION_ID="<GITHUB_APP_INSTALLATION_ID>"
   GITHUB_APP_PRIVATE_KEY="<GITHUB_APP_PRIVATE_KEY>"
   helm install "${INSTALLATION_NAME}" \
       --namespace "${NAMESPACE}" \
       --create-namespace \
       --set githubConfigUrl="${GITHUB_CONFIG_URL}" \
       --set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \
       --set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \
       --set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \
       oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0
   ```

1. Check your installation. If everything went well, you should see the following:

   ```bash
   $ helm list -n "${NAMESPACE}"

   NAME             NAMESPACE     REVISION   UPDATED                                   STATUS     CHART                                   APP VERSION
   arc              arc-systems   1          2023-01-18 10:03:36.610534934 +0000 UTC   deployed   gha-runner-scale-set-controller-0.4.0   preview
   arc-runner-set   arc-systems   1          2023-01-18 10:20:14.795285645 +0000 UTC   deployed   gha-runner-scale-set-0.4.0              0.4.0
   ```

   ```bash
   $ kubectl get pods -n "${NAMESPACE}"

   NAME                                                  READY   STATUS    RESTARTS   AGE
   arc-gha-runner-scale-set-controller-8c74b6f95-gr7zr   1/1     Running   0          20m
   arc-runner-set-6cd58d58-listener                      1/1     Running   0          21s
   ```
1. In a repository, create a simple test workflow as follows. The `runs-on` value should match the helm installation name you used in the previous step.

   ```yaml
   name: Test workflow
   on:
     workflow_dispatch:
   jobs:
     test:
       runs-on: arc-runner-set
       steps:
         - name: Hello world
           run: echo "Hello world"
   ```

1. Run the workflow. You should see the runner pod being created and the workflow being executed.

   ```bash
   $ kubectl get pods -A

   NAMESPACE     NAME                                                  READY   STATUS    RESTARTS   AGE
   arc-systems   arc-gha-runner-scale-set-controller-8c74b6f95-gr7zr   1/1     Running   0          27m
   arc-systems   arc-runner-set-6cd58d58-listener                      1/1     Running   0          7m52s
   arc-runners   arc-runner-set-rmrgw-runner-p9p5n                     1/1     Running   0          21s
   ```
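One way to kick off the run from the command line, assuming the GitHub CLI (`gh`) is installed and the workflow above was saved as `.github/workflows/arc-test.yml` (a hypothetical path):

```bash
# Trigger the workflow_dispatch event, then watch the ephemeral runner pod appear and disappear.
gh workflow run arc-test.yml --repo <your_org>/<your_repo>
kubectl get pods -n arc-runners --watch
```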
### Upgrade to newer versions

Upgrading actions-runner-controller requires a few extra steps because CRDs will not be automatically upgraded (this is a helm limitation).

1. Uninstall the autoscaling runner set first

   ```bash
   INSTALLATION_NAME="arc-runner-set"
   NAMESPACE="arc-runners"
   helm uninstall "${INSTALLATION_NAME}" --namespace "${NAMESPACE}"
   ```

1. Wait for all the pods to drain

1. Pull the new helm chart, unpack it and update the CRDs. When applying this step, don't forget to replace `<PATH>` with the path of the `gha-runner-scale-set-controller` helm chart:

   ```bash
   helm pull oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \
       --version 0.4.0 \
       --untar && \
   kubectl replace -f <PATH>/gha-runner-scale-set-controller/crds/
   ```

1. Reinstall actions-runner-controller using the steps from the previous section
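As a rough sketch of the reinstall step, assuming the controller was originally installed as the release `arc` in the `arc-systems` namespace:

```bash
# Re-apply the controller chart after the CRDs have been replaced.
helm upgrade --install arc \
  --namespace arc-systems \
  oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \
  --version 0.4.0
```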
## Troubleshooting

### I'm using the charts from the `master` branch and the controller is not working

The `master` branch is highly unstable! We offer no guarantees that the charts in the `master` branch will work at any given time. If you're using the charts from the `master` branch, you should expect to encounter issues. Please use the latest release instead.

### Controller pod is running but the runner set listener pod is not

Inspect the logs of the controller first and check for errors. If there are no errors and the runner set listener pod is still not running, make sure that the **controller pod has access to the Kubernetes API server in your cluster**.

You'll see something similar to the following in the logs of the controller pod:

```log
kubectl logs <controller_pod_name> -c manager
17:35:28.661069       1 request.go:690] Waited for 1.032376652s due to client-side throttling, not priority and fairness, request: GET:https://10.0.0.1:443/apis/monitoring.coreos.com/v1alpha1?timeout=32s
2023-03-15T17:35:29Z    INFO    starting manager
```

If you have a proxy configured, or you're using a sidecar proxy that's automatically injected (think [Istio](https://istio.io/)), you need to make sure it's configured to allow traffic from the controller container (manager) to the Kubernetes API server.
### Check the logs

You can check the logs of the controller pod and the runner set listener pod using the following commands:

```bash
# Controller logs
kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-runner-scale-set-controller
```

```bash
# Runner set listener logs
kubectl logs -n "${NAMESPACE}" -l actions.github.com/scale-set-namespace=arc-systems -l actions.github.com/scale-set-name=arc-runner-set
```
### Naming error: `Name must have up to characters`

We use some of the generated resource names as labels for other resources. Resource names have a maximum length of `253 characters`, while labels are limited to `63 characters`. Given this constraint, we have to limit the resource names to `63 characters`.

Since part of the resource name is defined by you, we have to impose a limit on the number of characters you can use for the installation and namespace names.

If you see errors like the following, use shorter installation or namespace names.

```bash
Error: INSTALLATION FAILED: execution error at (gha-runner-scale-set/templates/autoscalingrunnerset.yaml:5:5): Name must have up to 45 characters

Error: INSTALLATION FAILED: execution error at (gha-runner-scale-set/templates/autoscalingrunnerset.yaml:8:5): Namespace must have up to 63 characters
```
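A quick way to check the lengths before installing (the names below are only examples):

```bash
# Both values feed into generated resource names, so keep them short.
echo -n "arc-runner-set" | wc -c   # installation name: 14 characters
echo -n "arc-runners" | wc -c      # namespace: 11 characters
```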
### If you installed the autoscaling runner set, but the listener pod is not created

Verify that the secret you provided is correct and that the `githubConfigUrl` you provided is accurate.
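For example, you could double-check both with `kubectl`; the resource and secret names below are assumptions, so adjust them to your installation:

```bash
# Confirm the AutoscalingRunnerSet exists in the runner namespace.
kubectl get autoscalingrunnersets -n arc-runners
# List the secrets and decode the token key you supplied
# (key names differ between PAT and GitHub App authentication).
kubectl get secrets -n arc-runners
kubectl get secret <your-github-config-secret> -n arc-runners \
  -o jsonpath='{.data.github_token}' | base64 --decode
```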
### Access to the path `/home/runner/_work/_tool` is denied error

You might see this error if you're using kubernetes mode with persistent volumes. It occurs because the runner container runs as a non-root user, which causes a permissions mismatch with the mounted volume.

To fix this, you can either:

1. Use a volume type that supports `securityContext.fsGroup` (`hostPath` volumes don't support it; `local` volumes do, as do other types). Update the `fsGroup` of your runner pod to match the GID of the runner. You can do that by updating the `gha-runner-scale-set` helm chart values to include the following:

   ```yaml
   spec:
     securityContext:
       fsGroup: 123
     containers:
       - name: runner
         image: ghcr.io/actions/actions-runner:<VERSION> # Replace <VERSION> with the version you want to use
         command: ["/home/runner/run.sh"]
   ```
1. If updating the `securityContext` of your runner pod is not a viable solution, you can work around the issue by using `initContainers` to change the mounted volume's ownership, as follows:

   ```yaml
   template:
     spec:
       initContainers:
         - name: kube-init
           image: ghcr.io/actions/actions-runner:latest
           command: ["sudo", "chown", "-R", "1001:123", "/home/runner/_work"]
           volumeMounts:
             - name: work
               mountPath: /home/runner/_work
       containers:
         - name: runner
           image: ghcr.io/actions/actions-runner:latest
           command: ["/home/runner/run.sh"]
   ```
## Changelog

### v0.4.0

#### ⚠️ Warning

This release contains a major change related to the way permissions are applied to the manager ([#2276](https://github.com/actions/actions-runner-controller/pull/2276) and [#2363](https://github.com/actions/actions-runner-controller/pull/2363)).

Please evaluate these changes carefully before upgrading.

#### Major changes

1. Surface EphemeralRunnerSet stats to AutoscalingRunnerSet [#2382](https://github.com/actions/actions-runner-controller/pull/2382)
1. Improved security posture by removing the list/watch secrets permission from the manager cluster role [#2276](https://github.com/actions/actions-runner-controller/pull/2276)
1. Improved security posture by delaying role/rolebinding creation to gha-runner-scale-set during installation [#2363](https://github.com/actions/actions-runner-controller/pull/2363)
1. Improved security posture by supporting watching a single namespace from the controller [#2374](https://github.com/actions/actions-runner-controller/pull/2374)
1. Added labels to AutoscalingRunnerSet subresources to allow easier inspection [#2391](https://github.com/actions/actions-runner-controller/pull/2391)
1. Fixed a bug preventing env variables from being specified [#2450](https://github.com/actions/actions-runner-controller/pull/2450)
1. Enhanced the quickstart troubleshooting guides [#2435](https://github.com/actions/actions-runner-controller/pull/2435)
1. Fixed handling so the extra dind container is ignored when the container mode type is "dind" [#2418](https://github.com/actions/actions-runner-controller/pull/2418)
1. Added additional cleanup finalizers [#2433](https://github.com/actions/actions-runner-controller/pull/2433)
1. The gha-runner-scale-set listener pod now inherits the ImagePullPolicy from the manager pod [#2477](https://github.com/actions/actions-runner-controller/pull/2477)
1. Treat the `.ghe.com` domain as a hosted environment [#2480](https://github.com/actions/actions-runner-controller/pull/2480)
### v0.3.0

#### Major changes

1. Runner pods are more similar to hosted runners [#2348](https://github.com/actions/actions-runner-controller/pull/2348)
1. Added support for self-signed CA certificates [#2268](https://github.com/actions/actions-runner-controller/pull/2268)
1. Fixed trailing slashes in config URLs breaking installations [#2381](https://github.com/actions/actions-runner-controller/pull/2381)
1. Fixed a bug where the listener pod would ignore proxy settings from env [#2366](https://github.com/actions/actions-runner-controller/pull/2366)
1. Added a runner set name field, making it optionally configurable [#2279](https://github.com/actions/actions-runner-controller/pull/2279)
1. Name and namespace labels of the listener pod have been split [#2341](https://github.com/actions/actions-runner-controller/pull/2341)
1. Added chart name constraints validation on AutoscalingRunnerSet install [#2347](https://github.com/actions/actions-runner-controller/pull/2347)
### v0.2.0

#### Major changes

1. Added proxy support for the controller and the runner pods, see the new helm chart fields [#2286](https://github.com/actions/actions-runner-controller/pull/2286)
1. Added the ability to provide a pre-defined Kubernetes secret for the autoscaling runner set helm chart [#2234](https://github.com/actions/actions-runner-controller/pull/2234)
1. Enhanced security posture by removing unnecessary permissions from the manager-role [#2260](https://github.com/actions/actions-runner-controller/pull/2260)
1. Enhanced our logging by returning an error when a runner group is defined in the values file but has not been created in GitHub [#2215](https://github.com/actions/actions-runner-controller/pull/2215)
1. Fixed helm chart issues that were preventing the use of DinD [#2291](https://github.com/actions/actions-runner-controller/pull/2291)
1. Fixed a bug that was preventing runner scale sets from being removed from the backend when they were deleted from the cluster [#2255](https://github.com/actions/actions-runner-controller/pull/2255) [#2223](https://github.com/actions/actions-runner-controller/pull/2223)
1. Fixed bugs in the helm chart definitions preventing certain values from being set [#2222](https://github.com/actions/actions-runner-controller/pull/2222)
1. Fixed a bug that prevented the configuration of a runner group for a runner scale set [#2216](https://github.com/actions/actions-runner-controller/pull/2216)
@@ -4,7 +4,7 @@

> This feature requires controller version >= [v0.26.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.26.0)

In a large enterprise, there might be many GitHub organizations that require self-hosted runners. Previously, the only way to provide ARC-managed self-hosted runners in such an environment was [Deploying Multiple Controllers](deploying-arc-runners.md#deploying-multiple-controllers), which incurs overhead because it requires one ARC installation per GitHub organization.

With multitenancy, you can let ARC manage self-hosted runners across organizations. It's enabled by default, and the only thing you need to do to start using it is to set the `spec.githubAPICredentialsFrom.secretRef.name` fields for the following resources:
@@ -58,4 +58,4 @@ spec:

when and which ARC component (`horizontalrunnerautoscaler-controller`, `runnerdeployment-controller`, `runnerreplicaset-controller`, `runner-controller` or `runnerpod-controller`) makes specific API calls.

> Just don't be surprised that you have to repeat the `githubAPICredentialsFrom.secretRef.name` setting on both resources!

Please refer to [Deploying Using GitHub App Authentication](authenticating-to-the-github-api.md#deploying-using-github-app-authentication) for how you can create the Kubernetes secret containing GitHub App credentials.
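For illustration, a hedged sketch of where `githubAPICredentialsFrom.secretRef.name` ends up on the two resources; the resource names and the `org2-github-app` secret are hypothetical, so consult the full document for the exact fields:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: org2-runnerdeploy
spec:
  template:
    spec:
      organization: org2
      githubAPICredentialsFrom:
        secretRef:
          name: org2-github-app
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: org2-runnerdeploy-autoscaler
spec:
  githubAPICredentialsFrom:
    secretRef:
      name: org2-github-app
  scaleTargetRef:
    kind: RunnerDeployment
    name: org2-runnerdeploy
  minReplicas: 0
  maxReplicas: 5
```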
@@ -67,3 +67,46 @@ spec:

    - name: DOCKER_DEFAULT_ADDRESS_POOL_SIZE
      value: "24"

More options can be configured by mounting a ConfigMap to the daemon.json location:

- rootless: `/home/runner/.config/docker/daemon.json`
- rootful: `/etc/docker/daemon.json`

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeployment
spec:
  template:
    spec:
      dockerdWithinRunnerContainer: true
      image: summerwind/actions-runner-dind(-rootless)
      volumeMounts:
        - mountPath: /home/runner/.config/docker/daemon.json
          name: daemon-config-volume
          subPath: daemon.json
      volumes:
        - name: daemon-config-volume
          configMap:
            name: daemon-cm
            items:
              - key: daemon.json
                path: daemon.json
      securityContext:
        fsGroup: 1001 # runner user id
```

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: daemon-cm
data:
  daemon.json: |
    {
      "log-level": "warn",
      "dns": ["x.x.x.x"]
    }
```
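If you prefer not to write the ConfigMap manifest by hand, an equivalent way to create it from a local file (assuming `./daemon.json` holds the JSON shown above):

```bash
kubectl create configmap daemon-cm --from-file=daemon.json=./daemon.json
```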