Mirror of https://github.com/actions/actions-runner-controller.git (synced 2025-12-10 19:50:30 +00:00)

Compare commits: gha-runner...gha-runner (100 commits)
Commits in this comparison (100 SHAs):

9e191cdd21, f965dfef73, 4ee49fee14, 8075e5ee74, 963ae48a3f, 98854ef9c0, 1987d9eb2e, 0006dd5eb1, 86f1714354, f68bbad579,
d3a8a34bb2, d515b4a6e0, d971fedbe8, 6c6d061f0a, 5b9b9f7ca2, 4357525445, 1d1790614b, 442d52cd56, b6a95ae879, 9968141086,
e59d127d41, fb1232c13e, 7a643a5107, 46cfbb6ec7, c9099a5a56, 48706584fd, 2c0e53951b, a7af44e042, f225fef921, 814947c60e,
039350a0d0, a0fb417f69, f5fd831c2f, 753afb75b9, 309b53143e, 7da2d7f96a, e06c7edc21, 9fba37540a, a68aa00bd8, 9b053102ed,
c03fac8fdd, d72774753c, f7b6ad901d, 728f05c844, c00465973e, 5f23afaad3, 47dfed3ced, 1f9b7541e6, a029b705cd, 3fab744a4f,
fe8c3bb789, e40874f67f, d7d479172d, 31352924d7, 3e4201ac5f, a44b037d6b, e11beea49b, bfadad0830, f7eb88ce9c, 0fd8eac305,
b78cadd901, 202a97ab12, b08d533105, 0bfa57ac50, 2831d658c4, 0f40f6ab26, 5347e2c2c8, 1cba9c7800, 2c29cfb994, 4f89ac5878,
64778a828e, 8e484637f9, b202be712e, fb11d3bfd0, 7793e1974a, 8aa04dd2be, 2939640fa9, 65fd04540c, 1ae5d2b18e, 862bc1a9dd,
95487735a2, 16815230bb, 2646456677, 62eca94e45, 510b1d82e5, b511953df7, 2117fd1892, e1edb84abe, f14dbd68f1, bffcb32b19,
ea2443a410, ba91c183b5, e10a1cc7a3, ce80adb9ab, 1a8abb6d39, fdf7b6c525, db061b33e7, ead26ab18f, 16666e1bba, 2ae39828b2
.github/ISSUE_TEMPLATE/config.yml (vendored): 3 changes

@@ -1,5 +1,8 @@
 blank_issues_enabled: false
 contact_links:
+  - name: Feature requests for the gha-runner-scale-set (actions.github.com API group)
+    about: Feature requests associated with the actions.github.com group should be posted on the GitHub Community Support Forum
+    url: https://github.com/orgs/community/discussions/categories/actions
   - name: Sponsor ARC Maintainers
     about: If your business relies on the continued maintainance of actions-runner-controller, please consider sponsoring the project and the maintainers.
     url: https://github.com/actions/actions-runner-controller/tree/master/CODEOWNERS
.github/ISSUE_TEMPLATE/github_bug_report.yaml (vendored, new file): 113 changes

@@ -0,0 +1,113 @@
+name: Bug Report (actions.github.com API group)
+description: File a bug report for actions.github.com API group
+title: "<Please write what didn't work for you here>"
+labels: ["bug", "needs triage", "gha-runner-scale-set"]
+body:
+  - type: checkboxes
+    id: read-troubleshooting-guide
+    attributes:
+      label: Checks
+      description: Please check all the boxes below before submitting
+      options:
+        - label: I've already read https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/troubleshooting-actions-runner-controller-errors and I'm sure my issue is not covered in the troubleshooting guide.
+          required: true
+
+        - label: I am using charts that are officially provided
+  - type: input
+    id: controller-version
+    attributes:
+      label: Controller Version
+      description: Refers to semver-like release tags for controller versions. Any release tags prefixed with `gha-runner-scale-set-` are releases associated with this API group
+      placeholder: ex. 0.6.1
+    validations:
+      required: true
+  - type: dropdown
+    id: deployment-method
+    attributes:
+      label: Deployment Method
+      description: Which deployment method did you use to install ARC?
+      options:
+        - Helm
+        - Kustomize
+        - ArgoCD
+        - Other
+    validations:
+      required: true
+  - type: checkboxes
+    id: checks
+    attributes:
+      label: Checks
+      description: Please check all the boxes below before submitting
+      options:
+        - label: This isn't a question or user support case (For Q&A and community support, go to [Discussions](https://github.com/actions/actions-runner-controller/discussions)).
+          required: true
+        - label: I've read the [Changelog](https://github.com/actions/actions-runner-controller/blob/master/docs/gha-runner-scale-set-controller/README.md#changelog) before submitting this issue and I'm sure it's not due to any recently-introduced backward-incompatible changes
+          required: true
+  - type: textarea
+    id: reproduction-steps
+    attributes:
+      label: To Reproduce
+      description: "Steps to reproduce the behavior"
+      render: markdown
+      placeholder: |
+        1. Go to '...'
+        2. Click on '....'
+        3. Scroll down to '....'
+        4. See error
+    validations:
+      required: true
+  - type: textarea
+    id: actual-behavior
+    attributes:
+      label: Describe the bug
+      description: Also tell us, what did happen?
+      placeholder: A clear and concise description of what happened.
+    validations:
+      required: true
+
+  - type: textarea
+    id: expected-behavior
+    attributes:
+      label: Describe the expected behavior
+      description: Also tell us, what did you expect to happen?
+      placeholder: A clear and concise description of what the expected behavior is.
+    validations:
+      required: true
+
+  - type: textarea
+    id: additional-context
+    attributes:
+      label: Additional Context
+      render: yaml
+      description: |
+        Provide `values.yaml` files that are relevant for this issue. PLEASE REDACT ANY INFORMATION THAT SHOULD NOT BE PUBLICALY AVAILABLE, LIKE GITHUB TOKEN FOR EXAMPLE.
+      placeholder: |
+        PLEASE REDACT ANY INFORMATION THAT SHOULD NOT BE PUBLICALY AVAILABLE, LIKE GITHUB TOKEN FOR EXAMPLE.
+    validations:
+      required: true
+
+  - type: textarea
+    id: controller-logs
+    attributes:
+      label: Controller Logs
+      description: "NEVER EVER OMIT THIS! Include complete logs from `actions-runner-controller`'s controller-manager pod."
+      render: shell
+      placeholder: |
+        PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
+
+        To grab controller logs:
+
+        kubectl logs -n $NAMESPACE deployments/$CONTROLLER_DEPLOYMENT
+    validations:
+      required: true
+  - type: textarea
+    id: runner-pod-logs
+    attributes:
+      label: Runner Pod Logs
+      description: "Include logs and kubectl describe output from runner pod(s)."
+      render: shell
+      placeholder: |
+        PROVIDE THE WHOLE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
+    validations:
+      required: true
+
@@ -1,7 +1,7 @@
-name: Bug Report
-description: File a bug report
+name: Bug Report (actions.summerwind.net API group)
+description: File a bug report for actions.summerwind.net API group
 title: "<Please write what didn't work for you here>"
-labels: ["bug", "needs triage"]
+labels: ["bug", "needs triage", "community"]
 body:
   - type: checkboxes
     id: read-troubleshooting-guide
@@ -146,7 +146,7 @@ body:
       render: shell
       placeholder: |
        PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA

        To grab controller logs:

        # Set NS according to your setup
@@ -166,7 +166,7 @@ body:
      render: shell
      placeholder: |
       PROVIDE THE WHOLE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA

       To grab the runner pod logs:

       # Set NS according to your setup. It should match your RunnerDeployment's metadata.namespace.
@@ -177,7 +177,7 @@ body:

       kubectl -n $NS logs $POD_NAME -c runner > runnerpod_runner.log
       kubectl -n $NS logs $POD_NAME -c docker > runnerpod_docker.log

       If any of the containers are getting terminated immediately, try adding `--previous` to the kubectl-logs command to obtain logs emitted before the termination.
    validations:
      required: true
@@ -1,7 +1,7 @@
 ---
-name: Feature request
+name: Feature request (actions.summerwind.net API group)
 about: Suggest an idea for this project
-labels: ["enhancement", "needs triage"]
+labels: ["enhancement", "needs triage", "community"]
 title: ''
 assignees: ''
 ---
@@ -193,7 +193,7 @@ runs:
      shell: bash
      run: |
        helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
-        kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
+        kubectl wait --timeout=30s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-namespace}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}

    - name: Gather logs and cleanup
      shell: bash
.github/workflows/arc-publish-chart.yaml (vendored): 2 changes

@@ -63,7 +63,7 @@ jobs:
          python-version: '3.11'

      - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.3.1
+        uses: helm/chart-testing-action@v2.6.0

      - name: Run chart-testing (list-changed)
        id: list-changed
.github/workflows/arc-release-runners.yaml (vendored): 2 changes

@@ -17,7 +17,7 @@ env:
  PUSH_TO_REGISTRIES: true
  TARGET_ORG: actions-runner-controller
  TARGET_WORKFLOW: release-runners.yaml
-  DOCKER_VERSION: 20.10.23
+  DOCKER_VERSION: 24.0.7

concurrency:
  group: ${{ github.workflow }}
@@ -78,7 +78,7 @@ jobs:
        run: |
          RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
          CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"

          PR_NAME="Updates:"
          if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
          then
@@ -88,7 +88,7 @@ jobs:
          then
            PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
          fi

          result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
          if [ -z "$result" ]
          then
@@ -120,21 +120,25 @@ jobs:

    steps:
      - uses: actions/checkout@v3

      - name: New branch
        run: git checkout -b update-runner-"$(date +%Y-%m-%d)"

      - name: Update files
        run: |
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/VERSION
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/Makefile
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" Makefile
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" test/e2e/e2e_test.go
+          CURRENT_VERSION="${RUNNER_CURRENT_VERSION//./\\.}"
+          LATEST_VERSION="${RUNNER_LATEST_VERSION//./\\.}"
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go

-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/VERSION
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/Makefile
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" Makefile
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" test/e2e/e2e_test.go
+          CURRENT_VERSION="${CONTAINER_HOOKS_CURRENT_VERSION//./\\.}"
+          LATEST_VERSION="${CONTAINER_HOOKS_LATEST_VERSION//./\\.}"
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go

      - name: Commit changes
        run: |
.github/workflows/arc-validate-chart.yaml (vendored): 6 changes

@@ -28,7 +28,7 @@ permissions:
  contents: read

concurrency:
  # This will make sure we only apply the concurrency limits on pull requests
  # but not pushes to master branch by making the concurrency group name unique
  # for pushes
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -69,10 +69,10 @@ jobs:
      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
      - uses: actions/setup-python@v4
        with:
-          python-version: '3.7'
+          python-version: '3.11'

      - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.4.0
+        uses: helm/chart-testing-action@v2.6.0

      - name: Run chart-testing (list-changed)
        id: list-changed
.github/workflows/gha-e2e-tests.yaml (vendored): 97 changes

@@ -16,7 +16,7 @@ env:
  TARGET_ORG: actions-runner-controller
  TARGET_REPO: arc_e2e_test_dummy
  IMAGE_NAME: "arc-test-image"
-  IMAGE_VERSION: "0.6.0"
+  IMAGE_VERSION: "0.9.1"

concurrency:
  # This will make sure we only apply the concurrency limits on pull requests
@@ -880,3 +880,98 @@ jobs:
          helm uninstall "${{ steps.install_arc.outputs.ARC_NAME }}" --namespace "arc-runners" --debug
          kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}"
          kubectl logs deployment/arc-gha-rs-controller -n "arc-systems"
+
+  init-with-min-runners:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
+    env:
+      WORKFLOW_FILE: arc-test-workflow.yaml
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.head_ref }}
+
+      - uses: ./.github/actions/setup-arc-e2e
+        id: setup
+        with:
+          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
+          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
+          image-name: ${{env.IMAGE_NAME}}
+          image-tag: ${{env.IMAGE_VERSION}}
+          target-org: ${{env.TARGET_ORG}}
+
+      - name: Install gha-runner-scale-set-controller
+        id: install_arc_controller
+        run: |
+          helm install arc \
+            --namespace "arc-systems" \
+            --create-namespace \
+            --set image.repository=${{ env.IMAGE_NAME }} \
+            --set image.tag=${{ env.IMAGE_VERSION }} \
+            --set flags.updateStrategy="eventual" \
+            ./charts/gha-runner-scale-set-controller \
+            --debug
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
+          kubectl get pod -n arc-systems
+          kubectl describe deployment arc-gha-rs-controller -n arc-systems
+
+      - name: Install gha-runner-scale-set
+        id: install_arc
+        run: |
+          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
+          helm install "$ARC_NAME" \
+            --namespace "arc-runners" \
+            --create-namespace \
+            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
+            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
+            --set minRunners=5 \
+            ./charts/gha-runner-scale-set \
+            --debug
+          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
+          kubectl get pod -n arc-systems
+      - name: Ensure 5 runners are up
+        run: |
+          count=0
+          while true; do
+            pod_count=$(kubectl get pods -n arc-runners --no-headers | wc -l)
+            if [[ "$pod_count" = 5 ]]; then
+              echo "5 pods are up!"
+              break
+            fi
+            if [[ "$count" -ge 12 ]]; then
+              echo "Timeout waiting for 5 pods to be created"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
.github/workflows/gha-validate-chart.yaml (vendored): 6 changes

@@ -24,7 +24,7 @@ permissions:
  contents: read

concurrency:
  # This will make sure we only apply the concurrency limits on pull requests
  # but not pushes to master branch by making the concurrency group name unique
  # for pushes
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -65,10 +65,10 @@ jobs:
      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
      - uses: actions/setup-python@v4
        with:
-          python-version: '3.7'
+          python-version: '3.11'

      - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.4.0
+        uses: helm/chart-testing-action@v2.6.0

      - name: Run chart-testing (list-changed)
        id: list-changed
.github/workflows/global-run-codeql.yaml
vendored
9
.github/workflows/global-run-codeql.yaml
vendored
@@ -2,7 +2,7 @@ name: Run CodeQL
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- master
|
- master
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches:
|
||||||
@@ -11,7 +11,7 @@ on:
|
|||||||
- cron: '30 1 * * 0'
|
- cron: '30 1 * * 0'
|
||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
# This will make sure we only apply the concurrency limits on pull requests
|
# This will make sure we only apply the concurrency limits on pull requests
|
||||||
# but not pushes to master branch by making the concurrency group name unique
|
# but not pushes to master branch by making the concurrency group name unique
|
||||||
# for pushes
|
# for pushes
|
||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
@@ -27,6 +27,11 @@ jobs:
|
|||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Install Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version-file: go.mod
|
||||||
|
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
uses: github/codeql-action/init@v2
|
uses: github/codeql-action/init@v2
|
||||||
with:
|
with:
|
||||||
|
|||||||
.github/workflows/go.yaml (vendored): 4 changes

@@ -19,7 +19,7 @@ permissions:
  contents: read

concurrency:
  # This will make sure we only apply the concurrency limits on pull requests
  # but not pushes to master branch by making the concurrency group name unique
  # for pushes
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -51,7 +51,7 @@ jobs:
        uses: golangci/golangci-lint-action@v3
        with:
          only-new-issues: true
-          version: v1.51.1
+          version: v1.55.2

  generate:
    runs-on: ubuntu-latest
@@ -1,2 +1,2 @@
 # actions-runner-controller maintainers
-* @mumoshu @toast-gear @actions/actions-runtime @nikola-jokic
+* @mumoshu @toast-gear @actions/actions-launch @nikola-jokic @rentziass
@@ -73,7 +73,7 @@ To make your development cycle faster, use the below command to update deploy an
 # Makefile
 VERSION=controller1 \
   RUNNER_TAG=runner1 \
-  make acceptance/pull acceptance/kind docker-build acceptance/load acceptance/deploy
+  make acceptance/pull acceptance/kind docker-buildx acceptance/load acceptance/deploy
 ```

 If you've already deployed actions-runner-controller and only want to recreate pods to use the newer image, you can run:
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM --platform=$BUILDPLATFORM golang:1.20.7 as builder
+FROM --platform=$BUILDPLATFORM golang:1.22.1 as builder

 WORKDIR /workspace

@@ -38,6 +38,7 @@ RUN --mount=target=. \
     export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \
     go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/manager main.go && \
     go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \
+    go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/ghalistener ./cmd/ghalistener && \
     go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \
     go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver && \
     go build -trimpath -ldflags="-s -w" -o /out/sleep ./cmd/sleep
@@ -52,6 +53,7 @@ COPY --from=builder /out/manager .
 COPY --from=builder /out/github-webhook-server .
 COPY --from=builder /out/actions-metrics-server .
 COPY --from=builder /out/github-runnerscaleset-listener .
+COPY --from=builder /out/ghalistener .
 COPY --from=builder /out/sleep .

 USER 65532:65532
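The `-X` linker flags in the build stage above stamp version metadata into package-level variables at link time. As a hedged illustration (the variable names come from the ldflags paths shown in the diff; the default values here are placeholders, not necessarily what the repository uses), such a `build` package looks roughly like this:

```go
// Package build carries version metadata that the Dockerfile injects with
// go build -ldflags "-X 'github.com/actions/actions-runner-controller/build.Version=...'
//                    -X 'github.com/actions/actions-runner-controller/build.CommitSHA=...'".
package build

// Placeholder defaults; real values are overwritten at link time by the
// -X flags shown in the Dockerfile diff above.
var (
	Version   = "dev"
	CommitSHA = "unknown"
)
```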
Makefile: 6 changes

@@ -6,7 +6,7 @@ endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
 COMMIT_SHA = $(shell git rev-parse HEAD)
-RUNNER_VERSION ?= 2.309.0
+RUNNER_VERSION ?= 2.315.0
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -68,7 +68,7 @@ endif
 all: manager

 lint:
-	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.54.2 golangci-lint run
+	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.55.2 golangci-lint run

 GO_TEST_ARGS ?= -short

@@ -320,7 +320,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
 	CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
 	cd $$CONTROLLER_GEN_TMP_DIR ;\
 	go mod init tmp ;\
-	go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0 ;\
+	go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0 ;\
 	rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
 	}
 endif
@@ -42,6 +42,10 @@ type EphemeralRunner struct {
 	Status EphemeralRunnerStatus `json:"status,omitempty"`
 }

+func (er *EphemeralRunner) IsDone() bool {
+	return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
+}
+
 // EphemeralRunnerSpec defines the desired state of EphemeralRunner
 type EphemeralRunnerSpec struct {
 	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
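The new `IsDone` helper reports whether an ephemeral runner has reached a terminal pod phase (`PodSucceeded` or `PodFailed`). A hypothetical caller might use it to filter out finished runners; the sketch below is illustrative only, and the import path is an assumption rather than something shown in this diff:

```go
package example

import (
	// Assumed import path for the package that defines EphemeralRunner.
	v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

// filterActive is a hypothetical helper: it drops runners whose pods have
// already finished, relying on the IsDone method added in the diff above.
func filterActive(runners []v1alpha1.EphemeralRunner) []v1alpha1.EphemeralRunner {
	active := make([]v1alpha1.EphemeralRunner, 0, len(runners))
	for i := range runners {
		if runners[i].IsDone() { // Status.Phase is PodSucceeded or PodFailed
			continue
		}
		active = append(active, runners[i])
	}
	return active
}
```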
@@ -24,6 +24,8 @@ import (
 type EphemeralRunnerSetSpec struct {
 	// Replicas is the number of desired EphemeralRunner resources in the k8s namespace.
 	Replicas int `json:"replicas,omitempty"`
+	// PatchID is the unique identifier for the patch issued by the listener app
+	PatchID int `json:"patchID"`

 	EphemeralRunnerSpec EphemeralRunnerSpec `json:"ephemeralRunnerSpec,omitempty"`
 }
@@ -1,5 +1,4 @@
 //go:build !ignore_autogenerated
-// +build !ignore_autogenerated

 /*
 Copyright 2020 The actions-runner-controller authors.
@@ -23,6 +23,7 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 )

 // log is for logging in this package.
@@ -48,20 +49,20 @@ func (r *Runner) Default() {
 var _ webhook.Validator = &Runner{}

 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *Runner) ValidateCreate() error {
+func (r *Runner) ValidateCreate() (admission.Warnings, error) {
 	runnerLog.Info("validate resource to be created", "name", r.Name)
-	return r.Validate()
+	return nil, r.Validate()
 }

 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (r *Runner) ValidateUpdate(old runtime.Object) error {
+func (r *Runner) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
 	runnerLog.Info("validate resource to be updated", "name", r.Name)
-	return r.Validate()
+	return nil, r.Validate()
 }

 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (r *Runner) ValidateDelete() error {
+func (r *Runner) ValidateDelete() (admission.Warnings, error) {
-	return nil
+	return nil, nil
 }

 // Validate validates resource spec.
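These signature changes track the `webhook.Validator` interface in newer controller-runtime releases (v0.15 and later), where each method also returns `admission.Warnings` (a slice of strings surfaced to the API client) in addition to the error. A paraphrased sketch of that interface, for reference only (see controller-runtime for the authoritative definition, which also embeds `runtime.Object`):

```go
package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// Validator mirrors the shape of controller-runtime's validating-webhook
// interface from v0.15+: every method may return non-fatal warnings
// alongside a rejection error. (The real interface also embeds runtime.Object.)
type Validator interface {
	ValidateCreate() (admission.Warnings, error)
	ValidateUpdate(old runtime.Object) (admission.Warnings, error)
	ValidateDelete() (admission.Warnings, error)
}
```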
@@ -23,6 +23,7 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 )

 // log is for logging in this package.
@@ -48,20 +49,20 @@ func (r *RunnerDeployment) Default() {
 var _ webhook.Validator = &RunnerDeployment{}

 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *RunnerDeployment) ValidateCreate() error {
+func (r *RunnerDeployment) ValidateCreate() (admission.Warnings, error) {
 	runnerDeploymentLog.Info("validate resource to be created", "name", r.Name)
-	return r.Validate()
+	return nil, r.Validate()
 }

 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (r *RunnerDeployment) ValidateUpdate(old runtime.Object) error {
+func (r *RunnerDeployment) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
 	runnerDeploymentLog.Info("validate resource to be updated", "name", r.Name)
-	return r.Validate()
+	return nil, r.Validate()
 }

 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (r *RunnerDeployment) ValidateDelete() error {
+func (r *RunnerDeployment) ValidateDelete() (admission.Warnings, error) {
-	return nil
+	return nil, nil
 }

 // Validate validates resource spec.
@@ -23,6 +23,7 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 )

 // log is for logging in this package.
@@ -48,20 +49,20 @@ func (r *RunnerReplicaSet) Default() {
 var _ webhook.Validator = &RunnerReplicaSet{}

 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *RunnerReplicaSet) ValidateCreate() error {
+func (r *RunnerReplicaSet) ValidateCreate() (admission.Warnings, error) {
 	runnerReplicaSetLog.Info("validate resource to be created", "name", r.Name)
-	return r.Validate()
+	return nil, r.Validate()
 }

 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (r *RunnerReplicaSet) ValidateUpdate(old runtime.Object) error {
+func (r *RunnerReplicaSet) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
 	runnerReplicaSetLog.Info("validate resource to be updated", "name", r.Name)
-	return r.Validate()
+	return nil, r.Validate()
 }

 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (r *RunnerReplicaSet) ValidateDelete() error {
+func (r *RunnerReplicaSet) ValidateDelete() (admission.Warnings, error) {
-	return nil
+	return nil, nil
 }

 // Validate validates resource spec.
@@ -1,5 +1,4 @@
 //go:build !ignore_autogenerated
-// +build !ignore_autogenerated

 /*
 Copyright 2020 The actions-runner-controller authors.
@@ -15,10 +15,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.23.5
+version: 0.23.7

 # Used as the default manager tag value when no tag property is provided in the values.yaml
-appVersion: 0.27.5
+appVersion: 0.27.6

 home: https://github.com/actions/actions-runner-controller

@@ -8,154 +8,156 @@ All additional docs are kept in the `docs/` folder, this README is solely for do

 > _Default values are the defaults set in the charts `values.yaml`, some properties have default configurations in the code for when the property is omitted or invalid_

 | Key | Description | Default |
 |---|---|---|
 | `labels` | Set labels to apply to all resources in the chart | |
 | `replicaCount` | Set the number of controller pods | 1 |
 | `webhookPort` | Set the containerPort for the webhook Pod | 9443 |
 | `syncPeriod` | Set the period in which the controller reconciles the desired runners count | 1m |
 | `enableLeaderElection` | Enable election configuration | true |
 | `leaderElectionId` | Set the election ID for the controller group | |
 | `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
 | `githubURL` | Override GitHub URL to be used for GitHub API calls | |
 | `githubUploadURL` | Override GitHub Upload URL to be used for GitHub API calls | |
 | `runnerGithubURL` | Override GitHub URL to be used by runners during registration | |
 | `logLevel` | Set the log level of the controller container | |
 | `logFormat` | Set the log format of the controller. Valid options are "text" and "json" | text |
 | `additionalVolumes` | Set additional volumes to add to the manager container | |
 | `additionalVolumeMounts` | Set additional volume mounts to add to the manager container | |
 | `authSecret.create` | Deploy the controller auth secret | false |
 | `authSecret.name` | Set the name of the auth secret | controller-manager |
 | `authSecret.annotations` | Set annotations for the auth Secret | |
 | `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
 | `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
 | `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
 | `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
 | `authSecret.github_basicauth_username` | Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
 | `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
 | `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | |
 | `hostNetwork` | The "hostNetwork" of the controller container | false |
 | `dnsPolicy` | The "dnsPolicy" of the controller container | ClusterFirst |
 | `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
 | `image.tag` | The tag of the controller container | |
 | `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
 | `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | |
 | `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
 | `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
 | `metrics.serviceMonitor.enable` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
 | `metrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrap the controller's metrics | 1m |
+| `metrics.serviceMonitor.namespace | Namespace which Prometheus is running in | `Release.Namespace` (the default namespace of the helm chart). |
 | `metrics.serviceMonitor.timeout` | Configure the timeout the timeout of Prometheus scrapping. | 30s |
 | `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
 | `metrics.port` | Set port of metrics service | 8443 |
 | `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
 | `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
 | `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
 | `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
 | `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
 | `fullnameOverride` | Override the full resource names | |
 | `nameOverride` | Override the resource name prefix | |
 | `serviceAccount.annotations` | Set annotations to the service account | |
 | `serviceAccount.create` | Deploy the controller pod under a service account | true |
 | `podAnnotations` | Set annotations for the controller pod | |
 | `podLabels` | Set labels for the controller pod | |
 | `serviceAccount.name` | Set the name of the service account | |
 | `securityContext` | Set the security context for each container in the controller pod | |
 | `podSecurityContext` | Set the security context to controller pod | |
 | `service.annotations` | Set annotations for the provisioned webhook service resource | |
 | `service.port` | Set controller service ports | |
 | `service.type` | Set controller service type | |
 | `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
 | `nodeSelector` | Set the controller pod nodeSelector | |
 | `resources` | Set the controller pod resources | |
 | `affinity` | Set the controller pod affinity rules | |
 | `podDisruptionBudget.enabled` | Enables a PDB to ensure HA of controller pods | false |
 | `podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
 | `podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
 | `tolerations` | Set the controller pod tolerations | |
 | `env` | Set environment variables for the controller container | |
 | `priorityClassName` | Set the controller pod priorityClassName | |
 | `scope.watchNamespace` | Tells the controller and the github webhook server which namespace to watch if `scope.singleNamespace` is true | `Release.Namespace` (the default namespace of the helm chart). |
 | `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
 | `certManagerEnabled` | Enable cert-manager. If disabled you must set admissionWebHooks.caBundle and create TLS secrets manually | true |
 | `runner.statusUpdateHook.enabled` | Use custom RBAC for runners (role, role binding and service account), this will enable reporting runner statuses | false |
 | `admissionWebHooks.caBundle` | Base64-encoded PEM bundle containing the CA that signed the webhook's serving certificate | |
 | `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
 | `githubWebhookServer.logFormat` | Set the log format of the githubWebhookServer controller. Valid options are "text" and "json" | text |
 | `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
 | `githubWebhookServer.useRunnerGroupsVisibility` | Enable supporting runner groups with custom visibility, you also need to set `githubWebhookServer.secret.enabled` to enable this feature. | false |
 | `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
 | `githubWebhookServer.queueLimit` | Set the queue size limit in the githubWebhookServer | |
 | `githubWebhookServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
 | `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
 | `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
 | `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
 | `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
 | `githubWebhookServer.nameOverride` | Override the resource name prefix | |
 | `githubWebhookServer.fullnameOverride` | Override the full resource names | |
 | `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
 | `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
 | `githubWebhookServer.serviceAccount.name` | Set the service account name | |
 | `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
|
| `githubWebhookServer.serviceAccount.name` | Set the service account name | |
|
||||||
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
|
| `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
|
||||||
| `githubWebhookServer.podSecurityContext` | Set the security context to githubWebhookServer pod | |
|
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
|
||||||
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
|
| `githubWebhookServer.podSecurityContext` | Set the security context to githubWebhookServer pod | |
|
||||||
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
|
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
|
||||||
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
|
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
|
||||||
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
|
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
|
||||||
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
|
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
|
||||||
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
|
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
|
||||||
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
|
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
|
||||||
| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
|
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
|
||||||
| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` |
|
| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
|
||||||
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
|
| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` |
|
||||||
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
|
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
|
||||||
| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
|
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
|
||||||
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
|
| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
|
||||||
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
|
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
|
||||||
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
|
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
|
||||||
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
|
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
|
||||||
| `githubWebhookServer.ingress.ingressClassName` | Set ingress class name | |
|
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
|
||||||
| `githubWebhookServer.podDisruptionBudget.enabled` | Enables a PDB to ensure HA of githubwebhook pods | false |
|
| `githubWebhookServer.ingress.ingressClassName` | Set ingress class name | |
|
||||||
| `githubWebhookServer.podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
|
| `githubWebhookServer.podDisruptionBudget.enabled` | Enables a PDB to ensure HA of githubwebhook pods | false |
|
||||||
| `githubWebhookServer.podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
|
| `githubWebhookServer.podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
|
||||||
| `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
|
| `githubWebhookServer.podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
|
||||||
| `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
|
| `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
|
||||||
| `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
|
| `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
|
||||||
|
| `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
|
||||||
| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the actions-metrics-server | false |
|
| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the actions-metrics-server | false |
|
||||||
| `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false |
|
| `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false |
|
||||||
| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | actions-metrics-server |
|
| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | actions-metrics-server |
|
||||||
| `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
|
| `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
|
||||||
| `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | |
|
| `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | |
|
||||||
| `actionsMetricsServer.nameOverride` | Override the resource name prefix | |
|
| `actionsMetricsServer.nameOverride` | Override the resource name prefix | |
|
||||||
| `actionsMetricsServer.fullnameOverride` | Override the full resource names | |
|
| `actionsMetricsServer.fullnameOverride` | Override the full resource names | |
|
||||||
| `actionsMetricsServer.serviceAccount.create` | Deploy the actionsMetricsServer under a service account | true |
|
| `actionsMetricsServer.serviceAccount.create` | Deploy the actionsMetricsServer under a service account | true |
|
||||||
| `actionsMetricsServer.serviceAccount.annotations` | Set annotations for the service account | |
|
| `actionsMetricsServer.serviceAccount.annotations` | Set annotations for the service account | |
|
||||||
| `actionsMetricsServer.serviceAccount.name` | Set the service account name | |
|
| `actionsMetricsServer.serviceAccount.name` | Set the service account name | |
|
||||||
| `actionsMetricsServer.podAnnotations` | Set annotations for the actionsMetricsServer pod | |
|
| `actionsMetricsServer.podAnnotations` | Set annotations for the actionsMetricsServer pod | |
|
||||||
| `actionsMetricsServer.podLabels` | Set labels for the actionsMetricsServer pod | |
|
| `actionsMetricsServer.podLabels` | Set labels for the actionsMetricsServer pod | |
|
||||||
| `actionsMetricsServer.podSecurityContext` | Set the security context to actionsMetricsServer pod | |
|
| `actionsMetricsServer.podSecurityContext` | Set the security context to actionsMetricsServer pod | |
|
||||||
| `actionsMetricsServer.securityContext` | Set the security context for each container in the actionsMetricsServer pod | |
|
| `actionsMetricsServer.securityContext` | Set the security context for each container in the actionsMetricsServer pod | |
|
||||||
| `actionsMetricsServer.resources` | Set the actionsMetricsServer pod resources | |
|
| `actionsMetricsServer.resources` | Set the actionsMetricsServer pod resources | |
|
||||||
| `actionsMetricsServer.topologySpreadConstraints` | Set the actionsMetricsServer pod topologySpreadConstraints | |
|
| `actionsMetricsServer.topologySpreadConstraints` | Set the actionsMetricsServer pod topologySpreadConstraints | |
|
||||||
| `actionsMetricsServer.nodeSelector` | Set the actionsMetricsServer pod nodeSelector | |
|
| `actionsMetricsServer.nodeSelector` | Set the actionsMetricsServer pod nodeSelector | |
|
||||||
| `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | |
|
| `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | |
|
||||||
| `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | |
|
| `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | |
|
||||||
| `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | |
|
| `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | |
|
||||||
| `actionsMetricsServer.terminationGracePeriodSeconds` | Set the actionsMetricsServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
|
| `actionsMetricsServer.terminationGracePeriodSeconds` | Set the actionsMetricsServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
|
||||||
| `actionsMetricsServer.lifecycle` | Set the actionsMetricsServer pod lifecycle hooks | `{}` |
|
| `actionsMetricsServer.lifecycle` | Set the actionsMetricsServer pod lifecycle hooks | `{}` |
|
||||||
| `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | |
|
| `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | |
|
||||||
| `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
|
| `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
|
||||||
| `actionsMetricsServer.service.loadBalancerSourceRanges` | Set actionsMetricsServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
|
| `actionsMetricsServer.service.loadBalancerSourceRanges` | Set actionsMetricsServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
|
||||||
| `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false |
|
| `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false |
|
||||||
| `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | |
|
| `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | |
|
||||||
| `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
|
| `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
|
||||||
| `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | |
|
| `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | |
|
||||||
| `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | |
|
| `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | |
|
||||||
| `actionsMetrics.serviceMonitor.enable` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
|
| `actionsMetrics.serviceMonitor.enable` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
|
||||||
| `actionsMetrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrap the controller's metrics | 1m |
|
| `actionsMetrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrap the controller's metrics | 1m |
|
||||||
| `actionsMetrics.serviceMonitor.timeout` | Configure the timeout the timeout of Prometheus scrapping. | 30s |
|
| `actionsMetrics.serviceMonitor.namespace` | Namespace which Prometheus is running in. | `Release.Namespace` (the default namespace of the helm chart). |
|
||||||
| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
|
| `actionsMetrics.serviceMonitor.timeout` | Configure the timeout the timeout of Prometheus scrapping. | 30s |
|
||||||
| `actionsMetrics.port` | Set port of actions metrics service | 8443 |
|
| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
|
||||||
|
| `actionsMetrics.port` | Set port of actions metrics service | 8443 |
|
||||||
| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
|
| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
|
||||||
| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
|
| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
|
||||||
| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
|
| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
|
||||||
|
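As a quick reference, below is a minimal `values.yaml` sketch that exercises a few of the parameters documented above. The webhook secret value and the resource names are placeholders for illustration, not chart defaults.

```yaml
githubWebhookServer:
  enabled: true
  replicaCount: 1
  logFormat: json
  queueLimit: 100                      # placeholder value
  secret:
    enabled: true
    create: true
    name: github-webhook-server
    github_webhook_secret_token: "<your-webhook-secret>"   # placeholder
  podDisruptionBudget:
    enabled: true
    minAvailable: 1
actionsMetricsServer:
  enabled: true
  logFormat: json
actionsMetrics:
  serviceMonitor:
    enable: true
```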
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
-  controller-gen.kubebuilder.io/version: v0.11.3
+  controller-gen.kubebuilder.io/version: v0.14.0
-  creationTimestamp: null
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
group: actions.summerwind.dev
@@ -36,10 +35,19 @@ spec:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
properties:
apiVersion:
-  description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+  description: |-
+    APIVersion defines the versioned schema of this representation of an object.
+    Servers should convert recognized schemas to the latest internal value, and
+    may reject unrecognized values.
+    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
-  description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+  description: |-
+    Kind is a string value representing the REST resource this object represents.
+    Servers may infer this from the endpoint the client submits requests to.
+    Cannot be updated.
+    In CamelCase.
+    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -48,7 +56,9 @@ spec:
properties:
capacityReservations:
items:
-  description: CapacityReservation specifies the number of replicas temporarily added to the scale target until ExpirationTime.
+  description: |-
+    CapacityReservation specifies the number of replicas temporarily added
+    to the scale target until ExpirationTime.
properties:
effectiveTime:
format: date-time
@@ -80,30 +90,46 @@ spec:
items:
properties:
repositoryNames:
-  description: RepositoryNames is the list of repository names to be used for calculating the metric. For example, a repository name is the REPO part of `github.com/USER/REPO`.
+  description: |-
+    RepositoryNames is the list of repository names to be used for calculating the metric.
+    For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
-  description: ScaleDownAdjustment is the number of runners removed on scale-down. You can only specify either ScaleDownFactor or ScaleDownAdjustment.
+  description: |-
+    ScaleDownAdjustment is the number of runners removed on scale-down.
+    You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: integer
scaleDownFactor:
-  description: ScaleDownFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be removed.
+  description: |-
+    ScaleDownFactor is the multiplicative factor applied to the current number of runners used
+    to determine how many pods should be removed.
type: string
scaleDownThreshold:
-  description: ScaleDownThreshold is the percentage of busy runners less than which will trigger the hpa to scale the runners down.
+  description: |-
+    ScaleDownThreshold is the percentage of busy runners less than which will
+    trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
-  description: ScaleUpAdjustment is the number of runners added on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
+  description: |-
+    ScaleUpAdjustment is the number of runners added on scale-up.
+    You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
-  description: ScaleUpFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be added.
+  description: |-
+    ScaleUpFactor is the multiplicative factor applied to the current number of runners used
+    to determine how many pods should be added.
type: string
scaleUpThreshold:
-  description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up.
+  description: |-
+    ScaleUpThreshold is the percentage of busy runners greater than which will
+    trigger the hpa to scale runners up.
type: string
type:
-  description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
+  description: |-
+    Type is the type of metric to be used for autoscaling.
+    It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
@@ -111,7 +137,9 @@ spec:
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
-  description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
+  description: |-
+    ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
+    Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
@@ -127,7 +155,18 @@ spec:
type: string
type: object
scaleUpTriggers:
-  description: "ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 on each webhook requested received by the webhookBasedAutoscaler. \n This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster. \n Note that the added runners remain until the next sync period at least, and they may or may not be used by GitHub Actions depending on the timing. They are intended to be used to gain \"resource slack\" immediately after you receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available."
+  description: |-
+    ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
+    on each webhook requested received by the webhookBasedAutoscaler.
+
+    This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
+
+    Note that the added runners remain until the next sync period at least,
+    and they may or may not be used by GitHub Actions depending on the timing.
+    They are intended to be used to gain "resource slack" immediately after you
+    receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
items:
properties:
amount:
@@ -140,12 +179,18 @@ spec:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
-  description: Names is a list of GitHub Actions glob patterns. Any check_run event whose name matches one of patterns in the list can trigger autoscaling. Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file. So it is very likely that you can utilize this to trigger depending on the job.
+  description: |-
+    Names is a list of GitHub Actions glob patterns.
+    Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
+    Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
+    So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
-  description: Repositories is a list of GitHub repositories. Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
+  description: |-
+    Repositories is a list of GitHub repositories.
+    Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
items:
type: string
type: array
@@ -170,7 +215,9 @@ spec:
type: array
type: object
push:
-  description: PushSpec is the condition for triggering scale-up on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
+  description: |-
+    PushSpec is the condition for triggering scale-up on push event
+    Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
@@ -179,23 +226,33 @@ spec:
type: object
type: array
scheduledOverrides:
-  description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized.
+  description: |-
+    ScheduledOverrides is the list of ScheduledOverride.
+    It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+    The earlier a scheduled override is, the higher it is prioritized.
items:
-  description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
+  description: |-
+    ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+    A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override ends.
format: date-time
type: string
minReplicas:
-  description: MinReplicas is the number of runners while overriding. If omitted, it doesn't override minReplicas.
+  description: |-
+    MinReplicas is the number of runners while overriding.
+    If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
-  description: Frequency is the name of a predefined interval of each recurrence. The valid values are "Daily", "Weekly", "Monthly", and "Yearly". If empty, the corresponding override happens only once.
+  description: |-
+    Frequency is the name of a predefined interval of each recurrence.
+    The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
+    If empty, the corresponding override happens only once.
enum:
- Daily
- Weekly
@@ -203,7 +260,9 @@ spec:
- Yearly
type: string
untilTime:
-  description: UntilTime is the time of the final recurrence. If empty, the schedule recurs forever.
+  description: |-
+    UntilTime is the time of the final recurrence.
+    If empty, the schedule recurs forever.
format: date-time
type: string
type: object
@@ -232,18 +291,24 @@ spec:
type: object
type: array
desiredReplicas:
-  description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
+  description: |-
+    DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
+    This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
-  description: ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g. RunnerDeployment's generation, which is updated on mutation by the API Server.
+  description: |-
+    ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
+    RunnerDeployment's generation, which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
-  description: ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output for observability.
+  description: |-
+    ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
+    for observability.
type: string
type: object
type: object
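To make the schema above concrete, here is a sketch of a HorizontalRunnerAutoscaler that uses the PercentageRunnersBusy metric and a recurring scheduled override. The resource names and times are placeholders, and fields not visible in this excerpt (maxReplicas, startTime, scaleTargetRef.kind) follow the upstream actions.summerwind.dev API and should be treated as illustrative assumptions.

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-autoscaler            # placeholder
spec:
  scaleTargetRef:
    kind: RunnerDeployment            # assumption; not shown in the excerpt above
    name: example-runnerdeploy        # placeholder
  minReplicas: 1
  maxReplicas: 10                     # assumption; field exists upstream but is not in the excerpt
  metrics:
    - type: PercentageRunnersBusy
      scaleUpThreshold: "0.75"
      scaleDownThreshold: "0.25"
      scaleUpAdjustment: 2
      scaleDownAdjustment: 1
  scheduledOverrides:
    # Scale to zero over a holiday window, repeating every year.
    - startTime: "2024-12-24T00:00:00+00:00"   # assumption; startTime is not in the excerpt
      endTime: "2024-12-26T00:00:00+00:00"
      minReplicas: 0
      recurrenceRule:
        frequency: Yearly
```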
@@ -1,4 +1,5 @@
{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor.enable }}
+{{- $servicemonitornamespace := .Values.actionsMetrics.serviceMonitor.namespace | default .Release.Namespace }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -8,7 +9,7 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "actions-runner-controller-actions-metrics-server.serviceMonitorName" . }}
-namespace: {{ .Release.Namespace }}
+namespace: {{ $servicemonitornamespace }}
spec:
endpoints:
- path: /metrics
@@ -1,4 +1,5 @@
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor.enable }}
+{{- $servicemonitornamespace := .Values.actionsMetrics.serviceMonitor.namespace | default .Release.Namespace }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -8,7 +9,7 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "actions-runner-controller-github-webhook-server.serviceMonitorName" . }}
-namespace: {{ .Release.Namespace }}
+namespace: {{ $servicemonitornamespace }}
spec:
endpoints:
- path: /metrics
@@ -111,6 +111,7 @@ metrics:
serviceAnnotations: {}
serviceMonitor:
enable: false
+namespace: ""
timeout: 30s
interval: 1m
serviceMonitorLabels: {}
@@ -312,6 +313,7 @@ actionsMetrics:
# to deploy the actions-metrics-server whose k8s service is referenced by the service monitor.
serviceMonitor:
enable: false
+namespace: ""
timeout: 30s
interval: 1m
serviceMonitorLabels: {}
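A sketch of values that drive the two ServiceMonitor templates above once the namespace override exists. The `monitoring` namespace is an example; when the namespace is left empty it falls back to the release namespace, and the webhook server and actions metrics server must still be enabled for their ServiceMonitors to render at all.

```yaml
githubWebhookServer:
  enabled: true
metrics:
  serviceMonitor:
    enable: true
    namespace: monitoring     # example; empty string means the release namespace
actionsMetricsServer:
  enabled: true
actionsMetrics:
  serviceMonitor:
    enable: true
    namespace: monitoring     # example
    interval: 1m
    timeout: 30s
```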
@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+version: 0.9.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "0.6.0"
+appVersion: "0.9.1"

home: https://github.com/actions/actions-runner-controller
@@ -2,4 +2,4 @@ Thank you for installing {{ .Chart.Name }}.

Your release is named {{ .Release.Name }}.

-WARNING: value specified under image.pullPolicy will be ignored and no longer be applied to the listener pod spec as of gha-runner-scale-set-0.7.0. Please use the listenerTemplate in the gha-runner-scale-set chart to control the image pull policy of the listener.
+WARNING: Older version of the listener (githubrunnerscalesetlistener) is deprecated and will be removed in the future gha-runner-scale-set-0.10.0 release. If you are using environment variable override to force the old listener, please remove the environment variable and use the new listener (ghalistener) instead.
@@ -48,7 +48,7 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
app.kubernetes.io/part-of: gha-rs-controller
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $k, $v := .Values.labels }}
-{{ $k }}: {{ $v }}
+{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
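For example, with values like the following (mirroring the chart tests later in this compare), the `| quote` change keeps label values that contain spaces or an `@` rendering as valid YAML strings:

```yaml
labels:
  foo: bar
  github: actions
  team: "GitHub Team"           # would break rendering without quoting
  teamMail: "team@github.com"
```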
@@ -27,6 +27,9 @@ spec:
app.kubernetes.io/component: controller-manager
app.kubernetes.io/version: {{ .Chart.Version }}
{{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }}
+{{- with .Values.podLabels }}
+{{- toYaml . | nindent 8 }}
+{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
@@ -91,8 +94,6 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
-- name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
-  value: "{{ .Values.image.pullPolicy | default "IfNotPresent" }}"
{{- with .Values.env }}
{{- if kindIs "slice" . }}
{{- toYaml . | nindent 8 }}
@@ -109,10 +110,16 @@ spec:
volumeMounts:
- mountPath: /tmp
  name: tmp
+{{- range .Values.volumeMounts }}
+- {{ toYaml . | nindent 10 }}
+{{- end }}
terminationGracePeriodSeconds: 10
volumes:
- name: tmp
  emptyDir: {}
+{{- range .Values.volumes }}
+- {{ toYaml . | nindent 8 }}
+{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
@@ -121,6 +128,10 @@ spec:
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
+{{- with .Values.topologySpreadConstraints }}
+topologySpreadConstraints:
+{{- toYaml . | nindent 8 }}
+{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
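A sketch of controller-chart values that exercise the new podLabels, volumes, volumeMounts, and topologySpreadConstraints hooks added above. The ConfigMap name and mount path mirror the chart tests below; the topologyKey, whenUnsatisfiable, and selector label are illustrative assumptions rather than chart defaults.

```yaml
podLabels:
  team: runners                            # example label copied onto the controller pod
volumes:
  - name: customMount
    configMap:
      name: my-configmap                   # example ConfigMap, as in the chart tests
volumeMounts:
  - name: customMount
    mountPath: /my/mount/path
topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: kubernetes.io/hostname    # assumption; the tests use a dummy key
    whenUnsatisfiable: ScheduleAnyway
    labelSelector:
      matchLabels:
        app.kubernetes.io/part-of: gha-rs-controller   # assumption
```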
@@ -345,6 +345,7 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
|
|||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0)
|
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0)
|
||||||
assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
|
assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
|
||||||
|
assert.Len(t, deployment.Spec.Template.Spec.TopologySpreadConstraints, 0)
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)
|
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)
|
||||||
|
|
||||||
managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
|
managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
|
||||||
@@ -368,14 +369,12 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
|
|||||||
}
|
}
|
||||||
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
|
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
|
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
|
||||||
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
||||||
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
|
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
|
||||||
|
|
||||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
||||||
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
||||||
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
|
|
||||||
assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
|
|
||||||
|
|
||||||
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
|
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
|
||||||
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
|
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
|
||||||
@@ -406,6 +405,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
SetValues: map[string]string{
|
SetValues: map[string]string{
|
||||||
"labels.foo": "bar",
|
"labels.foo": "bar",
|
||||||
"labels.github": "actions",
|
"labels.github": "actions",
|
||||||
|
"labels.team": "GitHub Team",
|
||||||
|
"labels.teamMail": "team@github.com",
|
||||||
"replicaCount": "1",
|
"replicaCount": "1",
|
||||||
"image.pullPolicy": "Always",
|
"image.pullPolicy": "Always",
|
||||||
"image.tag": "dev",
|
"image.tag": "dev",
|
||||||
@@ -424,10 +425,17 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
"tolerations[0].key": "foo",
|
"tolerations[0].key": "foo",
|
||||||
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo",
|
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo",
|
||||||
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar",
|
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar",
|
||||||
"priorityClassName": "test-priority-class",
|
"topologySpreadConstraints[0].labelSelector.matchLabels.foo": "bar",
|
||||||
"flags.updateStrategy": "eventual",
|
"topologySpreadConstraints[0].maxSkew": "1",
|
||||||
"flags.logLevel": "info",
|
"topologySpreadConstraints[0].topologyKey": "foo",
|
||||||
"flags.logFormat": "json",
|
"priorityClassName": "test-priority-class",
|
||||||
|
"flags.updateStrategy": "eventual",
|
||||||
|
"flags.logLevel": "info",
|
||||||
|
"flags.logFormat": "json",
|
||||||
|
"volumes[0].name": "customMount",
|
||||||
|
"volumes[0].configMap.name": "my-configmap",
|
||||||
|
"volumeMounts[0].name": "customMount",
|
||||||
|
"volumeMounts[0].mountPath": "/my/mount/path",
|
||||||
},
|
},
|
||||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||||
}
|
}
|
||||||
@@ -447,6 +455,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/part-of"])
|
assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/part-of"])
|
||||||
assert.Equal(t, "bar", deployment.Labels["foo"])
|
assert.Equal(t, "bar", deployment.Labels["foo"])
|
||||||
assert.Equal(t, "actions", deployment.Labels["github"])
|
assert.Equal(t, "actions", deployment.Labels["github"])
|
||||||
|
assert.Equal(t, "GitHub Team", deployment.Labels["team"])
|
||||||
|
assert.Equal(t, "team@github.com", deployment.Labels["teamMail"])
|
||||||
|
|
||||||
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
|
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
|
||||||
|
|
||||||
@@ -459,8 +469,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
|
assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
|
||||||
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
|
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
|
||||||
|
|
||||||
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
|
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
|
||||||
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
|
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value)
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
|
||||||
assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
|
assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
|
||||||
@@ -468,9 +478,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup)
|
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup)
|
||||||
assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName)
|
assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName)
|
||||||
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
|
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.Volumes, 2)
|
||||||
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
|
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
|
||||||
assert.NotNil(t, 10, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
|
assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
|
||||||
|
assert.Equal(t, "customMount", deployment.Spec.Template.Spec.Volumes[1].Name)
|
||||||
|
assert.Equal(t, "my-configmap", deployment.Spec.Template.Spec.Volumes[1].ConfigMap.Name)
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 1)
|
||||||
assert.Equal(t, "bar", deployment.Spec.Template.Spec.NodeSelector["foo"])
|
assert.Equal(t, "bar", deployment.Spec.Template.Spec.NodeSelector["foo"])
|
||||||
@@ -479,6 +491,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key)
|
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key)
|
||||||
assert.Equal(t, "bar", string(deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Operator))
|
assert.Equal(t, "bar", string(deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Operator))
|
||||||
|
|
||||||
|
assert.Len(t, deployment.Spec.Template.Spec.TopologySpreadConstraints, 1)
|
||||||
|
assert.Equal(t, "bar", deployment.Spec.Template.Spec.TopologySpreadConstraints[0].LabelSelector.MatchLabels["foo"])
|
||||||
|
assert.Equal(t, int32(1), deployment.Spec.Template.Spec.TopologySpreadConstraints[0].MaxSkew)
|
||||||
|
assert.Equal(t, "foo", deployment.Spec.Template.Spec.TopologySpreadConstraints[0].TopologyKey)
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1)
|
||||||
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key)
|
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key)
|
||||||
|
|
||||||
@@ -505,25 +522,25 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
|||||||
|
|
||||||
assert.ElementsMatch(t, expectArgs, deployment.Spec.Template.Spec.Containers[0].Args)
|
assert.ElementsMatch(t, expectArgs, deployment.Spec.Template.Spec.Containers[0].Args)
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4)
|
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
|
||||||
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
||||||
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
|
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
|
||||||
|
|
||||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
||||||
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
||||||
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
|
|
||||||
assert.Equal(t, "Always", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
|
|
||||||
|
|
||||||
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
|
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
|
||||||
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
|
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value)
|
||||||
|
|
||||||
assert.Equal(t, "500m", deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())
|
assert.Equal(t, "500m", deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())
|
||||||
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot)
|
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot)
|
||||||
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser)
|
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser)
|
||||||
|
|
||||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
|
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 2)
|
||||||
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
|
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
|
||||||
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
|
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
|
||||||
|
assert.Equal(t, "customMount", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
|
||||||
|
assert.Equal(t, "/my/mount/path", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTemplate_EnableLeaderElectionRole(t *testing.T) {
|
func TestTemplate_EnableLeaderElectionRole(t *testing.T) {
|
||||||
@@ -737,6 +754,7 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {

 assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0)
 assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
+assert.Len(t, deployment.Spec.Template.Spec.TopologySpreadConstraints, 0)
 assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)

 managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
@@ -762,14 +780,12 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {

 assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)

-assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
+assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
 assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
 assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)

 assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
 assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
-assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
-assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go

 assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
 assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
@@ -812,17 +828,17 @@ func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) {
 assert.Equal(t, namespaceName, deployment.Namespace)
 assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name)

-assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7)
+assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 6)
-assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
+assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
-assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
+assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value)
-assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].Name)
+assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
-assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Name)
+assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Name)
-assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Key)
+assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Key)
-assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Optional)
+assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Optional)
-assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[5].Name)
+assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[4].Name)
-assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].Value)
+assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[4].Value)
-assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[6].Name)
+assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[5].Name)
-assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[6].ValueFrom)
+assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].ValueFrom)
 }

 func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) {
@@ -41,6 +41,8 @@ serviceAccount:

 podAnnotations: {}

+podLabels: {}

 podSecurityContext: {}
 # fsGroup: 2000

@@ -70,14 +72,20 @@ tolerations: []

 affinity: {}

+topologySpreadConstraints: []

+# Mount volumes in the container.
+volumes: []
+volumeMounts: []

 # Leverage a PriorityClass to ensure your pods survive resource shortages
 # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
 # PriorityClass: system-cluster-critical
 priorityClassName: ""

 ## If `metrics:` object is not provided, or commented out, the following flags
 ## will be applied the controller-manager and listener pods with empty values:
 ## `--metrics-addr`, `--listener-metrics-addr`, `--listener-metrics-endpoint`.
 ## This will disable metrics.
 ##
 ## To enable metrics, uncomment the following lines.
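For orientation, a minimal sketch of what an enabled metrics block could look like once uncommented. The flag names come from the comment above; the exact value keys below are assumptions, not taken from this diff, and should be checked against the chart's values.yaml:

metrics:
  controllerManagerAddr: ":8080"   # assumed key, corresponds to --metrics-addr
  listenerAddr: ":8080"            # assumed key, corresponds to --listener-metrics-addr
  listenerEndpoint: "/metrics"     # assumed key, corresponds to --listener-metrics-endpoint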
@@ -100,7 +108,7 @@ flags:

 ## Defines how the controller should handle upgrades while having running jobs.
 ##
-## The srategies available are:
+## The strategies available are:
 ## - "immediate": (default) The controller will immediately apply the change causing the
 ##   recreation of the listener and ephemeral runner set. This can lead to an
 ##   overprovisioning of runners, if there are pending / running jobs. This should not
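As a hedged illustration of the upgrade-handling setting described above (the key name under flags: and any strategy other than "immediate" are assumptions, not shown in this hunk):

flags:
  updateStrategy: "immediate"   # assumed key name; "immediate" is the default behaviour described above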
@@ -15,18 +15,18 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+version: 0.9.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.6.0"
+appVersion: "0.9.1"

-home: https://github.com/actions/dev-arc
+home: https://github.com/actions/actions-runner-controller

 sources:
-  - "https://github.com/actions/dev-arc"
+  - "https://github.com/actions/actions-runner-controller"

 maintainers:
   - name: actions
@@ -10,6 +10,10 @@ gha-rs
 {{- default (include "gha-base-name" .) .Values.nameOverride | trunc 63 | trimSuffix "-" }}
 {{- end }}

+{{- define "gha-runner-scale-set.scale-set-name" -}}
+{{ .Values.runnerScaleSetName | default .Release.Name }}
+{{- end }}

 {{/*
 Create a default fully qualified app name.
 We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
@@ -17,7 +21,7 @@ If release name contains chart name it will be used as a full name.
 */}}
 {{- define "gha-runner-scale-set.fullname" -}}
 {{- $name := default (include "gha-base-name" .) }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- printf "%s-%s" (include "gha-runner-scale-set.scale-set-name" .) $name | trunc 63 | trimSuffix "-" }}
 {{- end }}

 {{/*
@@ -38,7 +42,7 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
 {{- end }}
 app.kubernetes.io/managed-by: {{ .Release.Service }}
 app.kubernetes.io/part-of: gha-rs
-actions.github.com/scale-set-name: {{ .Release.Name }}
+actions.github.com/scale-set-name: {{ include "gha-runner-scale-set.scale-set-name" . }}
 actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
 {{- end }}

@@ -46,8 +50,8 @@ actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
 Selector labels
 */}}
 {{- define "gha-runner-scale-set.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "gha-runner-scale-set.name" . }}
+app.kubernetes.io/name: {{ include "gha-runner-scale-set.scale-set-name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }}
 {{- end }}

 {{- define "gha-runner-scale-set.githubsecret" -}}
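To illustrate the effect of the new scale-set-name helper with hypothetical values (not taken from this diff): once runnerScaleSetName is set, it drives the resource name and the selector labels instead of the Helm release name.

runnerScaleSetName: linux-runners          # hypothetical value
# labels rendered by the helpers above (sketch):
#   app.kubernetes.io/name: linux-runners
#   app.kubernetes.io/instance: linux-runners
#   actions.github.com/scale-set-name: linux-runners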
@@ -93,19 +97,26 @@ volumeMounts:

 {{- define "gha-runner-scale-set.dind-container" -}}
 image: docker:dind
+args:
+  - dockerd
+  - --host=unix:///var/run/docker.sock
+  - --group=$(DOCKER_GROUP_GID)
+env:
+  - name: DOCKER_GROUP_GID
+    value: "123"
 securityContext:
   privileged: true
 volumeMounts:
   - name: work
     mountPath: /home/runner/_work
-  - name: dind-cert
+  - name: dind-sock
-    mountPath: /certs/client
+    mountPath: /var/run
   - name: dind-externals
     mountPath: /home/runner/externals
 {{- end }}

 {{- define "gha-runner-scale-set.dind-volume" -}}
-- name: dind-cert
+- name: dind-sock
   emptyDir: {}
 - name: dind-externals
   emptyDir: {}
@@ -185,8 +196,6 @@ volumeMounts:
 {{- end }}
 {{- end }}
 {{- $setDockerHost := 1 }}
-{{- $setDockerTlsVerify := 1 }}
-{{- $setDockerCertPath := 1 }}
 {{- $setRunnerWaitDocker := 1 }}
 {{- $setNodeExtraCaCerts := 0 }}
 {{- $setRunnerUpdateCaCerts := 0 }}
@@ -200,12 +209,6 @@ env:
 {{- if eq $env.name "DOCKER_HOST" }}
 {{- $setDockerHost = 0 }}
 {{- end }}
-{{- if eq $env.name "DOCKER_TLS_VERIFY" }}
-{{- $setDockerTlsVerify = 0 }}
-{{- end }}
-{{- if eq $env.name "DOCKER_CERT_PATH" }}
-{{- $setDockerCertPath = 0 }}
-{{- end }}
 {{- if eq $env.name "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS" }}
 {{- $setRunnerWaitDocker = 0 }}
 {{- end }}
@@ -220,15 +223,7 @@ env:
 {{- end }}
 {{- if $setDockerHost }}
 - name: DOCKER_HOST
-  value: tcp://localhost:2376
+  value: unix:///var/run/docker.sock
-{{- end }}
-{{- if $setDockerTlsVerify }}
-- name: DOCKER_TLS_VERIFY
-  value: "1"
-{{- end }}
-{{- if $setDockerCertPath }}
-- name: DOCKER_CERT_PATH
-  value: /certs/client
 {{- end }}
 {{- if $setRunnerWaitDocker }}
 - name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS
@@ -254,7 +249,7 @@ volumeMounts:
 {{- if eq $volMount.name "work" }}
 {{- $mountWork = 0 }}
 {{- end }}
-{{- if eq $volMount.name "dind-cert" }}
+{{- if eq $volMount.name "dind-sock" }}
 {{- $mountDindCert = 0 }}
 {{- end }}
 {{- if eq $volMount.name "github-server-tls-cert" }}
@@ -268,9 +263,8 @@ volumeMounts:
   mountPath: /home/runner/_work
 {{- end }}
 {{- if $mountDindCert }}
-- name: dind-cert
+- name: dind-sock
-  mountPath: /certs/client
+  mountPath: /var/run
-  readOnly: true
 {{- end }}
 {{- if $mountGitHubServerTLS }}
 - name: github-server-tls-cert
@@ -390,6 +384,9 @@ volumeMounts:
 {{- $setNodeExtraCaCerts = 1 }}
 {{- $setRunnerUpdateCaCerts = 1 }}
 {{- end }}

+{{- $mountGitHubServerTLS := 0 }}
+{{- if or $container.env $setNodeExtraCaCerts $setRunnerUpdateCaCerts }}
 env:
 {{- with $container.env }}
 {{- range $i, $env := . }}
@@ -410,10 +407,12 @@ volumeMounts:
 - name: RUNNER_UPDATE_CA_CERTS
   value: "1"
 {{- end }}
-{{- $mountGitHubServerTLS := 0 }}
 {{- if $tlsConfig.runnerMountPath }}
 {{- $mountGitHubServerTLS = 1 }}
 {{- end }}
+{{- end }}

+{{- if or $container.volumeMounts $mountGitHubServerTLS }}
 volumeMounts:
 {{- with $container.volumeMounts }}
 {{- range $i, $volMount := . }}
@@ -428,6 +427,7 @@ volumeMounts:
   mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
   subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
 {{- end }}
+{{- end}}
 {{- end }}
 {{- end }}
 {{- end }}
@@ -525,13 +525,13 @@ volumeMounts:
 {{- end }}
 {{- end }}
 {{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
-{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
-{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- if gt $multiNamespacesCounter 1 }}
-{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- if eq $multiNamespacesCounter 1 }}
 {{- with $controllerDeployment.metadata }}
@@ -544,11 +544,11 @@ volumeMounts:
 {{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
 {{- end }}
 {{- else }}
-{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- end }}
 {{- if eq $managerServiceAccountNamespace "" }}
-{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- $managerServiceAccountNamespace }}
 {{- end }}
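The failure messages above point at pinning the controller service account explicitly when label-based discovery is ambiguous. A minimal sketch with example values (names are illustrative only):

controllerServiceAccount:
  name: arc-gha-rs-controller   # example name of the controller's service account
  namespace: arc-systems        # example namespace where the controller is installed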
@@ -1,18 +1,19 @@
 apiVersion: actions.github.com/v1alpha1
 kind: AutoscalingRunnerSet
 metadata:
-  {{- if or (not .Release.Name) (gt (len .Release.Name) 45) }}
+  {{- if or (not (include "gha-runner-scale-set.scale-set-name" .)) (gt (len (include "gha-runner-scale-set.scale-set-name" .)) 45) }}
   {{ fail "Name must have up to 45 characters" }}
   {{- end }}
   {{- if gt (len .Release.Namespace) 63 }}
   {{ fail "Namespace must have up to 63 characters" }}
   {{- end }}
-  name: {{ .Values.runnerScaleSetName | default .Release.Name }}
+  name: {{ include "gha-runner-scale-set.scale-set-name" . }}
   namespace: {{ .Release.Namespace }}
   labels:
     app.kubernetes.io/component: "autoscaling-runner-set"
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
   annotations:
+    actions.github.com/values-hash: {{ toJson .Values | sha256sum | trunc 63 }}
 {{- $containerMode := .Values.containerMode }}
 {{- if not (kindIs "string" .Values.githubConfigSecret) }}
     actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }}
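A sketch of how the new values-hash annotation renders (the hash below is illustrative only): the template serializes .Values to JSON, hashes it with sha256sum, and truncates the result to 63 characters, so any change to the supplied values yields a different annotation.

annotations:
  actions.github.com/values-hash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b85"  # illustrative value only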
@@ -5,6 +5,12 @@ kind: ServiceAccount
 metadata:
   name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
   namespace: {{ .Release.Namespace }}
+  {{- if .Values.containerMode.kubernetesModeServiceAccount }}
+  {{- with .Values.containerMode.kubernetesModeServiceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+  {{- end }}
 finalizers:
   - actions.github.com/cleanup-protection
 labels:
@@ -330,7 +330,7 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
 assert.Equal(t, namespaceName, ars.Namespace)
 assert.Equal(t, "test-runners", ars.Name)

-assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
+assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
 assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
 assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/part-of"])
 assert.Equal(t, "autoscaling-runner-set", ars.Labels["app.kubernetes.io/component"])
@@ -361,6 +361,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
 require.NoError(t, err)

 releaseName := "test-runners"
+nameOverride := "test-runner-scale-set-name"
 namespaceName := "test-" + strings.ToLower(random.UniqueId())

 options := &helm.Options{
@@ -368,7 +369,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
 SetValues: map[string]string{
 "githubConfigUrl": "https://github.com/actions",
 "githubConfigSecret.github_token": "gh_token12345",
-"runnerScaleSetName": "test-runner-scale-set-name",
+"runnerScaleSetName": nameOverride,
 "controllerServiceAccount.name": "arc",
 "controllerServiceAccount.namespace": "arc-system",
 },
@@ -381,12 +382,15 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &ars)

 assert.Equal(t, namespaceName, ars.Namespace)
-assert.Equal(t, "test-runner-scale-set-name", ars.Name)
+assert.Equal(t, nameOverride, ars.Name)

-assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
+assert.Equal(t, nameOverride, ars.Labels["app.kubernetes.io/name"])
-assert.Equal(t, releaseName, ars.Labels["app.kubernetes.io/instance"])
+assert.Equal(t, nameOverride, ars.Labels["app.kubernetes.io/instance"])
+assert.Equal(t, nameOverride, ars.Labels["actions.github.com/scale-set-name"])
+assert.Equal(t, namespaceName, ars.Labels["actions.github.com/scale-set-namespace"])
+assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/part-of"])
 assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
-assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
+assert.Equal(t, nameOverride+"-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
 assert.Equal(t, "test-runner-scale-set-name", ars.Spec.RunnerScaleSetName)

 assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
@@ -738,6 +742,37 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testin
 assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls")
 }
+
+func TestTemplateRenderedKubernetesModeServiceAccountAnnotations(t *testing.T) {
+t.Parallel()
+
+// Path to the helm chart we will test
+helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+require.NoError(t, err)
+
+testValuesPath, err := filepath.Abs("../tests/values_kubernetes_mode_service_account_annotations.yaml")
+require.NoError(t, err)
+
+releaseName := "test-runners"
+namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+options := &helm.Options{
+Logger: logger.Discard,
+SetValues: map[string]string{
+"controllerServiceAccount.name": "arc",
+"controllerServiceAccount.namespace": "arc-system",
+},
+ValuesFiles: []string{testValuesPath},
+KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+}
+
+output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
+
+var sa corev1.ServiceAccount
+helm.UnmarshalK8SYaml(t, output, &sa)
+
+assert.Equal(t, "arn:aws:iam::123456789012:role/sample-role", sa.Annotations["eks.amazonaws.com/role-arn"], "Annotations should be arn:aws:iam::123456789012:role/sample-role")
+}

 func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
 t.Parallel()

@@ -767,7 +802,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
 helm.UnmarshalK8SYaml(t, output, &ars)

 assert.Len(t, ars.Spec.Template.Spec.Volumes, 5, "Volumes should be 5")
-assert.Equal(t, "dind-cert", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-cert")
+assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-sock")
 assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be dind-externals")
 assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[2].Name, "Volume name should be work")
 assert.Equal(t, "/data", ars.Spec.Template.Spec.Volumes[2].HostPath.Path, "Volume host path should be /data")
@@ -840,7 +875,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
 assert.Equal(t, namespaceName, ars.Namespace)
 assert.Equal(t, "test-runners", ars.Name)

-assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
+assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
 assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
 assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
 assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
@@ -863,40 +898,35 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
 assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 container")
 assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
 assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
-assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 4, "The runner container should have 4 env vars, DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS")
+assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 2, "The runner container should have 2 env vars, DOCKER_HOST and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS")
 assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name)
-assert.Equal(t, "tcp://localhost:2376", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
+assert.Equal(t, "unix:///var/run/docker.sock", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
-assert.Equal(t, "DOCKER_TLS_VERIFY", ars.Spec.Template.Spec.Containers[0].Env[1].Name)
+assert.Equal(t, "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS", ars.Spec.Template.Spec.Containers[0].Env[1].Name)
-assert.Equal(t, "1", ars.Spec.Template.Spec.Containers[0].Env[1].Value)
+assert.Equal(t, "120", ars.Spec.Template.Spec.Containers[0].Env[1].Value)
-assert.Equal(t, "DOCKER_CERT_PATH", ars.Spec.Template.Spec.Containers[0].Env[2].Name)
-assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[0].Env[2].Value)
-assert.Equal(t, "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS", ars.Spec.Template.Spec.Containers[0].Env[3].Name)
-assert.Equal(t, "120", ars.Spec.Template.Spec.Containers[0].Env[3].Value)

-assert.Len(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, 2, "The runner container should have 2 volume mounts, dind-cert and work")
+assert.Len(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, 2, "The runner container should have 2 volume mounts, dind-sock and work")
 assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
 assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
 assert.False(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].ReadOnly)

-assert.Equal(t, "dind-cert", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
+assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
-assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
+assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
-assert.True(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].ReadOnly)

 assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name)
 assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image)
 assert.True(t, *ars.Spec.Template.Spec.Containers[1].SecurityContext.Privileged)
-assert.Len(t, ars.Spec.Template.Spec.Containers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-cert, work and externals")
+assert.Len(t, ars.Spec.Template.Spec.Containers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-sock, work and externals")
 assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].Name)
 assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath)

-assert.Equal(t, "dind-cert", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name)
+assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name)
-assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)
+assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)

 assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name)
 assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath)

 assert.Len(t, ars.Spec.Template.Spec.Volumes, 3, "Volumes should be 3")
-assert.Equal(t, "dind-cert", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-cert")
+assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-sock")
 assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be dind-externals")
 assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[2].Name, "Volume name should be work")
 assert.NotNil(t, ars.Spec.Template.Spec.Volumes[2].EmptyDir, "Volume work should be an emptyDir")
@@ -932,7 +962,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
 assert.Equal(t, namespaceName, ars.Namespace)
 assert.Equal(t, "test-runners", ars.Name)

-assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
+assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
 assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
 assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
 assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
@@ -1033,7 +1063,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T)
 assert.Equal(t, namespaceName, ars.Namespace)
 assert.Equal(t, "test-runners", ars.Name)

-assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
+assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
 assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
 assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
 assert.Equal(t, "pre-defined-secrets", ars.Spec.GitHubConfigSecret)
@@ -1833,10 +1863,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) {
 assert.Equal(t, "tcp://localhost:9999", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "DOCKER_HOST should be set to `tcp://localhost:9999`")
 assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set")
 assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`")
-assert.Equal(t, "DOCKER_TLS_VERIFY", ars.Spec.Template.Spec.Containers[0].Env[2].Name, "DOCKER_TLS_VERIFY should be set")
-assert.Equal(t, "1", ars.Spec.Template.Spec.Containers[0].Env[2].Value, "DOCKER_TLS_VERIFY should be set to `1`")
-assert.Equal(t, "DOCKER_CERT_PATH", ars.Spec.Template.Spec.Containers[0].Env[3].Name, "DOCKER_CERT_PATH should be set")
-assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[0].Env[3].Value, "DOCKER_CERT_PATH should be set to `/certs/client`")
 assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work")
 assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work")
 assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
@@ -1990,3 +2016,130 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t
 assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
 }
 }
+
+func TestRunnerContainerEnvNotEmptyMap(t *testing.T) {
+t.Parallel()
+
+// Path to the helm chart we will test
+helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+require.NoError(t, err)
+
+testValuesPath, err := filepath.Abs("../tests/values.yaml")
+require.NoError(t, err)
+
+releaseName := "test-runners"
+namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+options := &helm.Options{
+Logger: logger.Discard,
+ValuesFiles: []string{testValuesPath},
+KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+}
+
+output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+type testModel struct {
+Spec struct {
+Template struct {
+Spec struct {
+Containers []map[string]any `yaml:"containers"`
+} `yaml:"spec"`
+} `yaml:"template"`
+} `yaml:"spec"`
+}
+
+var m testModel
+helm.UnmarshalK8SYaml(t, output, &m)
+_, ok := m.Spec.Template.Spec.Containers[0]["env"]
+assert.False(t, ok, "env should not be set")
+}
+
+func TestRunnerContainerVolumeNotEmptyMap(t *testing.T) {
+t.Parallel()
+
+// Path to the helm chart we will test
+helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+require.NoError(t, err)
+
+testValuesPath, err := filepath.Abs("../tests/values.yaml")
+require.NoError(t, err)
+
+releaseName := "test-runners"
+namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+options := &helm.Options{
+Logger: logger.Discard,
+ValuesFiles: []string{testValuesPath},
+KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+}
+
+output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+type testModel struct {
+Spec struct {
+Template struct {
+Spec struct {
+Containers []map[string]any `yaml:"containers"`
+} `yaml:"spec"`
+} `yaml:"template"`
+} `yaml:"spec"`
+}
+
+var m testModel
+helm.UnmarshalK8SYaml(t, output, &m)
+_, ok := m.Spec.Template.Spec.Containers[0]["volumeMounts"]
+assert.False(t, ok, "volumeMounts should not be set")
+}
+
+func TestAutoscalingRunnerSetAnnotationValuesHash(t *testing.T) {
+t.Parallel()
+
+const valuesHash = "actions.github.com/values-hash"
+
+// Path to the helm chart we will test
+helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+require.NoError(t, err)
+
+releaseName := "test-runners"
+namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+options := &helm.Options{
+Logger: logger.Discard,
+SetValues: map[string]string{
+"githubConfigUrl": "https://github.com/actions",
+"githubConfigSecret.github_token": "gh_token12345",
+"controllerServiceAccount.name": "arc",
+"controllerServiceAccount.namespace": "arc-system",
+},
+KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+}
+
+output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+
+var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
+helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
+
+firstHash := autoscalingRunnerSet.Annotations["actions.github.com/values-hash"]
+assert.NotEmpty(t, firstHash)
+assert.LessOrEqual(t, len(firstHash), 63)
+
+helmChartPath, err = filepath.Abs("../../gha-runner-scale-set")
+require.NoError(t, err)
+
+options = &helm.Options{
+Logger: logger.Discard,
+SetValues: map[string]string{
+"githubConfigUrl": "https://github.com/actions",
+"githubConfigSecret.github_token": "gh_token1234567890",
+"controllerServiceAccount.name": "arc",
+"controllerServiceAccount.namespace": "arc-system",
+},
+KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+}
+
+output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+
+helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
+secondHash := autoscalingRunnerSet.Annotations[valuesHash]
+assert.NotEmpty(t, secondHash)
+assert.NotEqual(t, firstHash, secondHash)
+assert.LessOrEqual(t, len(secondHash), 63)
+}
@@ -28,4 +28,4 @@ template:
 path: /data
 type: Directory
 containerMode:
 type: kubernetes
@@ -0,0 +1,8 @@
+githubConfigUrl: https://github.com/actions/actions-runner-controller
+githubConfigSecret:
+  github_token: test
+containerMode:
+  type: kubernetes
+  kubernetesModeServiceAccount:
+    annotations:
+      eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/sample-role
@@ -39,7 +39,8 @@ githubConfigSecret:
 ## maxRunners is the max number of runners the autoscaling runner set will scale up to.
 # maxRunners: 5

-## minRunners is the min number of runners the autoscaling runner set will scale down to.
+## minRunners is the min number of idle runners. The target number of runners created will be
+## calculated as a sum of minRunners and the number of jobs assigned to the scale set.
 # minRunners: 0

 # runnerGroup: "default"
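A worked example of the wording above, with illustrative numbers: if minRunners is 2 and 3 jobs are currently assigned to the scale set, the target number of runners is 2 + 3 = 5.

# illustrative values only
# minRunners: 2      -> 2 idle runners are kept available
# assigned jobs: 3   -> target runner count = 2 + 3 = 5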
@@ -84,8 +85,10 @@ githubConfigSecret:
 # resources:
 # requests:
 # storage: 1Gi
+# kubernetesModeServiceAccount:
+# annotations:

-## template is the PodSpec for each listener Pod
+## listenerTemplate is the PodSpec for each listener Pod
 ## For reference: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
 # listenerTemplate:
 # spec:
@@ -122,32 +125,34 @@ template:
 ## command: ["/home/runner/run.sh"]
 ## env:
 ##   - name: DOCKER_HOST
-##     value: tcp://localhost:2376
+##     value: unix:///var/run/docker.sock
-##   - name: DOCKER_TLS_VERIFY
-##     value: "1"
-##   - name: DOCKER_CERT_PATH
-##     value: /certs/client
 ## volumeMounts:
 ##   - name: work
 ##     mountPath: /home/runner/_work
-##   - name: dind-cert
+##   - name: dind-sock
-##     mountPath: /certs/client
+##     mountPath: /var/run
-##     readOnly: true
 ## - name: dind
 ##   image: docker:dind
+##   args:
+##     - dockerd
+##     - --host=unix:///var/run/docker.sock
+##     - --group=$(DOCKER_GROUP_GID)
+##   env:
+##     - name: DOCKER_GROUP_GID
+##       value: "123"
 ##   securityContext:
 ##     privileged: true
 ##   volumeMounts:
 ##     - name: work
 ##       mountPath: /home/runner/_work
-##     - name: dind-cert
+##     - name: dind-sock
-##       mountPath: /certs/client
+##       mountPath: /var/run
 ##     - name: dind-externals
 ##       mountPath: /home/runner/externals
 ## volumes:
 ##   - name: work
 ##     emptyDir: {}
-##   - name: dind-cert
+##   - name: dind-sock
 ##     emptyDir: {}
 ##   - name: dind-externals
 ##     emptyDir: {}
137
cmd/ghalistener/app/app.go
Normal file
137
cmd/ghalistener/app/app.go
Normal file
@@ -0,0 +1,137 @@
package app

import (
    "context"
    "errors"
    "fmt"

    "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
    "github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
    "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
    "github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
    "github.com/actions/actions-runner-controller/github/actions"
    "github.com/go-logr/logr"
    "golang.org/x/sync/errgroup"
)

// App is responsible for initializing required components and running the app.
type App struct {
    // configured fields
    config config.Config
    logger logr.Logger

    // initialized fields
    listener Listener
    worker   Worker
    metrics  metrics.ServerPublisher
}

//go:generate mockery --name Listener --output ./mocks --outpkg mocks --case underscore
type Listener interface {
    Listen(ctx context.Context, handler listener.Handler) error
}

//go:generate mockery --name Worker --output ./mocks --outpkg mocks --case underscore
type Worker interface {
    HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
    HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error)
}

func New(config config.Config) (*App, error) {
    app := &App{
        config: config,
    }

    ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
    if err != nil {
        return nil, fmt.Errorf("failed to parse GitHub config from URL: %w", err)
    }

    {
        logger, err := config.Logger()
        if err != nil {
            return nil, fmt.Errorf("failed to create logger: %w", err)
        }
        app.logger = logger.WithName("listener-app")
    }

    actionsClient, err := config.ActionsClient(app.logger)
    if err != nil {
        return nil, fmt.Errorf("failed to create actions client: %w", err)
    }

    if config.MetricsAddr != "" {
        app.metrics = metrics.NewExporter(metrics.ExporterConfig{
            ScaleSetName:      config.EphemeralRunnerSetName,
            ScaleSetNamespace: config.EphemeralRunnerSetNamespace,
            Enterprise:        ghConfig.Enterprise,
            Organization:      ghConfig.Organization,
            Repository:        ghConfig.Repository,
            ServerAddr:        config.MetricsAddr,
            ServerEndpoint:    config.MetricsEndpoint,
        })
    }

    worker, err := worker.New(
        worker.Config{
            EphemeralRunnerSetNamespace: config.EphemeralRunnerSetNamespace,
            EphemeralRunnerSetName:      config.EphemeralRunnerSetName,
            MaxRunners:                  config.MaxRunners,
            MinRunners:                  config.MinRunners,
        },
        worker.WithLogger(app.logger.WithName("worker")),
    )
    if err != nil {
        return nil, fmt.Errorf("failed to create new kubernetes worker: %w", err)
    }
    app.worker = worker

    listener, err := listener.New(listener.Config{
        Client:     actionsClient,
        ScaleSetID: app.config.RunnerScaleSetId,
        MinRunners: app.config.MinRunners,
        MaxRunners: app.config.MaxRunners,
        Logger:     app.logger.WithName("listener"),
        Metrics:    app.metrics,
    })
    if err != nil {
        return nil, fmt.Errorf("failed to create new listener: %w", err)
    }
    app.listener = listener

    app.logger.Info("app initialized")

    return app, nil
}

func (app *App) Run(ctx context.Context) error {
    var errs []error
    if app.worker == nil {
        errs = append(errs, fmt.Errorf("worker not initialized"))
    }
    if app.listener == nil {
        errs = append(errs, fmt.Errorf("listener not initialized"))
    }
    if err := errors.Join(errs...); err != nil {
        return fmt.Errorf("app not initialized: %w", err)
    }

    g, ctx := errgroup.WithContext(ctx)
    metricsCtx, cancelMetrics := context.WithCancelCause(ctx)

    g.Go(func() error {
        app.logger.Info("Starting listener")
        listenerErr := app.listener.Listen(ctx, app.worker)
        cancelMetrics(fmt.Errorf("listener exited: %w", listenerErr))
        return listenerErr
    })

    if app.metrics != nil {
        g.Go(func() error {
            app.logger.Info("Starting metrics server")
            return app.metrics.ListenAndServe(metricsCtx)
        })
    }

    return g.Wait()
}
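The entrypoint that wires this App together is not part of this hunk. Below is a minimal sketch of how it could be driven from a config file; the --config flag name, default path, and signal handling are illustrative assumptions, not code from this changeset.

package main

import (
    "context"
    "flag"
    "log"
    "os/signal"
    "syscall"

    "github.com/actions/actions-runner-controller/cmd/ghalistener/app"
    "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
)

func main() {
    // Illustrative flag and default path; the real binary may differ.
    configPath := flag.String("config", "/etc/ghalistener/config.json", "path to the listener JSON config")
    flag.Parse()

    // Read and validate the listener configuration from disk.
    cfg, err := config.Read(*configPath)
    if err != nil {
        log.Fatalf("failed to read config: %v", err)
    }

    // Build the app (listener, worker, optional metrics exporter).
    a, err := app.New(cfg)
    if err != nil {
        log.Fatalf("failed to initialize app: %v", err)
    }

    // Run until the process receives SIGINT/SIGTERM.
    ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer stop()

    if err := a.Run(ctx); err != nil {
        log.Fatalf("listener app exited with error: %v", err)
    }
}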
85
cmd/ghalistener/app/app_test.go
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
appmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/app/mocks"
|
||||||
|
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
|
||||||
|
metricsMocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
|
||||||
|
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/mock"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestApp_Run(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
t.Run("ListenerWorkerGuard", func(t *testing.T) {
|
||||||
|
invalidApps := []*App{
|
||||||
|
{},
|
||||||
|
{worker: &worker.Worker{}},
|
||||||
|
{listener: &listener.Listener{}},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, app := range invalidApps {
|
||||||
|
assert.Error(t, app.Run(context.Background()))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ExitsOnListenerError", func(t *testing.T) {
|
||||||
|
listener := appmocks.NewListener(t)
|
||||||
|
worker := appmocks.NewWorker(t)
|
||||||
|
|
||||||
|
listener.On("Listen", mock.Anything, mock.Anything).Return(errors.New("listener error")).Once()
|
||||||
|
|
||||||
|
app := &App{
|
||||||
|
listener: listener,
|
||||||
|
worker: worker,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := app.Run(context.Background())
|
||||||
|
assert.Error(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ExitsOnListenerNil", func(t *testing.T) {
|
||||||
|
listener := appmocks.NewListener(t)
|
||||||
|
worker := appmocks.NewWorker(t)
|
||||||
|
|
||||||
|
listener.On("Listen", mock.Anything, mock.Anything).Return(nil).Once()
|
||||||
|
|
||||||
|
app := &App{
|
||||||
|
listener: listener,
|
||||||
|
worker: worker,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := app.Run(context.Background())
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("CancelListenerOnMetricsServerError", func(t *testing.T) {
|
||||||
|
listener := appmocks.NewListener(t)
|
||||||
|
worker := appmocks.NewWorker(t)
|
||||||
|
metrics := metricsMocks.NewServerPublisher(t)
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
listener.On("Listen", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
|
||||||
|
ctx := args.Get(0).(context.Context)
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
}()
|
||||||
|
}).Return(nil).Once()
|
||||||
|
|
||||||
|
metrics.On("ListenAndServe", mock.Anything).Return(errors.New("metrics server error")).Once()
|
||||||
|
|
||||||
|
app := &App{
|
||||||
|
listener: listener,
|
||||||
|
worker: worker,
|
||||||
|
metrics: metrics,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := app.Run(ctx)
|
||||||
|
assert.Error(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
43
cmd/ghalistener/app/mocks/listener.go
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
||||||
|
|
||||||
|
package mocks
|
||||||
|
|
||||||
|
import (
|
||||||
|
context "context"
|
||||||
|
|
||||||
|
listener "github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
|
||||||
|
mock "github.com/stretchr/testify/mock"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Listener is an autogenerated mock type for the Listener type
|
||||||
|
type Listener struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// Listen provides a mock function with given fields: ctx, handler
|
||||||
|
func (_m *Listener) Listen(ctx context.Context, handler listener.Handler) error {
|
||||||
|
ret := _m.Called(ctx, handler)
|
||||||
|
|
||||||
|
var r0 error
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, listener.Handler) error); ok {
|
||||||
|
r0 = rf(ctx, handler)
|
||||||
|
} else {
|
||||||
|
r0 = ret.Error(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewListener creates a new instance of Listener. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||||
|
// The first argument is typically a *testing.T value.
|
||||||
|
func NewListener(t interface {
|
||||||
|
mock.TestingT
|
||||||
|
Cleanup(func())
|
||||||
|
}) *Listener {
|
||||||
|
mock := &Listener{}
|
||||||
|
mock.Mock.Test(t)
|
||||||
|
|
||||||
|
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||||
|
|
||||||
|
return mock
|
||||||
|
}
|
||||||
68
cmd/ghalistener/app/mocks/worker.go
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
||||||
|
|
||||||
|
package mocks
|
||||||
|
|
||||||
|
import (
|
||||||
|
actions "github.com/actions/actions-runner-controller/github/actions"
|
||||||
|
|
||||||
|
context "context"
|
||||||
|
|
||||||
|
mock "github.com/stretchr/testify/mock"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Worker is an autogenerated mock type for the Worker type
|
||||||
|
type Worker struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, acquireCount
|
||||||
|
func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acquireCount int) (int, error) {
|
||||||
|
ret := _m.Called(ctx, count, acquireCount)
|
||||||
|
|
||||||
|
var r0 int
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
|
||||||
|
return rf(ctx, count, acquireCount)
|
||||||
|
}
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
|
||||||
|
r0 = rf(ctx, count, acquireCount)
|
||||||
|
} else {
|
||||||
|
r0 = ret.Get(0).(int)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
|
||||||
|
r1 = rf(ctx, count, acquireCount)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
|
||||||
|
func (_m *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
|
||||||
|
ret := _m.Called(ctx, jobInfo)
|
||||||
|
|
||||||
|
var r0 error
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
|
||||||
|
r0 = rf(ctx, jobInfo)
|
||||||
|
} else {
|
||||||
|
r0 = ret.Error(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWorker creates a new instance of Worker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||||
|
// The first argument is typically a *testing.T value.
|
||||||
|
func NewWorker(t interface {
|
||||||
|
mock.TestingT
|
||||||
|
Cleanup(func())
|
||||||
|
}) *Worker {
|
||||||
|
mock := &Worker{}
|
||||||
|
mock.Mock.Test(t)
|
||||||
|
|
||||||
|
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||||
|
|
||||||
|
return mock
|
||||||
|
}
|
||||||
161
cmd/ghalistener/config/config.go
Normal file
@@ -0,0 +1,161 @@
package config

import (
    "crypto/x509"
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
    "os"

    "github.com/actions/actions-runner-controller/build"
    "github.com/actions/actions-runner-controller/github/actions"
    "github.com/actions/actions-runner-controller/logging"
    "github.com/go-logr/logr"
    "golang.org/x/net/http/httpproxy"
)

type Config struct {
    ConfigureUrl                string `json:"configureUrl"`
    AppID                       int64  `json:"appID"`
    AppInstallationID           int64  `json:"appInstallationID"`
    AppPrivateKey               string `json:"appPrivateKey"`
    Token                       string `json:"token"`
    EphemeralRunnerSetNamespace string `json:"ephemeralRunnerSetNamespace"`
    EphemeralRunnerSetName      string `json:"ephemeralRunnerSetName"`
    MaxRunners                  int    `json:"maxRunners"`
    MinRunners                  int    `json:"minRunners"`
    RunnerScaleSetId            int    `json:"runnerScaleSetId"`
    RunnerScaleSetName          string `json:"runnerScaleSetName"`
    ServerRootCA                string `json:"serverRootCA"`
    LogLevel                    string `json:"logLevel"`
    LogFormat                   string `json:"logFormat"`
    MetricsAddr                 string `json:"metricsAddr"`
    MetricsEndpoint             string `json:"metricsEndpoint"`
}

func Read(path string) (Config, error) {
    f, err := os.Open(path)
    if err != nil {
        return Config{}, err
    }
    defer f.Close()

    var config Config
    if err := json.NewDecoder(f).Decode(&config); err != nil {
        return Config{}, fmt.Errorf("failed to decode config: %w", err)
    }

    if err := config.validate(); err != nil {
        return Config{}, fmt.Errorf("failed to validate config: %w", err)
    }

    return config, nil
}

func (c *Config) validate() error {
    if len(c.ConfigureUrl) == 0 {
        return fmt.Errorf("GitHubConfigUrl is not provided")
    }

    if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
        return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
    }

    if c.RunnerScaleSetId == 0 {
        return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
    }

    if c.MaxRunners < c.MinRunners {
        return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
    }

    hasToken := len(c.Token) > 0
    hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""

    if !hasToken && !hasPrivateKeyConfig {
        return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
    }

    if hasToken && hasPrivateKeyConfig {
        return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
    }

    return nil
}

func (c *Config) Logger() (logr.Logger, error) {
    logLevel := string(logging.LogLevelDebug)
    if c.LogLevel != "" {
        logLevel = c.LogLevel
    }

    logFormat := string(logging.LogFormatText)
    if c.LogFormat != "" {
        logFormat = c.LogFormat
    }

    logger, err := logging.NewLogger(logLevel, logFormat)
    if err != nil {
        return logr.Logger{}, fmt.Errorf("NewLogger failed: %w", err)
    }

    return logger, nil
}

func (c *Config) ActionsClient(logger logr.Logger, clientOptions ...actions.ClientOption) (*actions.Client, error) {
    var creds actions.ActionsAuth
    switch c.Token {
    case "":
        creds.AppCreds = &actions.GitHubAppAuth{
            AppID:             c.AppID,
            AppInstallationID: c.AppInstallationID,
            AppPrivateKey:     c.AppPrivateKey,
        }
    default:
        creds.Token = c.Token
    }

    options := append([]actions.ClientOption{
        actions.WithLogger(logger),
    }, clientOptions...)

    if c.ServerRootCA != "" {
        systemPool, err := x509.SystemCertPool()
        if err != nil {
            return nil, fmt.Errorf("failed to load system cert pool: %w", err)
        }
        pool := systemPool.Clone()
        ok := pool.AppendCertsFromPEM([]byte(c.ServerRootCA))
        if !ok {
            return nil, fmt.Errorf("failed to parse root certificate")
        }

        options = append(options, actions.WithRootCAs(pool))
    }

    proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
    options = append(options, actions.WithProxy(func(req *http.Request) (*url.URL, error) {
        return proxyFunc(req.URL)
    }))

    client, err := actions.NewClient(c.ConfigureUrl, &creds, options...)
    if err != nil {
        return nil, fmt.Errorf("failed to create actions client: %w", err)
    }

    client.SetUserAgent(actions.UserAgentInfo{
        Version:    build.Version,
        CommitSHA:  build.CommitSHA,
        ScaleSetID: c.RunnerScaleSetId,
        HasProxy:   hasProxy(),
        Subsystem:  "ghalistener",
    })

    return client, nil
}

func hasProxy() bool {
    proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
    return proxyFunc != nil
}
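For reference, a self-contained sketch of the JSON shape that Read expects, derived from the struct tags above. Every value is a made-up example and the temp-file round trip is only for illustration.

package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"

    "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
)

func main() {
    // Sample document matching the json tags on config.Config; values are illustrative.
    sample := `{
  "configureUrl": "https://github.com/myorg/myrepo",
  "token": "ghp_exampletoken",
  "ephemeralRunnerSetNamespace": "arc-runners",
  "ephemeralRunnerSetName": "my-runner-set",
  "runnerScaleSetId": 1,
  "minRunners": 0,
  "maxRunners": 5,
  "logLevel": "info",
  "logFormat": "text",
  "metricsAddr": ":8080",
  "metricsEndpoint": "/metrics"
}`

    path := filepath.Join(os.TempDir(), "ghalistener-config.json")
    if err := os.WriteFile(path, []byte(sample), 0o600); err != nil {
        log.Fatal(err)
    }

    // Read decodes the file and runs validate() before returning.
    cfg, err := config.Read(path)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("runner set:", cfg.EphemeralRunnerSetName, "max runners:", cfg.MaxRunners)
}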
161
cmd/ghalistener/config/config_client_test.go
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
package config_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
||||||
|
"github.com/actions/actions-runner-controller/github/actions"
|
||||||
|
"github.com/actions/actions-runner-controller/github/actions/testserver"
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCustomerServerRootCA(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
certsFolder := filepath.Join(
|
||||||
|
"../../../",
|
||||||
|
"github",
|
||||||
|
"actions",
|
||||||
|
"testdata",
|
||||||
|
)
|
||||||
|
certPath := filepath.Join(certsFolder, "server.crt")
|
||||||
|
keyPath := filepath.Join(certsFolder, "server.key")
|
||||||
|
|
||||||
|
serverCalledSuccessfully := false
|
||||||
|
|
||||||
|
server := testserver.NewUnstarted(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||||
|
serverCalledSuccessfully = true
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
w.Write([]byte(`{"count": 0}`))
|
||||||
|
}))
|
||||||
|
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}}
|
||||||
|
server.StartTLS()
|
||||||
|
|
||||||
|
var certsString string
|
||||||
|
rootCA, err := os.ReadFile(filepath.Join(certsFolder, "rootCA.crt"))
|
||||||
|
require.NoError(t, err)
|
||||||
|
certsString = string(rootCA)
|
||||||
|
|
||||||
|
intermediate, err := os.ReadFile(filepath.Join(certsFolder, "intermediate.pem"))
|
||||||
|
require.NoError(t, err)
|
||||||
|
certsString = certsString + string(intermediate)
|
||||||
|
|
||||||
|
config := config.Config{
|
||||||
|
ConfigureUrl: server.ConfigURLForOrg("myorg"),
|
||||||
|
ServerRootCA: certsString,
|
||||||
|
Token: "token",
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := config.ActionsClient(logr.Discard())
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = client.GetRunnerScaleSet(ctx, 1, "test")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, serverCalledSuccessfully)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxySettings(t *testing.T) {
|
||||||
|
t.Run("http", func(t *testing.T) {
|
||||||
|
wentThroughProxy := false
|
||||||
|
|
||||||
|
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
|
||||||
|
wentThroughProxy = true
|
||||||
|
}))
|
||||||
|
t.Cleanup(func() {
|
||||||
|
proxy.Close()
|
||||||
|
})
|
||||||
|
|
||||||
|
prevProxy := os.Getenv("http_proxy")
|
||||||
|
os.Setenv("http_proxy", proxy.URL)
|
||||||
|
defer os.Setenv("http_proxy", prevProxy)
|
||||||
|
|
||||||
|
config := config.Config{
|
||||||
|
ConfigureUrl: "https://github.com/org/repo",
|
||||||
|
Token: "token",
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := config.ActionsClient(logr.Discard())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = client.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.True(t, wentThroughProxy)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("https", func(t *testing.T) {
|
||||||
|
wentThroughProxy := false
|
||||||
|
|
||||||
|
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
|
||||||
|
wentThroughProxy = true
|
||||||
|
}))
|
||||||
|
t.Cleanup(func() {
|
||||||
|
proxy.Close()
|
||||||
|
})
|
||||||
|
|
||||||
|
prevProxy := os.Getenv("https_proxy")
|
||||||
|
os.Setenv("https_proxy", proxy.URL)
|
||||||
|
defer os.Setenv("https_proxy", prevProxy)
|
||||||
|
|
||||||
|
config := config.Config{
|
||||||
|
ConfigureUrl: "https://github.com/org/repo",
|
||||||
|
Token: "token",
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = client.Do(req)
|
||||||
|
// proxy doesn't support https
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.True(t, wentThroughProxy)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("no_proxy", func(t *testing.T) {
|
||||||
|
wentThroughProxy := false
|
||||||
|
|
||||||
|
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
|
||||||
|
wentThroughProxy = true
|
||||||
|
}))
|
||||||
|
t.Cleanup(func() {
|
||||||
|
proxy.Close()
|
||||||
|
})
|
||||||
|
|
||||||
|
prevProxy := os.Getenv("http_proxy")
|
||||||
|
os.Setenv("http_proxy", proxy.URL)
|
||||||
|
defer os.Setenv("http_proxy", prevProxy)
|
||||||
|
|
||||||
|
prevNoProxy := os.Getenv("no_proxy")
|
||||||
|
os.Setenv("no_proxy", "example.com")
|
||||||
|
defer os.Setenv("no_proxy", prevNoProxy)
|
||||||
|
|
||||||
|
config := config.Config{
|
||||||
|
ConfigureUrl: "https://github.com/org/repo",
|
||||||
|
Token: "token",
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := config.ActionsClient(logr.Discard())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = client.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, wentThroughProxy)
|
||||||
|
})
|
||||||
|
}
|
||||||
92
cmd/ghalistener/config/config_test.go
Normal file
@@ -0,0 +1,92 @@
package config

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestConfigValidationMinMax(t *testing.T) {
    config := &Config{
        ConfigureUrl:                "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
        MinRunners:                  5,
        MaxRunners:                  2,
        Token:                       "token",
    }
    err := config.validate()
    assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
}

func TestConfigValidationMissingToken(t *testing.T) {
    config := &Config{
        ConfigureUrl:                "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
    }
    err := config.validate()
    expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidationAppKey(t *testing.T) {
    config := &Config{
        AppID:                       1,
        AppInstallationID:           10,
        ConfigureUrl:                "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
    }
    err := config.validate()
    expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
    config := &Config{
        AppID:                       1,
        AppInstallationID:           10,
        AppPrivateKey:               "asdf",
        Token:                       "asdf",
        ConfigureUrl:                "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
    }
    err := config.validate()
    expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidation(t *testing.T) {
    config := &Config{
        ConfigureUrl:                "https://github.com/actions",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
        MinRunners:                  1,
        MaxRunners:                  5,
        Token:                       "asdf",
    }

    err := config.validate()

    assert.NoError(t, err, "Expected no error")
}

func TestConfigValidationConfigUrl(t *testing.T) {
    config := &Config{
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
    }

    err := config.validate()

    assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}
437
cmd/ghalistener/listener/listener.go
Normal file
@@ -0,0 +1,437 @@
|
|||||||
|
package listener
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
|
||||||
|
"github.com/actions/actions-runner-controller/github/actions"
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
sessionCreationMaxRetries = 10
|
||||||
|
)
|
||||||
|
|
||||||
|
// message types
|
||||||
|
const (
|
||||||
|
messageTypeJobAvailable = "JobAvailable"
|
||||||
|
messageTypeJobAssigned = "JobAssigned"
|
||||||
|
messageTypeJobStarted = "JobStarted"
|
||||||
|
messageTypeJobCompleted = "JobCompleted"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:generate mockery --name Client --output ./mocks --outpkg mocks --case underscore
|
||||||
|
type Client interface {
|
||||||
|
GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error)
|
||||||
|
CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error)
|
||||||
|
GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error)
|
||||||
|
DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error
|
||||||
|
AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error)
|
||||||
|
RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error)
|
||||||
|
DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
Client Client
|
||||||
|
ScaleSetID int
|
||||||
|
MinRunners int
|
||||||
|
MaxRunners int
|
||||||
|
Logger logr.Logger
|
||||||
|
Metrics metrics.Publisher
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Config) Validate() error {
|
||||||
|
if c.Client == nil {
|
||||||
|
return errors.New("client is required")
|
||||||
|
}
|
||||||
|
if c.ScaleSetID == 0 {
|
||||||
|
return errors.New("scaleSetID is required")
|
||||||
|
}
|
||||||
|
if c.MinRunners < 0 {
|
||||||
|
return errors.New("minRunners must be greater than or equal to 0")
|
||||||
|
}
|
||||||
|
if c.MaxRunners < 0 {
|
||||||
|
return errors.New("maxRunners must be greater than or equal to 0")
|
||||||
|
}
|
||||||
|
if c.MaxRunners > 0 && c.MinRunners > c.MaxRunners {
|
||||||
|
return errors.New("minRunners must be less than or equal to maxRunners")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The Listener's role is to manage all interactions with the actions service.
|
||||||
|
// It receives messages and processes them using the given handler.
|
||||||
|
type Listener struct {
|
||||||
|
// configured fields
|
||||||
|
scaleSetID int // The ID of the scale set associated with the listener.
|
||||||
|
client Client // The client used to interact with the scale set.
|
||||||
|
metrics metrics.Publisher // The publisher used to publish metrics.
|
||||||
|
|
||||||
|
// internal fields
|
||||||
|
logger logr.Logger // The logger used for logging.
|
||||||
|
hostname string // The hostname of the listener.
|
||||||
|
|
||||||
|
// updated fields
|
||||||
|
lastMessageID int64 // The ID of the last processed message.
|
||||||
|
maxCapacity int // The maximum number of runners that can be created.
|
||||||
|
session *actions.RunnerScaleSetSession // The session for managing the runner scale set.
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(config Config) (*Listener, error) {
|
||||||
|
if err := config.Validate(); err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
listener := &Listener{
|
||||||
|
scaleSetID: config.ScaleSetID,
|
||||||
|
client: config.Client,
|
||||||
|
logger: config.Logger,
|
||||||
|
metrics: metrics.Discard,
|
||||||
|
maxCapacity: config.MaxRunners,
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.Metrics != nil {
|
||||||
|
listener.metrics = config.Metrics
|
||||||
|
}
|
||||||
|
|
||||||
|
listener.metrics.PublishStatic(config.MinRunners, config.MaxRunners)
|
||||||
|
|
||||||
|
hostname, err := os.Hostname()
|
||||||
|
if err != nil {
|
||||||
|
hostname = uuid.NewString()
|
||||||
|
listener.logger.Info("Failed to get hostname, fallback to uuid", "uuid", hostname, "error", err)
|
||||||
|
}
|
||||||
|
listener.hostname = hostname
|
||||||
|
|
||||||
|
return listener, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:generate mockery --name Handler --output ./mocks --outpkg mocks --case underscore
|
||||||
|
type Handler interface {
|
||||||
|
HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
|
||||||
|
HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Listen listens for incoming messages and handles them using the provided handler.
|
||||||
|
// It continuously listens for messages until the context is cancelled.
|
||||||
|
// The initial message contains the current statistics and acquirable jobs, if any.
|
||||||
|
// The handler is responsible for handling the initial message and subsequent messages.
|
||||||
|
// If an error occurs during any step, Listen returns an error.
|
||||||
|
func (l *Listener) Listen(ctx context.Context, handler Handler) error {
|
||||||
|
if err := l.createSession(ctx); err != nil {
|
||||||
|
return fmt.Errorf("createSession failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if err := l.deleteMessageSession(); err != nil {
|
||||||
|
l.logger.Error(err, "failed to delete message session")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
initialMessage := &actions.RunnerScaleSetMessage{
|
||||||
|
MessageId: 0,
|
||||||
|
MessageType: "RunnerScaleSetJobMessages",
|
||||||
|
Statistics: l.session.Statistics,
|
||||||
|
Body: "",
|
||||||
|
}
|
||||||
|
|
||||||
|
if l.session.Statistics == nil {
|
||||||
|
return fmt.Errorf("session statistics is nil")
|
||||||
|
}
|
||||||
|
l.metrics.PublishStatistics(initialMessage.Statistics)
|
||||||
|
|
||||||
|
desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, initialMessage.Statistics.TotalAssignedJobs, 0)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("handling initial message failed: %w", err)
|
||||||
|
}
|
||||||
|
l.metrics.PublishDesiredRunners(desiredRunners)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
msg, err := l.getMessage(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get message: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if msg == nil {
|
||||||
|
_, err := handler.HandleDesiredRunnerCount(ctx, 0, 0)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("handling nil message failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove cancellation from the context to avoid cancelling the message handling.
|
||||||
|
if err := l.handleMessage(context.WithoutCancel(ctx), handler, msg); err != nil {
|
||||||
|
return fmt.Errorf("failed to handle message: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
|
||||||
|
parsedMsg, err := l.parseMessage(ctx, msg)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to parse message: %w", err)
|
||||||
|
}
|
||||||
|
l.metrics.PublishStatistics(parsedMsg.statistics)
|
||||||
|
|
||||||
|
if len(parsedMsg.jobsAvailable) > 0 {
|
||||||
|
acquiredJobIDs, err := l.acquireAvailableJobs(ctx, parsedMsg.jobsAvailable)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to acquire jobs: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.logger.Info("Jobs are acquired", "count", len(acquiredJobIDs), "requestIds", fmt.Sprint(acquiredJobIDs))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, jobCompleted := range parsedMsg.jobsCompleted {
|
||||||
|
l.metrics.PublishJobCompleted(jobCompleted)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.lastMessageID = msg.MessageId
|
||||||
|
|
||||||
|
if err := l.deleteLastMessage(ctx); err != nil {
|
||||||
|
return fmt.Errorf("failed to delete message: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, jobStarted := range parsedMsg.jobsStarted {
|
||||||
|
if err := handler.HandleJobStarted(ctx, jobStarted); err != nil {
|
||||||
|
return fmt.Errorf("failed to handle job started: %w", err)
|
||||||
|
}
|
||||||
|
l.metrics.PublishJobStarted(jobStarted)
|
||||||
|
}
|
||||||
|
|
||||||
|
desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, parsedMsg.statistics.TotalAssignedJobs, len(parsedMsg.jobsCompleted))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to handle desired runner count: %w", err)
|
||||||
|
}
|
||||||
|
l.metrics.PublishDesiredRunners(desiredRunners)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Listener) createSession(ctx context.Context) error {
|
||||||
|
var session *actions.RunnerScaleSetSession
|
||||||
|
var retries int
|
||||||
|
|
||||||
|
for {
|
||||||
|
var err error
|
||||||
|
session, err = l.client.CreateMessageSession(ctx, l.scaleSetID, l.hostname)
|
||||||
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
clientErr := &actions.HttpClientSideError{}
|
||||||
|
if !errors.As(err, &clientErr) {
|
||||||
|
return fmt.Errorf("failed to create session: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if clientErr.Code != http.StatusConflict {
|
||||||
|
return fmt.Errorf("failed to create session: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
retries++
|
||||||
|
if retries >= sessionCreationMaxRetries {
|
||||||
|
return fmt.Errorf("failed to create session after %d retries: %w", retries, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.logger.Info("Unable to create message session. Will try again in 30 seconds", "error", err.Error())
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return fmt.Errorf("context cancelled: %w", ctx.Err())
|
||||||
|
case <-time.After(30 * time.Second):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
statistics, err := json.Marshal(session.Statistics)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to marshal statistics: %w", err)
|
||||||
|
}
|
||||||
|
l.logger.Info("Current runner scale set statistics.", "statistics", string(statistics))
|
||||||
|
|
||||||
|
l.session = session
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
|
||||||
|
l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
|
||||||
|
msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
|
||||||
|
if err == nil { // if NO error
|
||||||
|
return msg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
expiredError := &actions.MessageQueueTokenExpiredError{}
|
||||||
|
if !errors.As(err, &expiredError) {
|
||||||
|
return nil, fmt.Errorf("failed to get next message: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := l.refreshSession(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
|
||||||
|
|
||||||
|
msg, err = l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
|
||||||
|
if err != nil { // still failing after the session refresh
|
||||||
|
return nil, fmt.Errorf("failed to get next message after message session refresh: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return msg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Listener) deleteLastMessage(ctx context.Context) error {
|
||||||
|
l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
|
||||||
|
if err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID); err != nil {
|
||||||
|
return fmt.Errorf("failed to delete message: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type parsedMessage struct {
|
||||||
|
statistics *actions.RunnerScaleSetStatistic
|
||||||
|
jobsStarted []*actions.JobStarted
|
||||||
|
jobsAvailable []*actions.JobAvailable
|
||||||
|
jobsCompleted []*actions.JobCompleted
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
|
||||||
|
if msg.MessageType != "RunnerScaleSetJobMessages" {
|
||||||
|
l.logger.Info("Skipping message", "messageType", msg.MessageType)
|
||||||
|
return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.logger.Info("Processing message", "messageId", msg.MessageId, "messageType", msg.MessageType)
|
||||||
|
if msg.Statistics == nil {
|
||||||
|
return nil, fmt.Errorf("invalid message: statistics is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
l.logger.Info("New runner scale set statistics.", "statistics", msg.Statistics)
|
||||||
|
|
||||||
|
var batchedMessages []json.RawMessage
|
||||||
|
if len(msg.Body) > 0 {
|
||||||
|
if err := json.Unmarshal([]byte(msg.Body), &batchedMessages); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to unmarshal batched messages: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
parsedMsg := &parsedMessage{
|
||||||
|
statistics: msg.Statistics,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, msg := range batchedMessages {
|
||||||
|
var messageType actions.JobMessageType
|
||||||
|
if err := json.Unmarshal(msg, &messageType); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode job message type: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch messageType.MessageType {
|
||||||
|
case messageTypeJobAvailable:
|
||||||
|
var jobAvailable actions.JobAvailable
|
||||||
|
if err := json.Unmarshal(msg, &jobAvailable); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode job available: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.logger.Info("Job available message received", "jobId", jobAvailable.RunnerRequestId)
|
||||||
|
parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)
|
||||||
|
|
||||||
|
case messageTypeJobAssigned:
|
||||||
|
var jobAssigned actions.JobAssigned
|
||||||
|
if err := json.Unmarshal(msg, &jobAssigned); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode job assigned: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.logger.Info("Job assigned message received", "jobId", jobAssigned.RunnerRequestId)
|
||||||
|
|
||||||
|
case messageTypeJobStarted:
|
||||||
|
var jobStarted actions.JobStarted
|
||||||
|
if err := json.Unmarshal(msg, &jobStarted); err != nil {
|
||||||
|
return nil, fmt.Errorf("could not decode job started message. %w", err)
|
||||||
|
}
|
||||||
|
l.logger.Info("Job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId)
|
||||||
|
parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)
|
||||||
|
|
||||||
|
case messageTypeJobCompleted:
|
||||||
|
var jobCompleted actions.JobCompleted
|
||||||
|
if err := json.Unmarshal(msg, &jobCompleted); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode job completed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.logger.Info("Job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName)
|
||||||
|
parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)
|
||||||
|
|
||||||
|
default:
|
||||||
|
l.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return parsedMsg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
|
||||||
|
ids := make([]int64, 0, len(jobsAvailable))
|
||||||
|
for _, job := range jobsAvailable {
|
||||||
|
ids = append(ids, job.RunnerRequestId)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids))
|
||||||
|
|
||||||
|
idsAcquired, err := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
|
||||||
|
if err == nil { // if NO errors
|
||||||
|
return idsAcquired, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
expiredError := &actions.MessageQueueTokenExpiredError{}
|
||||||
|
if !errors.As(err, &expiredError) {
|
||||||
|
return nil, fmt.Errorf("failed to acquire jobs: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := l.refreshSession(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
idsAcquired, err = l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to acquire jobs after session refresh: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return idsAcquired, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Listener) refreshSession(ctx context.Context) error {
|
||||||
|
l.logger.Info("Message queue token is expired during GetNextMessage, refreshing...")
|
||||||
|
session, err := l.client.RefreshMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("refresh message session failed. %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.session = session
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Listener) deleteMessageSession() error {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
l.logger.Info("Deleting message session")
|
||||||
|
|
||||||
|
if err := l.client.DeleteMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId); err != nil {
|
||||||
|
return fmt.Errorf("failed to delete message session: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
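Listen drives a Handler for every batch of messages; in this changeset the real implementation is the Kubernetes worker in cmd/ghalistener/worker. The following logging stand-in is shown only to illustrate the contract, and its min/max clamp is an assumption of this sketch, not the worker's actual policy.

package main

import (
    "context"
    "fmt"

    "github.com/actions/actions-runner-controller/github/actions"
)

// loggingHandler is a hypothetical Handler used only to illustrate the interface.
type loggingHandler struct {
    min, max int
}

// HandleJobStarted is invoked once per JobStarted message after the batch has
// been acknowledged, e.g. to record which runner picked up the job.
func (h *loggingHandler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
    fmt.Printf("job %d started on runner %d\n", jobInfo.RunnerRequestId, jobInfo.RunnerId)
    return nil
}

// HandleDesiredRunnerCount receives the assigned-job count from the latest
// statistics plus the number of jobs completed in this batch, and returns the
// runner count it actually applied (clamped to min/max in this sketch).
func (h *loggingHandler) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
    desired := count
    if desired < h.min {
        desired = h.min
    }
    if h.max > 0 && desired > h.max {
        desired = h.max
    }
    fmt.Printf("assigned=%d completed=%d -> desired=%d\n", count, jobsCompleted, desired)
    return desired, nil
}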
883
cmd/ghalistener/listener/listener_test.go
Normal file
@@ -0,0 +1,883 @@
|
|||||||
|
package listener
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
|
||||||
|
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
|
||||||
|
"github.com/actions/actions-runner-controller/github/actions"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/mock"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNew(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
t.Run("InvalidConfig", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
var config Config
|
||||||
|
_, err := New(config)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ValidConfig", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
config := Config{
|
||||||
|
Client: listenermocks.NewClient(t),
|
||||||
|
ScaleSetID: 1,
|
||||||
|
Metrics: metrics.Discard,
|
||||||
|
}
|
||||||
|
l, err := New(config)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.NotNil(t, l)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestListener_createSession(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
t.Run("FailOnce", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
config := Config{
|
||||||
|
ScaleSetID: 1,
|
||||||
|
Metrics: metrics.Discard,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := listenermocks.NewClient(t)
|
||||||
|
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
|
||||||
|
config.Client = client
|
||||||
|
|
||||||
|
l, err := New(config)
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
|
err = l.createSession(ctx)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("FailContext", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
config := Config{
|
||||||
|
ScaleSetID: 1,
|
||||||
|
Metrics: metrics.Discard,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := listenermocks.NewClient(t)
|
||||||
|
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil,
|
||||||
|
&actions.HttpClientSideError{Code: http.StatusConflict}).Once()
|
||||||
|
config.Client = client
|
||||||
|
|
||||||
|
l, err := New(config)
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
|
err = l.createSession(ctx)
|
||||||
|
assert.True(t, errors.Is(err, context.DeadlineExceeded))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("SetsSession", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
config := Config{
|
||||||
|
ScaleSetID: 1,
|
||||||
|
Metrics: metrics.Discard,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := listenermocks.NewClient(t)
|
||||||
|
|
||||||
|
uuid := uuid.New()
|
||||||
|
session := &actions.RunnerScaleSetSession{
|
||||||
|
SessionId: &uuid,
|
||||||
|
OwnerName: "example",
|
||||||
|
RunnerScaleSet: &actions.RunnerScaleSet{},
|
||||||
|
MessageQueueUrl: "https://example.com",
|
||||||
|
MessageQueueAccessToken: "1234567890",
|
||||||
|
Statistics: nil,
|
||||||
|
}
|
||||||
|
client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
|
||||||
|
config.Client = client
|
||||||
|
|
||||||
|
l, err := New(config)
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
|
err = l.createSession(context.Background())
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, session, l.session)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestListener_getMessage(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
t.Run("ReceivesMessage", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
config := Config{
|
||||||
|
ScaleSetID: 1,
|
||||||
|
Metrics: metrics.Discard,
|
||||||
|
MaxRunners: 10,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := listenermocks.NewClient(t)
|
||||||
|
want := &actions.RunnerScaleSetMessage{
|
||||||
|
MessageId: 1,
|
||||||
|
}
|
||||||
|
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(want, nil).Once()
|
||||||
|
config.Client = client
|
||||||
|
|
||||||
|
l, err := New(config)
|
||||||
|
require.Nil(t, err)
|
||||||
|
l.session = &actions.RunnerScaleSetSession{}
|
||||||
|
|
||||||
|
got, err := l.getMessage(ctx)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, want, got)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("NotExpiredError", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
config := Config{
|
||||||
|
ScaleSetID: 1,
|
||||||
|
Metrics: metrics.Discard,
|
||||||
|
MaxRunners: 10,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := listenermocks.NewClient(t)
|
||||||
|
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.HttpClientSideError{Code: http.StatusNotFound}).Once()
|
||||||
|
config.Client = client
|
||||||
|
|
||||||
|
l, err := New(config)
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
|
l.session = &actions.RunnerScaleSetSession{}
|
||||||
|
|
||||||
|
_, err = l.getMessage(ctx)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("RefreshAndSucceeds", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
config := Config{
|
||||||
|
ScaleSetID: 1,
|
||||||
|
Metrics: metrics.Discard,
|
||||||
|
MaxRunners: 10,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := listenermocks.NewClient(t)
|
||||||
|
|
||||||
|
uuid := uuid.New()
|
||||||
|
session := &actions.RunnerScaleSetSession{
|
||||||
|
SessionId: &uuid,
|
||||||
|
OwnerName: "example",
|
||||||
|
RunnerScaleSet: &actions.RunnerScaleSet{},
|
||||||
|
MessageQueueUrl: "https://example.com",
|
||||||
|
MessageQueueAccessToken: "1234567890",
|
||||||
|
Statistics: nil,
|
||||||
|
}
|
||||||
|
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
|
||||||
|
|
||||||
|
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
|
||||||
|
|
||||||
|
want := &actions.RunnerScaleSetMessage{
|
||||||
|
MessageId: 1,
|
||||||
|
}
|
||||||
|
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(want, nil).Once()
|
||||||
|
|
||||||
|
config.Client = client
|
||||||
|
|
||||||
|
l, err := New(config)
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
|
l.session = &actions.RunnerScaleSetSession{
|
||||||
|
SessionId: &uuid,
|
||||||
|
RunnerScaleSet: &actions.RunnerScaleSet{},
|
||||||
|
}
|
||||||
|
|
||||||
|
got, err := l.getMessage(ctx)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, want, got)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("RefreshAndFails", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
config := Config{
|
||||||
|
ScaleSetID: 1,
|
||||||
|
Metrics: metrics.Discard,
|
||||||
|
MaxRunners: 10,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := listenermocks.NewClient(t)
|
||||||
|
|
||||||
|
uuid := uuid.New()
|
||||||
|
session := &actions.RunnerScaleSetSession{
|
||||||
|
SessionId: &uuid,
|
||||||
|
OwnerName: "example",
|
||||||
|
RunnerScaleSet: &actions.RunnerScaleSet{},
|
||||||
|
MessageQueueUrl: "https://example.com",
|
||||||
|
MessageQueueAccessToken: "1234567890",
|
||||||
|
Statistics: nil,
|
||||||
|
}
|
||||||
|
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
|
||||||
|
|
||||||
|
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
|
||||||
|
|
||||||
|
config.Client = client
|
||||||
|
|
||||||
|
l, err := New(config)
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
|
l.session = &actions.RunnerScaleSetSession{
|
||||||
|
SessionId: &uuid,
|
||||||
|
RunnerScaleSet: &actions.RunnerScaleSet{},
|
||||||
|
}
|
||||||
|
|
||||||
|
got, err := l.getMessage(ctx)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
assert.Nil(t, got)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestListener_refreshSession(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
t.Run("SuccessfullyRefreshes", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
config := Config{
|
||||||
|
ScaleSetID: 1,
|
||||||
|
Metrics: metrics.Discard,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := listenermocks.NewClient(t)
|
||||||
|
|
||||||
|
newUUID := uuid.New()
|
||||||
|
session := &actions.RunnerScaleSetSession{
|
||||||
|
SessionId: &newUUID,
|
||||||
|
OwnerName: "example",
|
||||||
|
RunnerScaleSet: &actions.RunnerScaleSet{},
|
||||||
|
MessageQueueUrl: "https://example.com",
|
||||||
|
MessageQueueAccessToken: "1234567890",
|
||||||
|
Statistics: nil,
|
||||||
|
}
|
||||||
|
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
|
||||||
|
|
||||||
|
config.Client = client
|
||||||
|
|
||||||
|
l, err := New(config)
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
		oldUUID := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &oldUUID,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}

		err = l.refreshSession(ctx)
		assert.Nil(t, err)
		assert.Equal(t, session, l.session)
	})

	t.Run("FailsToRefresh", func(t *testing.T) {
		t.Parallel()

		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}

		client := listenermocks.NewClient(t)

		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, errors.New("error")).Once()

		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		oldUUID := uuid.New()
		oldSession := &actions.RunnerScaleSetSession{
			SessionId:      &oldUUID,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}
		l.session = oldSession

		err = l.refreshSession(ctx)
		assert.NotNil(t, err)
		assert.Equal(t, oldSession, l.session)
	})
}

func TestListener_deleteLastMessage(t *testing.T) {
	t.Parallel()

	t.Run("SuccessfullyDeletes", func(t *testing.T) {
		t.Parallel()

		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}

		client := listenermocks.NewClient(t)

		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool {
			return lastMessageID.(int64) == int64(5)
		})).Return(nil).Once()

		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{}
		l.lastMessageID = 5

		err = l.deleteLastMessage(ctx)
		assert.Nil(t, err)
	})

	t.Run("FailsToDelete", func(t *testing.T) {
		t.Parallel()

		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}

		client := listenermocks.NewClient(t)

		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("error")).Once()

		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{}
		l.lastMessageID = 5

		err = l.deleteLastMessage(ctx)
		assert.NotNil(t, err)
	})
}

func TestListener_Listen(t *testing.T) {
	t.Parallel()

	t.Run("CreateSessionFails", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}

		client := listenermocks.NewClient(t)
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		err = l.Listen(ctx, nil)
		assert.NotNil(t, err)
	})

	t.Run("CallHandleRegardlessOfInitialMessage", func(t *testing.T) {
		t.Parallel()
		ctx, cancel := context.WithCancel(context.Background())

		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}

		client := listenermocks.NewClient(t)

		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()

		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		var called bool
		handler := listenermocks.NewHandler(t)
		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
			Return(0, nil).
			Run(
				func(mock.Arguments) {
					called = true
					cancel()
				},
			).
			Once()

		err = l.Listen(ctx, handler)
		assert.True(t, errors.Is(err, context.Canceled))
		assert.True(t, called)
	})

	t.Run("CancelContextAfterGetMessage", func(t *testing.T) {
		t.Parallel()

		ctx, cancel := context.WithCancel(context.Background())

		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}

		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()

		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerScaleSetJobMessages",
			Statistics:  &actions.RunnerScaleSetStatistic{},
		}
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).
			Return(msg, nil).
			Run(
				func(mock.Arguments) {
					cancel()
				},
			).
			Once()

		// Ensure delete message is called without cancel
		client.On("DeleteMessage", context.WithoutCancel(ctx), mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()

		config.Client = client

		handler := listenermocks.NewHandler(t)
		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
			Return(0, nil).
			Once()

		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
			Return(0, nil).
			Once()

		l, err := New(config)
		require.Nil(t, err)

		err = l.Listen(ctx, handler)
		assert.ErrorIs(t, context.Canceled, err)
	})
}

func TestListener_acquireAvailableJobs(t *testing.T) {
	t.Parallel()

	t.Run("FailingToAcquireJobs", func(t *testing.T) {
		t.Parallel()

		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}

		client := listenermocks.NewClient(t)

		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()

		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		uuid := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}

		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 3,
				},
			},
		}
		_, err = l.acquireAvailableJobs(ctx, availableJobs)
		assert.Error(t, err)
	})

	t.Run("SuccessfullyAcquiresJobsOnFirstRun", func(t *testing.T) {
		t.Parallel()

		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}

		client := listenermocks.NewClient(t)

		jobIDs := []int64{1, 2, 3}

		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(jobIDs, nil).Once()

		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		uuid := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}

		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 3,
				},
			},
		}
		acquiredJobIDs, err := l.acquireAvailableJobs(ctx, availableJobs)
		assert.NoError(t, err)
		assert.Equal(t, []int64{1, 2, 3}, acquiredJobIDs)
	})

	t.Run("RefreshAndSucceeds", func(t *testing.T) {
		t.Parallel()

		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}

		client := listenermocks.NewClient(t)

		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()

		// Second call to AcquireJobs will succeed
		want := []int64{1, 2, 3}
		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 3,
				},
			},
		}

		// First call to AcquireJobs will fail with a token expired error
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
			Run(func(args mock.Arguments) {
				ids := args.Get(3).([]int64)
				assert.Equal(t, want, ids)
			}).
			Return(nil, &actions.MessageQueueTokenExpiredError{}).
			Once()

		// Second call should succeed
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
			Run(func(args mock.Arguments) {
				ids := args.Get(3).([]int64)
				assert.Equal(t, want, ids)
			}).
			Return(want, nil).
			Once()

		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &uuid,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}

		got, err := l.acquireAvailableJobs(ctx, availableJobs)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	})

	t.Run("RefreshAndFails", func(t *testing.T) {
		t.Parallel()

		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}

		client := listenermocks.NewClient(t)

		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()

		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()

		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &uuid,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}

		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestId: 3,
				},
			},
		}

		got, err := l.acquireAvailableJobs(ctx, availableJobs)
		assert.NotNil(t, err)
		assert.Nil(t, got)
	})
}

func TestListener_parseMessage(t *testing.T) {
	t.Run("FailOnEmptyStatistics", func(t *testing.T) {
		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerScaleSetJobMessages",
			Statistics:  nil,
		}

		l := &Listener{}
		parsedMsg, err := l.parseMessage(context.Background(), msg)
		assert.Error(t, err)
		assert.Nil(t, parsedMsg)
	})

	t.Run("FailOnIncorrectMessageType", func(t *testing.T) {
		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerMessages", // arbitrary message type
			Statistics:  &actions.RunnerScaleSetStatistic{},
		}

		l := &Listener{}
		parsedMsg, err := l.parseMessage(context.Background(), msg)
		assert.Error(t, err)
		assert.Nil(t, parsedMsg)
	})

	t.Run("ParseAll", func(t *testing.T) {
		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerScaleSetJobMessages",
			Body:        "",
			Statistics: &actions.RunnerScaleSetStatistic{
				TotalAvailableJobs:     1,
				TotalAcquiredJobs:      2,
				TotalAssignedJobs:      3,
				TotalRunningJobs:       4,
				TotalRegisteredRunners: 5,
				TotalBusyRunners:       6,
				TotalIdleRunners:       7,
			},
		}

		var batchedMessages []any
		jobsAvailable := []*actions.JobAvailable{
			{
				AcquireJobUrl: "https://github.com/example",
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAvailable,
					},
					RunnerRequestId: 1,
				},
			},
			{
				AcquireJobUrl: "https://github.com/example",
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAvailable,
					},
					RunnerRequestId: 2,
				},
			},
		}
		for _, msg := range jobsAvailable {
			batchedMessages = append(batchedMessages, msg)
		}

		jobsAssigned := []*actions.JobAssigned{
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAssigned,
					},
					RunnerRequestId: 3,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAssigned,
					},
					RunnerRequestId: 4,
				},
			},
		}
		for _, msg := range jobsAssigned {
			batchedMessages = append(batchedMessages, msg)
		}

		jobsStarted := []*actions.JobStarted{
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobStarted,
					},
					RunnerRequestId: 5,
				},
				RunnerId:   2,
				RunnerName: "runner2",
			},
		}
		for _, msg := range jobsStarted {
			batchedMessages = append(batchedMessages, msg)
		}

		jobsCompleted := []*actions.JobCompleted{
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobCompleted,
					},
					RunnerRequestId: 6,
				},
				Result:     "success",
				RunnerId:   1,
				RunnerName: "runner1",
			},
		}
		for _, msg := range jobsCompleted {
			batchedMessages = append(batchedMessages, msg)
		}

		b, err := json.Marshal(batchedMessages)
		require.NoError(t, err)

		msg.Body = string(b)

		l := &Listener{}
		parsedMsg, err := l.parseMessage(context.Background(), msg)
		require.NoError(t, err)

		assert.Equal(t, msg.Statistics, parsedMsg.statistics)
		assert.Equal(t, jobsAvailable, parsedMsg.jobsAvailable)
		assert.Equal(t, jobsStarted, parsedMsg.jobsStarted)
		assert.Equal(t, jobsCompleted, parsedMsg.jobsCompleted)
	})
}
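Note (editorial aside, not part of the diff): the ParseAll case above shows the batching format the listener consumes; the message Body is a JSON array whose elements carry a message-type discriminator, and parseMessage splits them back into the typed slices asserted at the end of the test. The sketch below illustrates that two-pass decode idea only; the "messageType" JSON tag, the discriminator string values, and the envelope struct are assumptions for illustration, not the listener's actual implementation.

package main

import (
	"encoding/json"
	"fmt"
)

// envelope is a hypothetical peek struct used only to read the discriminator
// before unmarshalling the element into its concrete job message type.
type envelope struct {
	MessageType string `json:"messageType"` // assumed JSON tag
}

func splitByType(body string) (map[string][]json.RawMessage, error) {
	var raw []json.RawMessage
	if err := json.Unmarshal([]byte(body), &raw); err != nil {
		return nil, err
	}
	groups := map[string][]json.RawMessage{}
	for _, r := range raw {
		var e envelope
		if err := json.Unmarshal(r, &e); err != nil {
			return nil, err
		}
		groups[e.MessageType] = append(groups[e.MessageType], r)
	}
	return groups, nil
}

func main() {
	// Illustrative batched body with two assumed discriminator values.
	body := `[{"messageType":"JobAvailable","runnerRequestId":1},{"messageType":"JobCompleted","runnerRequestId":6}]`
	groups, err := splitByType(body)
	fmt.Println(len(groups["JobAvailable"]), len(groups["JobCompleted"]), err)
}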
205
cmd/ghalistener/listener/metrics_test.go
Normal file
@@ -0,0 +1,205 @@
package listener

import (
	"context"
	"encoding/json"
	"testing"

	listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
	metricsmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestInitialMetrics(t *testing.T) {
	t.Parallel()

	t.Run("SetStaticMetrics", func(t *testing.T) {
		t.Parallel()

		metrics := metricsmocks.NewPublisher(t)

		minRunners := 5
		maxRunners := 10
		metrics.On("PublishStatic", minRunners, maxRunners).Once()

		config := Config{
			Client:     listenermocks.NewClient(t),
			ScaleSetID: 1,
			Metrics:    metrics,
			MinRunners: minRunners,
			MaxRunners: maxRunners,
		}
		l, err := New(config)

		assert.Nil(t, err)
		assert.NotNil(t, l)
	})

	t.Run("InitialMessageStatistics", func(t *testing.T) {
		t.Parallel()

		ctx, cancel := context.WithCancel(context.Background())

		sessionStatistics := &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs:     1,
			TotalAcquiredJobs:      2,
			TotalAssignedJobs:      3,
			TotalRunningJobs:       4,
			TotalRegisteredRunners: 5,
			TotalBusyRunners:       6,
			TotalIdleRunners:       7,
		}

		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              sessionStatistics,
		}

		metrics := metricsmocks.NewPublisher(t)
		metrics.On("PublishStatic", mock.Anything, mock.Anything).Once()
		metrics.On("PublishStatistics", sessionStatistics).Once()
		metrics.On("PublishDesiredRunners", sessionStatistics.TotalAssignedJobs).
			Run(
				func(mock.Arguments) {
					cancel()
				},
			).Once()

		config := Config{
			Client:     listenermocks.NewClient(t),
			ScaleSetID: 1,
			Metrics:    metrics,
		}

		client := listenermocks.NewClient(t)
		client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
		client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
		config.Client = client

		handler := listenermocks.NewHandler(t)
		handler.On("HandleDesiredRunnerCount", mock.Anything, sessionStatistics.TotalAssignedJobs, 0).
			Return(sessionStatistics.TotalAssignedJobs, nil).
			Once()

		l, err := New(config)
		assert.Nil(t, err)
		assert.NotNil(t, l)

		assert.ErrorIs(t, context.Canceled, l.Listen(ctx, handler))
	})
}

func TestHandleMessageMetrics(t *testing.T) {
	t.Parallel()

	msg := &actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "RunnerScaleSetJobMessages",
		Body:        "",
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs:     1,
			TotalAcquiredJobs:      2,
			TotalAssignedJobs:      3,
			TotalRunningJobs:       4,
			TotalRegisteredRunners: 5,
			TotalBusyRunners:       6,
			TotalIdleRunners:       7,
		},
	}

	var batchedMessages []any
	jobsStarted := []*actions.JobStarted{
		{
			JobMessageBase: actions.JobMessageBase{
				JobMessageType: actions.JobMessageType{
					MessageType: messageTypeJobStarted,
				},
				RunnerRequestId: 8,
			},
			RunnerId:   3,
			RunnerName: "runner3",
		},
	}
	for _, msg := range jobsStarted {
		batchedMessages = append(batchedMessages, msg)
	}

	jobsCompleted := []*actions.JobCompleted{
		{
			JobMessageBase: actions.JobMessageBase{
				JobMessageType: actions.JobMessageType{
					MessageType: messageTypeJobCompleted,
				},
				RunnerRequestId: 6,
			},
			Result:     "success",
			RunnerId:   1,
			RunnerName: "runner1",
		},
		{
			JobMessageBase: actions.JobMessageBase{
				JobMessageType: actions.JobMessageType{
					MessageType: messageTypeJobCompleted,
				},
				RunnerRequestId: 7,
			},
			Result:     "success",
			RunnerId:   2,
			RunnerName: "runner2",
		},
	}
	for _, msg := range jobsCompleted {
		batchedMessages = append(batchedMessages, msg)
	}

	b, err := json.Marshal(batchedMessages)
	require.NoError(t, err)

	msg.Body = string(b)

	desiredResult := 4

	metrics := metricsmocks.NewPublisher(t)
	metrics.On("PublishStatic", 0, 0).Once()
	metrics.On("PublishStatistics", msg.Statistics).Once()
	metrics.On("PublishJobCompleted", jobsCompleted[0]).Once()
	metrics.On("PublishJobCompleted", jobsCompleted[1]).Once()
	metrics.On("PublishJobStarted", jobsStarted[0]).Once()
	metrics.On("PublishDesiredRunners", desiredResult).Once()

	handler := listenermocks.NewHandler(t)
	handler.On("HandleJobStarted", mock.Anything, jobsStarted[0]).Return(nil).Once()
	handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 2).Return(desiredResult, nil).Once()

	client := listenermocks.NewClient(t)
	client.On("DeleteMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()

	config := Config{
		Client:     listenermocks.NewClient(t),
		ScaleSetID: 1,
		Metrics:    metrics,
	}

	l, err := New(config)
	require.NoError(t, err)
	l.client = client
	l.session = &actions.RunnerScaleSetSession{
		OwnerName:               "",
		RunnerScaleSet:          &actions.RunnerScaleSet{},
		MessageQueueUrl:         "",
		MessageQueueAccessToken: "",
		Statistics:              &actions.RunnerScaleSetStatistic{},
	}

	err = l.handleMessage(context.Background(), handler, msg)
	require.NoError(t, err)
}
190
cmd/ghalistener/listener/mocks/client.go
Normal file
@@ -0,0 +1,190 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.

package mocks

import (
	context "context"

	actions "github.com/actions/actions-runner-controller/github/actions"

	mock "github.com/stretchr/testify/mock"

	uuid "github.com/google/uuid"
)

// Client is an autogenerated mock type for the Client type
type Client struct {
	mock.Mock
}

// AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
	ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)

	var r0 []int64
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) ([]int64, error)); ok {
		return rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) []int64); ok {
		r0 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]int64)
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, int, string, []int64) error); ok {
		r1 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) {
	ret := _m.Called(ctx, runnerScaleSetId, owner)

	var r0 *actions.RunnerScaleSetSession
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, int, string) (*actions.RunnerScaleSetSession, error)); ok {
		return rf(ctx, runnerScaleSetId, owner)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int, string) *actions.RunnerScaleSetSession); ok {
		r0 = rf(ctx, runnerScaleSetId, owner)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok {
		r1 = rf(ctx, runnerScaleSetId, owner)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
	ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) error); ok {
		r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
	ret := _m.Called(ctx, runnerScaleSetId, sessionId)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) error); ok {
		r0 = rf(ctx, runnerScaleSetId, sessionId)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
	ret := _m.Called(ctx, runnerScaleSetId)

	var r0 *actions.AcquirableJobList
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, int) (*actions.AcquirableJobList, error)); ok {
		return rf(ctx, runnerScaleSetId)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int) *actions.AcquirableJobList); ok {
		r0 = rf(ctx, runnerScaleSetId)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*actions.AcquirableJobList)
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
		r1 = rf(ctx, runnerScaleSetId)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity
func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
	ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)

	var r0 *actions.RunnerScaleSetMessage
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) (*actions.RunnerScaleSetMessage, error)); ok {
		return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
	}
	if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) *actions.RunnerScaleSetMessage); ok {
		r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*actions.RunnerScaleSetMessage)
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, string, string, int64, int) error); ok {
		r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) {
	ret := _m.Called(ctx, runnerScaleSetId, sessionId)

	var r0 *actions.RunnerScaleSetSession
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) (*actions.RunnerScaleSetSession, error)); ok {
		return rf(ctx, runnerScaleSetId, sessionId)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) *actions.RunnerScaleSetSession); ok {
		r0 = rf(ctx, runnerScaleSetId, sessionId)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, int, *uuid.UUID) error); ok {
		r1 = rf(ctx, runnerScaleSetId, sessionId)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewClient(t interface {
	mock.TestingT
	Cleanup(func())
}) *Client {
	mock := &Client{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
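Note (editorial aside, not part of the diff): the type switches in the generated mock above accept either static values or a typed return function passed to .Return, matching the method signature. A hedged sketch of the function-return style in a test (the test name, package alias, and stubbed values are illustrative):

package listener_test

import (
	"context"
	"testing"

	mocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/stretchr/testify/mock"
)

func TestGetMessageFunctionReturn(t *testing.T) {
	client := mocks.NewClient(t)
	// The generated GetMessage stub detects a function of this exact
	// signature in the return arguments and invokes it per call.
	client.On("GetMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
		Return(func(ctx context.Context, url, token string, lastID int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
			return &actions.RunnerScaleSetMessage{MessageId: lastID + 1}, nil
		}).
		Once()

	msg, err := client.GetMessage(context.Background(), "https://example.com", "token", 4, 10)
	if err != nil || msg.MessageId != 5 {
		t.Fatalf("unexpected result: %v, %v", msg, err)
	}
}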
68
cmd/ghalistener/listener/mocks/handler.go
Normal file
@@ -0,0 +1,68 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.

package mocks

import (
	context "context"

	actions "github.com/actions/actions-runner-controller/github/actions"

	mock "github.com/stretchr/testify/mock"
)

// Handler is an autogenerated mock type for the Handler type
type Handler struct {
	mock.Mock
}

// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, jobsCompleted
func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
	ret := _m.Called(ctx, count, jobsCompleted)

	var r0 int
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
		return rf(ctx, count, jobsCompleted)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
		r0 = rf(ctx, count, jobsCompleted)
	} else {
		r0 = ret.Get(0).(int)
	}

	if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
		r1 = rf(ctx, count, jobsCompleted)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
func (_m *Handler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
	ret := _m.Called(ctx, jobInfo)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
		r0 = rf(ctx, jobInfo)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// NewHandler creates a new instance of Handler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewHandler(t interface {
	mock.TestingT
	Cleanup(func())
}) *Handler {
	mock := &Handler{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
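Note (editorial aside, not part of the diff): the mock above mirrors the handler contract the listener drives: HandleJobStarted and HandleDesiredRunnerCount, the same two methods the worker implements later in this changeset. A minimal, hedged sketch of a custom implementation of that pair (names are illustrative; this is not the project's worker):

package main

import (
	"context"

	"github.com/actions/actions-runner-controller/github/actions"
)

// noopHandler is an illustrative handler: it records nothing on job start
// and simply echoes the assigned count back as the desired runner count.
type noopHandler struct{}

func (noopHandler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
	return nil
}

func (noopHandler) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
	return count, nil
}

func main() {
	_ = noopHandler{} // would be passed to Listener.Listen(ctx, handler)
}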
40
cmd/ghalistener/main.go
Normal file
@@ -0,0 +1,40 @@
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/actions/actions-runner-controller/cmd/ghalistener/app"
	"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
)

func main() {
	configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
	if !ok {
		fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
		os.Exit(1)
	}
	config, err := config.Read(configPath)
	if err != nil {
		log.Printf("Failed to read config: %v", err)
		os.Exit(1)
	}

	app, err := app.New(config)
	if err != nil {
		log.Printf("Failed to initialize app: %v", err)
		os.Exit(1)
	}

	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	if err := app.Run(ctx); err != nil {
		log.Printf("Application returned an error: %v", err)
		os.Exit(1)
	}
}
392
cmd/ghalistener/metrics/metrics.go
Normal file
@@ -0,0 +1,392 @@
package metrics

import (
	"context"
	"net/http"
	"strconv"
	"time"

	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/go-logr/logr"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

const (
	labelKeyRunnerScaleSetName      = "name"
	labelKeyRunnerScaleSetNamespace = "namespace"
	labelKeyEnterprise              = "enterprise"
	labelKeyOrganization            = "organization"
	labelKeyRepository              = "repository"
	labelKeyJobName                 = "job_name"
	labelKeyJobWorkflowRef          = "job_workflow_ref"
	labelKeyEventName               = "event_name"
	labelKeyJobResult               = "job_result"
	labelKeyRunnerID                = "runner_id"
	labelKeyRunnerName              = "runner_name"
)

const githubScaleSetSubsystem = "gha"

// labels
var (
	scaleSetLabels = []string{
		labelKeyRunnerScaleSetName,
		labelKeyRepository,
		labelKeyOrganization,
		labelKeyEnterprise,
		labelKeyRunnerScaleSetNamespace,
	}

	jobLabels = []string{
		labelKeyRepository,
		labelKeyOrganization,
		labelKeyEnterprise,
		labelKeyJobName,
		labelKeyJobWorkflowRef,
		labelKeyEventName,
	}

	completedJobsTotalLabels   = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
	jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
	startedJobsTotalLabels     = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
	jobStartupDurationLabels   = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
)

var (
	assignedJobs = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "assigned_jobs",
			Help:      "Number of jobs assigned to this scale set.",
		},
		scaleSetLabels,
	)

	runningJobs = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "running_jobs",
			Help:      "Number of jobs running (or about to be run).",
		},
		scaleSetLabels,
	)

	registeredRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "registered_runners",
			Help:      "Number of runners registered by the scale set.",
		},
		scaleSetLabels,
	)

	busyRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "busy_runners",
			Help:      "Number of registered runners running a job.",
		},
		scaleSetLabels,
	)

	minRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "min_runners",
			Help:      "Minimum number of runners.",
		},
		scaleSetLabels,
	)

	maxRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "max_runners",
			Help:      "Maximum number of runners.",
		},
		scaleSetLabels,
	)

	desiredRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "desired_runners",
			Help:      "Number of runners desired by the scale set.",
		},
		scaleSetLabels,
	)

	idleRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "idle_runners",
			Help:      "Number of registered runners not running a job.",
		},
		scaleSetLabels,
	)

	startedJobsTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "started_jobs_total",
			Help:      "Total number of jobs started.",
		},
		startedJobsTotalLabels,
	)

	completedJobsTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name:      "completed_jobs_total",
			Help:      "Total number of jobs completed.",
			Subsystem: githubScaleSetSubsystem,
		},
		completedJobsTotalLabels,
	)

	jobStartupDurationSeconds = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "job_startup_duration_seconds",
			Help:      "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
			Buckets:   runtimeBuckets,
		},
		jobStartupDurationLabels,
	)

	jobExecutionDurationSeconds = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "job_execution_duration_seconds",
			Help:      "Time spent executing workflow jobs by the scale set (in seconds).",
			Buckets:   runtimeBuckets,
		},
		jobExecutionDurationLabels,
	)
)

var runtimeBuckets []float64 = []float64{
	0.01, 0.05, 0.1, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	12, 15, 18, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100,
	110, 120, 150, 180, 210, 240, 300, 360, 420, 480, 540,
	600, 900, 1200, 1800, 2400, 3000, 3600,
}

type baseLabels struct {
	scaleSetName      string
	scaleSetNamespace string
	enterprise        string
	organization      string
	repository        string
}

func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
	return prometheus.Labels{
		labelKeyEnterprise:     b.enterprise,
		labelKeyOrganization:   jobBase.OwnerName,
		labelKeyRepository:     jobBase.RepositoryName,
		labelKeyJobName:        jobBase.JobDisplayName,
		labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
		labelKeyEventName:      jobBase.EventName,
	}
}

func (b *baseLabels) scaleSetLabels() prometheus.Labels {
	return prometheus.Labels{
		labelKeyRunnerScaleSetName:      b.scaleSetName,
		labelKeyRunnerScaleSetNamespace: b.scaleSetNamespace,
		labelKeyEnterprise:              b.enterprise,
		labelKeyOrganization:            b.organization,
		labelKeyRepository:              b.repository,
	}
}

func (b *baseLabels) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
	l := b.jobLabels(&msg.JobMessageBase)
	l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
	l[labelKeyJobResult] = msg.Result
	l[labelKeyRunnerName] = msg.RunnerName
	return l
}

func (b *baseLabels) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
	l := b.jobLabels(&msg.JobMessageBase)
	l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
	l[labelKeyRunnerName] = msg.RunnerName
	return l
}

//go:generate mockery --name Publisher --output ./mocks --outpkg mocks --case underscore
type Publisher interface {
	PublishStatic(min, max int)
	PublishStatistics(stats *actions.RunnerScaleSetStatistic)
	PublishJobStarted(msg *actions.JobStarted)
	PublishJobCompleted(msg *actions.JobCompleted)
	PublishDesiredRunners(count int)
}

//go:generate mockery --name ServerPublisher --output ./mocks --outpkg mocks --case underscore
type ServerPublisher interface {
	Publisher
	ListenAndServe(ctx context.Context) error
}

var (
	_ Publisher       = &discard{}
	_ ServerPublisher = &exporter{}
)

var Discard Publisher = &discard{}

type exporter struct {
	logger logr.Logger
	baseLabels
	srv *http.Server
}

type ExporterConfig struct {
	ScaleSetName      string
	ScaleSetNamespace string
	Enterprise        string
	Organization      string
	Repository        string
	ServerAddr        string
	ServerEndpoint    string
	Logger            logr.Logger
}

func NewExporter(config ExporterConfig) ServerPublisher {
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		assignedJobs,
		runningJobs,
		registeredRunners,
		busyRunners,
		minRunners,
		maxRunners,
		desiredRunners,
		idleRunners,
		startedJobsTotal,
		completedJobsTotal,
		jobStartupDurationSeconds,
		jobExecutionDurationSeconds,
	)

	mux := http.NewServeMux()
	mux.Handle(
		config.ServerEndpoint,
		promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}),
	)

	return &exporter{
		logger: config.Logger.WithName("metrics"),
		baseLabels: baseLabels{
			scaleSetName:      config.ScaleSetName,
			scaleSetNamespace: config.ScaleSetNamespace,
			enterprise:        config.Enterprise,
			organization:      config.Organization,
			repository:        config.Repository,
		},
		srv: &http.Server{
			Addr:    config.ServerAddr,
			Handler: mux,
		},
	}
}

func (e *exporter) ListenAndServe(ctx context.Context) error {
	e.logger.Info("starting metrics server", "addr", e.srv.Addr)
	go func() {
		<-ctx.Done()
		e.logger.Info("stopping metrics server", "err", ctx.Err())
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		e.srv.Shutdown(ctx)
	}()
	return e.srv.ListenAndServe()
}

func (m *exporter) PublishStatic(min, max int) {
	l := m.scaleSetLabels()
	maxRunners.With(l).Set(float64(max))
	minRunners.With(l).Set(float64(min))
}

func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
	l := e.scaleSetLabels()

	assignedJobs.With(l).Set(float64(stats.TotalAssignedJobs))
	runningJobs.With(l).Set(float64(stats.TotalRunningJobs))
	registeredRunners.With(l).Set(float64(stats.TotalRegisteredRunners))
	busyRunners.With(l).Set(float64(stats.TotalBusyRunners))
	idleRunners.With(l).Set(float64(stats.TotalIdleRunners))
}

func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
	l := e.startedJobLabels(msg)
	startedJobsTotal.With(l).Inc()

	startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
	jobStartupDurationSeconds.With(l).Observe(float64(startupDuration))
}

func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
	l := e.completedJobLabels(msg)
	completedJobsTotal.With(l).Inc()

	executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
	jobExecutionDurationSeconds.With(l).Observe(float64(executionDuration))
}

func (m *exporter) PublishDesiredRunners(count int) {
	desiredRunners.With(m.scaleSetLabels()).Set(float64(count))
}

type discard struct{}

func (*discard) PublishStatic(int, int)                              {}
func (*discard) PublishStatistics(*actions.RunnerScaleSetStatistic)  {}
func (*discard) PublishJobStarted(*actions.JobStarted)               {}
func (*discard) PublishJobCompleted(*actions.JobCompleted)           {}
func (*discard) PublishDesiredRunners(int)                           {}
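Note (editorial aside, not part of the diff): NewExporter above wires the scale-set gauges, counters, and histograms into a dedicated Prometheus registry and serves them from an embedded HTTP server. A hedged usage sketch (addresses and label values are placeholders, not values from this changeset):

package main

import (
	"context"

	"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
	"github.com/go-logr/logr"
)

func main() {
	exporter := metrics.NewExporter(metrics.ExporterConfig{
		ScaleSetName:      "example-scale-set", // placeholder values
		ScaleSetNamespace: "arc-runners",
		ServerAddr:        ":8080",
		ServerEndpoint:    "/metrics",
		Logger:            logr.Discard(),
	})

	// Publish the static min/max gauges once, then serve /metrics
	// until the context is cancelled.
	exporter.PublishStatic(0, 10)
	_ = exporter.ListenAndServe(context.Background())
}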
53
cmd/ghalistener/metrics/mocks/publisher.go
Normal file
@@ -0,0 +1,53 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.

package mocks

import (
	actions "github.com/actions/actions-runner-controller/github/actions"

	mock "github.com/stretchr/testify/mock"
)

// Publisher is an autogenerated mock type for the Publisher type
type Publisher struct {
	mock.Mock
}

// PublishDesiredRunners provides a mock function with given fields: count
func (_m *Publisher) PublishDesiredRunners(count int) {
	_m.Called(count)
}

// PublishJobCompleted provides a mock function with given fields: msg
func (_m *Publisher) PublishJobCompleted(msg *actions.JobCompleted) {
	_m.Called(msg)
}

// PublishJobStarted provides a mock function with given fields: msg
func (_m *Publisher) PublishJobStarted(msg *actions.JobStarted) {
	_m.Called(msg)
}

// PublishStatic provides a mock function with given fields: min, max
func (_m *Publisher) PublishStatic(min int, max int) {
	_m.Called(min, max)
}

// PublishStatistics provides a mock function with given fields: stats
func (_m *Publisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
	_m.Called(stats)
}

// NewPublisher creates a new instance of Publisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewPublisher(t interface {
	mock.TestingT
	Cleanup(func())
}) *Publisher {
	mock := &Publisher{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
69
cmd/ghalistener/metrics/mocks/server_publisher.go
Normal file
@@ -0,0 +1,69 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.

package mocks

import (
	context "context"

	actions "github.com/actions/actions-runner-controller/github/actions"

	mock "github.com/stretchr/testify/mock"
)

// ServerPublisher is an autogenerated mock type for the ServerPublisher type
type ServerPublisher struct {
	mock.Mock
}

// ListenAndServe provides a mock function with given fields: ctx
func (_m *ServerPublisher) ListenAndServe(ctx context.Context) error {
	ret := _m.Called(ctx)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
		r0 = rf(ctx)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// PublishDesiredRunners provides a mock function with given fields: count
func (_m *ServerPublisher) PublishDesiredRunners(count int) {
	_m.Called(count)
}

// PublishJobCompleted provides a mock function with given fields: msg
func (_m *ServerPublisher) PublishJobCompleted(msg *actions.JobCompleted) {
	_m.Called(msg)
}

// PublishJobStarted provides a mock function with given fields: msg
func (_m *ServerPublisher) PublishJobStarted(msg *actions.JobStarted) {
	_m.Called(msg)
}

// PublishStatic provides a mock function with given fields: min, max
func (_m *ServerPublisher) PublishStatic(min int, max int) {
	_m.Called(min, max)
}

// PublishStatistics provides a mock function with given fields: stats
func (_m *ServerPublisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
	_m.Called(stats)
}

// NewServerPublisher creates a new instance of ServerPublisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewServerPublisher(t interface {
	mock.TestingT
	Cleanup(func())
}) *ServerPublisher {
	mock := &ServerPublisher{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
242
cmd/ghalistener/worker/worker.go
Normal file
@@ -0,0 +1,242 @@
package worker

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/actions/actions-runner-controller/logging"
	jsonpatch "github.com/evanphx/json-patch"
	"github.com/go-logr/logr"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

const workerName = "kubernetesworker"

type Option func(*Worker)

func WithLogger(logger logr.Logger) Option {
	return func(w *Worker) {
		logger = logger.WithName(workerName)
		w.logger = &logger
	}
}

type Config struct {
	EphemeralRunnerSetNamespace string
	EphemeralRunnerSetName      string
	MaxRunners                  int
	MinRunners                  int
}

// The Worker's role is to process the messages it receives from the listener.
// It then initiates Kubernetes API requests to carry out the necessary actions.
type Worker struct {
	clientset   *kubernetes.Clientset
	config      Config
	lastPatch   int
	lastPatchID int
	logger      *logr.Logger
}

var _ listener.Handler = (*Worker)(nil)

func New(config Config, options ...Option) (*Worker, error) {
	w := &Worker{
		config:      config,
		lastPatch:   -1,
		lastPatchID: -1,
	}

	conf, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}

	clientset, err := kubernetes.NewForConfig(conf)
	if err != nil {
		return nil, err
	}

	w.clientset = clientset

	for _, option := range options {
		option(w)
	}

	if err := w.applyDefaults(); err != nil {
		return nil, err
	}

	return w, nil
}

func (w *Worker) applyDefaults() error {
	if w.logger == nil {
		logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatJSON)
		if err != nil {
			return fmt.Errorf("NewLogger failed: %w", err)
		}
		logger = logger.WithName(workerName)
		w.logger = &logger
	}

	return nil
}

// HandleJobStarted updates the job information for the ephemeral runner when a job is started.
// It takes a context and a jobInfo parameter which contains the details of the started job.
// This update marks the ephemeral runner so that the controller would have more context
// about the ephemeral runner that should not be deleted when scaling down.
// It returns an error if there is any issue with updating the job information.
func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
	w.logger.Info("Updating job info for the runner",
		"runnerName", jobInfo.RunnerName,
		"ownerName", jobInfo.OwnerName,
		"repoName", jobInfo.RepositoryName,
		"workflowRef", jobInfo.JobWorkflowRef,
		"workflowRunId", jobInfo.WorkflowRunId,
		"jobDisplayName", jobInfo.JobDisplayName,
		"requestId", jobInfo.RunnerRequestId)

	original, err := json.Marshal(&v1alpha1.EphemeralRunner{})
	if err != nil {
		return fmt.Errorf("failed to marshal empty ephemeral runner: %w", err)
	}

	patch, err := json.Marshal(
		&v1alpha1.EphemeralRunner{
			Status: v1alpha1.EphemeralRunnerStatus{
				JobRequestId:      jobInfo.RunnerRequestId,
				JobRepositoryName: fmt.Sprintf("%s/%s", jobInfo.OwnerName, jobInfo.RepositoryName),
				WorkflowRunId:     jobInfo.WorkflowRunId,
				JobWorkflowRef:    jobInfo.JobWorkflowRef,
				JobDisplayName:    jobInfo.JobDisplayName,
			},
		},
	)
	if err != nil {
		return fmt.Errorf("failed to marshal ephemeral runner patch: %w", err)
	}

	mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
	if err != nil {
		return fmt.Errorf("failed to create merge patch json for ephemeral runner: %w", err)
	}

	w.logger.Info("Updating ephemeral runner with merge patch", "json", string(mergePatch))

	patchedStatus := &v1alpha1.EphemeralRunner{}
	err = w.clientset.RESTClient().
		Patch(types.MergePatchType).
		Prefix("apis", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version).
		Namespace(w.config.EphemeralRunnerSetNamespace).
		Resource("EphemeralRunners").
		Name(jobInfo.RunnerName).
		SubResource("status").
		Body(mergePatch).
		Do(ctx).
		Into(patchedStatus)
	if err != nil {
		if kerrors.IsNotFound(err) {
			w.logger.Info("Ephemeral runner not found, skipping patching of ephemeral runner status", "runnerName", jobInfo.RunnerName)
			return nil
		}
		return fmt.Errorf("could not patch ephemeral runner status, patch JSON: %s, error: %w", string(mergePatch), err)
	}

	w.logger.Info("Ephemeral runner status updated with the merge patch successfully.")

	return nil
}

// HandleDesiredRunnerCount handles the desired runner count by scaling the ephemeral runner set.
// The function calculates the target runner count based on the minimum and maximum runner count configuration.
// If the target runner count is the same as the last patched count, it skips patching and returns nil.
// Otherwise, it creates a merge patch JSON for updating the ephemeral runner set with the desired count.
// The function then scales the ephemeral runner set by applying the merge patch.
// Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
// If any error occurs during the process, it returns an error with a descriptive message.
func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
	// Max runners should always be set by the resource builder either to the configured value,
	// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
	targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)

	logValues := []any{
		"assigned job", count,
		"decision", targetRunnerCount,
		"min", w.config.MinRunners,
		"max", w.config.MaxRunners,
		"currentRunnerCount", w.lastPatch,
		"jobsCompleted", jobsCompleted,
	}

	if count == 0 && jobsCompleted == 0 {
		w.lastPatchID = 0
	} else {
		w.lastPatchID++
	}

	w.lastPatch = targetRunnerCount

	original, err := json.Marshal(
		&v1alpha1.EphemeralRunnerSet{
			Spec: v1alpha1.EphemeralRunnerSetSpec{
				Replicas: -1,
				PatchID:  -1,
			},
		},
	)
	if err != nil {
		return 0, fmt.Errorf("failed to marshal empty ephemeral runner set: %w", err)
	}

	patch, err := json.Marshal(
		&v1alpha1.EphemeralRunnerSet{
			Spec: v1alpha1.EphemeralRunnerSetSpec{
				Replicas: targetRunnerCount,
				PatchID:  w.lastPatchID,
			},
		},
	)
	if err != nil {
		w.logger.Error(err, "could not marshal patch ephemeral runner set")
		return 0, err
	}

	mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
	if err != nil {
		return 0, fmt.Errorf("failed to create merge patch json for ephemeral runner set: %w", err)
	}

	w.logger.Info("Created merge patch json for EphemeralRunnerSet update", "json", string(mergePatch))

	w.logger.Info("Scaling ephemeral runner set", logValues...)

	patchedEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{}
	err = w.clientset.RESTClient().
		Patch(types.MergePatchType).
		Prefix("apis", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version).
		Namespace(w.config.EphemeralRunnerSetNamespace).
		Resource("ephemeralrunnersets").
		Name(w.config.EphemeralRunnerSetName).
		Body([]byte(mergePatch)).
		Do(ctx).
		Into(patchedEphemeralRunnerSet)
	if err != nil {
		return 0, fmt.Errorf("could not patch ephemeral runner set , patch JSON: %s, error: %w", string(mergePatch), err)
	}

	w.logger.Info("Ephemeral runner set scaled.",
		"namespace", w.config.EphemeralRunnerSetNamespace,
		"name", w.config.EphemeralRunnerSetName,
		"replicas", patchedEphemeralRunnerSet.Spec.Replicas,
	)
	return targetRunnerCount, nil
}
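Illustrative note (not part of the diff): HandleDesiredRunnerCount relies on jsonpatch.CreateMergePatch emitting only the fields that differ between the two marshalled documents; the -1 sentinels in the "original" object guarantee that the replica count and patch ID always appear in the patch, even when the target value is 0. A self-contained sketch of the same pattern, using a stand-in spec type whose JSON field names are assumptions for illustration only:

package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

// spec is a stand-in for the EphemeralRunnerSet spec; the real type lives in the v1alpha1 package.
type spec struct {
	Replicas int `json:"replicas"`
	PatchID  int `json:"patchID"`
}

func main() {
	original, _ := json.Marshal(spec{Replicas: -1, PatchID: -1}) // sentinels force both fields into the patch
	desired, _ := json.Marshal(spec{Replicas: 0, PatchID: 2})    // even scaling down to zero still yields a field

	mergePatch, err := jsonpatch.CreateMergePatch(original, desired)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(mergePatch)) // {"patchID":2,"replicas":0}
}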
@@ -129,7 +129,7 @@ func (m *AutoScalerClient) Close() error {
 	return m.client.Close()
 }
 
-func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error) error {
+func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error {
 	if m.initialMessage != nil {
 		err := handler(m.initialMessage)
 		if err != nil {
@@ -141,7 +141,7 @@ func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler
 	}
 
 	for {
-		message, err := m.client.GetMessage(ctx, m.lastMessageId)
+		message, err := m.client.GetMessage(ctx, m.lastMessageId, maxCapacity)
 		if err != nil {
 			return fmt.Errorf("get message failed from refreshing client. %w", err)
 		}
@@ -317,7 +317,7 @@ func TestGetRunnerScaleSetMessage(t *testing.T) {
 		Statistics: &actions.RunnerScaleSetStatistic{},
 	}
 	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
 		MessageId:   1,
 		MessageType: "test",
 		Body:        "test",
@@ -332,7 +332,7 @@ func TestGetRunnerScaleSetMessage(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 
 	assert.NoError(t, err, "Error getting message")
 	assert.Equal(t, int64(0), asClient.lastMessageId, "Initial message")
@@ -340,7 +340,7 @@ func TestGetRunnerScaleSetMessage(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 
 	assert.NoError(t, err, "Error getting message")
 	assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
@@ -368,7 +368,7 @@ func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) {
 		Statistics: &actions.RunnerScaleSetStatistic{},
 	}
 	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
 		MessageId:   1,
 		MessageType: "test",
 		Body:        "test",
@@ -383,14 +383,14 @@ func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 
 	assert.NoError(t, err, "Error getting message")
 
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return fmt.Errorf("error")
-	})
+	}, 10)
 
 	assert.ErrorContains(t, err, "handle message failed. error", "Error getting message")
 	assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated")
@@ -419,7 +419,7 @@ func TestGetRunnerScaleSetMessage_HandleInitialMessage(t *testing.T) {
 			TotalAssignedJobs: 2,
 		},
 	}
-	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
+	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything, mock.Anything).Return(session, nil)
 	mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{
 		Count: 1,
 		Jobs: []actions.AcquirableJob{
@@ -439,7 +439,7 @@ func TestGetRunnerScaleSetMessage_HandleInitialMessage(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 
 	assert.NoError(t, err, "Error getting message")
 	assert.Nil(t, asClient.initialMessage, "Initial message should be nil")
@@ -488,7 +488,7 @@ func TestGetRunnerScaleSetMessage_HandleInitialMessageFailed(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return fmt.Errorf("error")
-	})
+	}, 10)
 
 	assert.ErrorContains(t, err, "fail to process initial message. error", "Error getting message")
 	assert.NotNil(t, asClient.initialMessage, "Initial message should be nil")
@@ -516,8 +516,8 @@ func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) {
 		Statistics: &actions.RunnerScaleSetStatistic{},
 	}
 	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(nil, nil).Times(3)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(nil, nil).Times(3)
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
 		MessageId:   1,
 		MessageType: "test",
 		Body:        "test",
@@ -532,13 +532,13 @@ func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting initial message")
 
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 
 	assert.NoError(t, err, "Error getting message")
 	assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
@@ -565,7 +565,7 @@ func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) {
 		Statistics: &actions.RunnerScaleSetStatistic{},
 	}
 	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(nil, fmt.Errorf("error"))
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(nil, fmt.Errorf("error"))
 
 	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
 		asc.client = mockSessionClient
@@ -575,12 +575,12 @@ func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) {
 	// process initial message
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting initial message")
 
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		return fmt.Errorf("Should not be called")
-	})
+	}, 10)
 
 	assert.ErrorContains(t, err, "get message failed from refreshing client. error", "Error should be returned")
 	assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be updated")
@@ -608,7 +608,7 @@ func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) {
 		Statistics: &actions.RunnerScaleSetStatistic{},
 	}
 	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
 		MessageId:   1,
 		MessageType: "test",
 		Body:        "test",
@@ -623,13 +623,13 @@ func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting initial message")
 
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 
 	assert.ErrorContains(t, err, "delete message failed from refreshing client. error", "Error getting message")
 	assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
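Illustrative note (not part of the diff): every caller of GetRunnerScaleSetMessage now passes a capacity hint alongside the handler, so a call site reads roughly as follows (the handler body and the processMessage name are hypothetical):

	// maxRunners is forwarded to the message broker so it never hands out more jobs than the scale set can take.
	err := asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		return processMessage(msg) // hypothetical handler
	}, maxRunners)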
@@ -5,9 +5,9 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"math"
 	"strings"
 
+	"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
 )
@@ -30,7 +30,7 @@ type Service struct {
 	errs []error
 }
 
-func WithPrometheusMetrics(conf RunnerScaleSetListenerConfig) func(*Service) {
+func WithPrometheusMetrics(conf config.Config) func(*Service) {
 	return func(svc *Service) {
 		parsedURL, err := actions.ParseGitHubConfigFromURL(conf.ConfigureUrl)
 		if err != nil {
@@ -81,6 +81,7 @@ func NewService(
 }
 
 func (s *Service) Start() error {
+	s.metricsExporter.publishStatic(s.settings.MaxRunners, s.settings.MinRunners)
 	for {
 		s.logger.Info("waiting for message...")
 		select {
@@ -88,7 +89,7 @@ func (s *Service) Start() error {
 			s.logger.Info("service is stopped.")
 			return nil
 		default:
-			err := s.rsClient.GetRunnerScaleSetMessage(s.ctx, s.processMessage)
+			err := s.rsClient.GetRunnerScaleSetMessage(s.ctx, s.processMessage, s.settings.MaxRunners)
 			if err != nil {
 				return fmt.Errorf("could not get and process message. %w", err)
 			}
@@ -204,7 +205,9 @@ func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
 }
 
 func (s *Service) scaleForAssignedJobCount(count int) error {
-	targetRunnerCount := int(math.Max(math.Min(float64(s.settings.MaxRunners), float64(count)), float64(s.settings.MinRunners)))
+	// Max runners should always be set by the resource builder either to the configured value,
+	// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
+	targetRunnerCount := min(s.settings.MinRunners+count, s.settings.MaxRunners)
 	s.metricsExporter.publishDesiredRunners(targetRunnerCount)
 	if targetRunnerCount != s.currentRunnerCount {
 		s.logger.Info("try scale runner request up/down base on assigned job count",
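Illustrative note (not part of the diff): the change above replaces a clamp of the assigned-job count into [MinRunners, MaxRunners] with MinRunners treated as idle headroom on top of the assigned jobs. A worked example with hypothetical values, consistent with the test expectations changed below (Go 1.21+ builtin min/max):

package main

import "fmt"

func main() {
	minRunners, maxRunners, assigned := 2, 10, 3
	oldTarget := max(min(maxRunners, assigned), minRunners) // old clamp: 3
	newTarget := min(minRunners+assigned, maxRunners)       // new headroom rule: 5
	fmt.Println(oldTarget, newTarget)
}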
@@ -64,7 +64,7 @@ func TestStart(t *testing.T) {
 	)
 	require.NoError(t, err)
 
-	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
+	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Run(func(mock.Arguments) { cancel() }).Return(nil).Once()
 
 	err = service.Start()
 
@@ -98,7 +98,7 @@ func TestStart_ScaleToMinRunners(t *testing.T) {
 	)
 	require.NoError(t, err)
 
-	mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything).Run(func(args mock.Arguments) {
+	mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
 		_ = service.scaleForAssignedJobCount(5)
 	}).Return(nil)
 
@@ -137,7 +137,7 @@ func TestStart_ScaleToMinRunnersFailed(t *testing.T) {
 	require.NoError(t, err)
 
 	c := mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(fmt.Errorf("error")).Once()
-	mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything).Run(func(args mock.Arguments) {
+	mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
 		_ = service.scaleForAssignedJobCount(5)
 	}).Return(c.ReturnArguments.Get(0))
 
@@ -172,8 +172,8 @@ func TestStart_GetMultipleMessages(t *testing.T) {
 	)
 	require.NoError(t, err)
 
-	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(5)
-	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
+	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(nil).Times(5)
+	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
 
 	err = service.Start()
 
@@ -207,8 +207,8 @@ func TestStart_ErrorOnMessage(t *testing.T) {
 	)
 	require.NoError(t, err)
 
-	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(2)
-	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(fmt.Errorf("error")).Once()
+	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(nil).Times(2)
+	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("error")).Once()
 
 	err = service.Start()
 
@@ -397,7 +397,7 @@ func TestProcessMessage_MultipleMessages(t *testing.T) {
 	require.NoError(t, err)
 
 	mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once()
-	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
+	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
 
 	err = service.processMessage(&actions.RunnerScaleSetMessage{
 		MessageId: 1,
@@ -523,9 +523,9 @@ func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) {
 	require.NoError(t, err)
 
 	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
-	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(nil).Once()
+	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 4).Return(nil).Once()
 	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
-	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
+	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()
 	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
 
 	err = service.scaleForAssignedJobCount(0)
@@ -569,7 +569,7 @@ func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) {
 	)
 	require.NoError(t, err)
 
-	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(fmt.Errorf("error"))
+	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(fmt.Errorf("error"))
 
 	err = service.scaleForAssignedJobCount(2)
 
@@ -605,8 +605,23 @@ func TestProcessMessage_JobStartedMessage(t *testing.T) {
 
 	service.currentRunnerCount = 1
 
-	mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
+	mockKubeManager.On(
+		"UpdateEphemeralRunnerWithJobInfo",
+		ctx,
+		service.settings.Namespace,
+		"runner1",
+		"owner1",
+		"repo1",
+		".github/workflows/ci.yaml",
+		"job1",
+		int64(100),
+		int64(3),
+	).Run(
+		func(_ mock.Arguments) { cancel() },
+	).Return(nil).Once()
 
 	mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
+	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil)
 
 	err = service.processMessage(&actions.RunnerScaleSetMessage{
 		MessageId: 1,
76	cmd/githubrunnerscalesetlistener/config/config.go	Normal file
@@ -0,0 +1,76 @@
package config

import (
	"encoding/json"
	"fmt"
	"os"
)

type Config struct {
	ConfigureUrl                string `json:"configureUrl"`
	AppID                       int64  `json:"appID"`
	AppInstallationID           int64  `json:"appInstallationID"`
	AppPrivateKey               string `json:"appPrivateKey"`
	Token                       string `json:"token"`
	EphemeralRunnerSetNamespace string `json:"ephemeralRunnerSetNamespace"`
	EphemeralRunnerSetName      string `json:"ephemeralRunnerSetName"`
	MaxRunners                  int    `json:"maxRunners"`
	MinRunners                  int    `json:"minRunners"`
	RunnerScaleSetId            int    `json:"runnerScaleSetId"`
	RunnerScaleSetName          string `json:"runnerScaleSetName"`
	ServerRootCA                string `json:"serverRootCA"`
	LogLevel                    string `json:"logLevel"`
	LogFormat                   string `json:"logFormat"`
	MetricsAddr                 string `json:"metricsAddr"`
	MetricsEndpoint             string `json:"metricsEndpoint"`
}

func Read(path string) (Config, error) {
	f, err := os.Open(path)
	if err != nil {
		return Config{}, err
	}
	defer f.Close()

	var config Config
	if err := json.NewDecoder(f).Decode(&config); err != nil {
		return Config{}, fmt.Errorf("failed to decode config: %w", err)
	}

	if err := config.validate(); err != nil {
		return Config{}, fmt.Errorf("failed to validate config: %w", err)
	}

	return config, nil
}

func (c *Config) validate() error {
	if len(c.ConfigureUrl) == 0 {
		return fmt.Errorf("GitHubConfigUrl is not provided")
	}

	if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
		return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
	}

	if c.RunnerScaleSetId == 0 {
		return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
	}

	if c.MaxRunners < c.MinRunners {
		return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
	}

	hasToken := len(c.Token) > 0
	hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""

	if !hasToken && !hasPrivateKeyConfig {
		return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
	}

	if hasToken && hasPrivateKeyConfig {
		return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
	}

	return nil
}
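Illustrative note (not part of the diff): the listener now reads this struct from a JSON file whose path arrives in the LISTENER_CONFIG_PATH environment variable (see the main.go change further down). A minimal sketch exercising config.Read; the field names follow the json tags above, all values are placeholders:

package main

import (
	"fmt"
	"os"

	"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
)

func main() {
	raw := []byte(`{
		"configureUrl": "https://github.com/myorg/myrepo",
		"ephemeralRunnerSetNamespace": "arc-runners",
		"ephemeralRunnerSetName": "my-runner-set",
		"runnerScaleSetId": 1,
		"minRunners": 1,
		"maxRunners": 5,
		"token": "placeholder-token"
	}`)

	f, err := os.CreateTemp("", "listener-config-*.json")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.Write(raw); err != nil {
		panic(err)
	}
	f.Close()

	cfg, err := config.Read(f.Name()) // decodes and validates in one step
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.EphemeralRunnerSetName, cfg.MinRunners, cfg.MaxRunners)
}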
92	cmd/githubrunnerscalesetlistener/config/config_test.go	Normal file
@@ -0,0 +1,92 @@
package config

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestConfigValidationMinMax(t *testing.T) {
	config := &Config{
		ConfigureUrl:                "github.com/some_org/some_repo",
		EphemeralRunnerSetNamespace: "namespace",
		EphemeralRunnerSetName:      "deployment",
		RunnerScaleSetId:            1,
		MinRunners:                  5,
		MaxRunners:                  2,
		Token:                       "token",
	}
	err := config.validate()
	assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
}

func TestConfigValidationMissingToken(t *testing.T) {
	config := &Config{
		ConfigureUrl:                "github.com/some_org/some_repo",
		EphemeralRunnerSetNamespace: "namespace",
		EphemeralRunnerSetName:      "deployment",
		RunnerScaleSetId:            1,
	}
	err := config.validate()
	expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
	assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidationAppKey(t *testing.T) {
	config := &Config{
		AppID:                       1,
		AppInstallationID:           10,
		ConfigureUrl:                "github.com/some_org/some_repo",
		EphemeralRunnerSetNamespace: "namespace",
		EphemeralRunnerSetName:      "deployment",
		RunnerScaleSetId:            1,
	}
	err := config.validate()
	expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
	assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
	config := &Config{
		AppID:                       1,
		AppInstallationID:           10,
		AppPrivateKey:               "asdf",
		Token:                       "asdf",
		ConfigureUrl:                "github.com/some_org/some_repo",
		EphemeralRunnerSetNamespace: "namespace",
		EphemeralRunnerSetName:      "deployment",
		RunnerScaleSetId:            1,
	}
	err := config.validate()
	expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
	assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidation(t *testing.T) {
	config := &Config{
		ConfigureUrl:                "https://github.com/actions",
		EphemeralRunnerSetNamespace: "namespace",
		EphemeralRunnerSetName:      "deployment",
		RunnerScaleSetId:            1,
		MinRunners:                  1,
		MaxRunners:                  5,
		Token:                       "asdf",
	}

	err := config.validate()

	assert.NoError(t, err, "Expected no error")
}

func TestConfigValidationConfigUrl(t *testing.T) {
	config := &Config{
		EphemeralRunnerSetNamespace: "namespace",
		EphemeralRunnerSetName:      "deployment",
		RunnerScaleSetId:            1,
	}

	err := config.validate()

	assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}
@@ -28,39 +28,26 @@ import (
 	"time"
 
 	"github.com/actions/actions-runner-controller/build"
+	"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/actions/actions-runner-controller/logging"
 	"github.com/go-logr/logr"
-	"github.com/kelseyhightower/envconfig"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"golang.org/x/net/http/httpproxy"
 	"golang.org/x/sync/errgroup"
 )
 
-type RunnerScaleSetListenerConfig struct {
-	ConfigureUrl                string `split_words:"true"`
-	AppID                       int64  `split_words:"true"`
-	AppInstallationID           int64  `split_words:"true"`
-	AppPrivateKey               string `split_words:"true"`
-	Token                       string `split_words:"true"`
-	EphemeralRunnerSetNamespace string `split_words:"true"`
-	EphemeralRunnerSetName      string `split_words:"true"`
-	MaxRunners                  int    `split_words:"true"`
-	MinRunners                  int    `split_words:"true"`
-	RunnerScaleSetId            int    `split_words:"true"`
-	RunnerScaleSetName          string `split_words:"true"`
-	ServerRootCA                string `split_words:"true"`
-	LogLevel                    string `split_words:"true"`
-	LogFormat                   string `split_words:"true"`
-	MetricsAddr                 string `split_words:"true"`
-	MetricsEndpoint             string `split_words:"true"`
-}
-
 func main() {
-	var rc RunnerScaleSetListenerConfig
-	if err := envconfig.Process("github", &rc); err != nil {
-		fmt.Fprintf(os.Stderr, "Error: processing environment variables for RunnerScaleSetListenerConfig: %v\n", err)
+	configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
+	if !ok {
+		fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
+		os.Exit(1)
+	}
+
+	rc, err := config.Read(configPath)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error: reading config from path(%q): %v\n", configPath, err)
 		os.Exit(1)
 	}
 
@@ -80,12 +67,6 @@ func main() {
 		os.Exit(1)
 	}
 
-	// Validate all inputs
-	if err := validateConfig(&rc); err != nil {
-		logger.Error(err, "Inputs validation failed")
-		os.Exit(1)
-	}
-
 	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
 	defer stop()
 
@@ -123,7 +104,7 @@ func main() {
 }
 
 type metricsServer struct {
-	rc     RunnerScaleSetListenerConfig
+	rc     config.Config
 	logger logr.Logger
 	srv    *http.Server
 }
@@ -173,7 +154,7 @@ type runOptions struct {
 	serviceOptions []func(*Service)
 }
 
-func run(ctx context.Context, rc RunnerScaleSetListenerConfig, logger logr.Logger, opts runOptions) error {
+func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOptions) error {
 	// Create root context and hook with sigint and sigterm
 	creds := &actions.ActionsAuth{}
 	if rc.Token != "" {
@@ -195,6 +176,8 @@ func run(ctx context.Context, rc RunnerScaleSetListenerConfig, logger logr.Logge
 		Version:    build.Version,
 		CommitSHA:  build.CommitSHA,
 		ScaleSetID: rc.RunnerScaleSetId,
+		HasProxy:   hasProxy(),
+		Subsystem:  "githubrunnerscalesetlistener",
 	})
 	if err != nil {
 		return fmt.Errorf("failed to create an Actions Service client: %w", err)
@@ -232,38 +215,7 @@ func run(ctx context.Context, rc RunnerScaleSetListenerConfig, logger logr.Logge
 	return nil
 }
 
-func validateConfig(config *RunnerScaleSetListenerConfig) error {
-	if len(config.ConfigureUrl) == 0 {
-		return fmt.Errorf("GitHubConfigUrl is not provided")
-	}
-
-	if len(config.EphemeralRunnerSetNamespace) == 0 || len(config.EphemeralRunnerSetName) == 0 {
-		return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", config.EphemeralRunnerSetNamespace, config.EphemeralRunnerSetName)
-	}
-
-	if config.RunnerScaleSetId == 0 {
-		return fmt.Errorf("RunnerScaleSetId '%d' is missing", config.RunnerScaleSetId)
-	}
-
-	if config.MaxRunners < config.MinRunners {
-		return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", config.MinRunners, config.MaxRunners)
-	}
-
-	hasToken := len(config.Token) > 0
-	hasPrivateKeyConfig := config.AppID > 0 && config.AppPrivateKey != ""
-
-	if !hasToken && !hasPrivateKeyConfig {
-		return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
-	}
-
-	if hasToken && hasPrivateKeyConfig {
-		return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
-	}
-
-	return nil
-}
-
-func newActionsClientFromConfig(config RunnerScaleSetListenerConfig, creds *actions.ActionsAuth, options ...actions.ClientOption) (*actions.Client, error) {
+func newActionsClientFromConfig(config config.Config, creds *actions.ActionsAuth, options ...actions.ClientOption) (*actions.Client, error) {
 	if config.ServerRootCA != "" {
 		systemPool, err := x509.SystemCertPool()
 		if err != nil {
@@ -285,3 +237,8 @@ func newActionsClientFromConfig(config RunnerScaleSetListenerConfig, creds *acti
 
 	return actions.NewClient(config.ConfigureUrl, creds, options...)
 }
+
+func hasProxy() bool {
+	proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
+	return proxyFunc != nil
+}
@@ -3,7 +3,6 @@ package main
 import (
 	"context"
 	"crypto/tls"
-	"fmt"
 	"net/http"
 	"net/http/httptest"
 	"os"
@@ -13,94 +12,11 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
+	"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/actions/actions-runner-controller/github/actions/testserver"
 )
 
-func TestConfigValidationMinMax(t *testing.T) {
-	config := &RunnerScaleSetListenerConfig{
-		ConfigureUrl:                "github.com/some_org/some_repo",
-		EphemeralRunnerSetNamespace: "namespace",
-		EphemeralRunnerSetName:      "deployment",
-		RunnerScaleSetId:            1,
-		MinRunners:                  5,
-		MaxRunners:                  2,
-		Token:                       "token",
-	}
-	err := validateConfig(config)
-	assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
-}
-
-func TestConfigValidationMissingToken(t *testing.T) {
-	config := &RunnerScaleSetListenerConfig{
-		ConfigureUrl:                "github.com/some_org/some_repo",
-		EphemeralRunnerSetNamespace: "namespace",
-		EphemeralRunnerSetName:      "deployment",
-		RunnerScaleSetId:            1,
-	}
-	err := validateConfig(config)
-	expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
-	assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
-}
-
-func TestConfigValidationAppKey(t *testing.T) {
-	config := &RunnerScaleSetListenerConfig{
-		AppID:                       1,
-		AppInstallationID:           10,
-		ConfigureUrl:                "github.com/some_org/some_repo",
-		EphemeralRunnerSetNamespace: "namespace",
-		EphemeralRunnerSetName:      "deployment",
-		RunnerScaleSetId:            1,
-	}
-	err := validateConfig(config)
-	expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
-	assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
-}
-
-func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
-	config := &RunnerScaleSetListenerConfig{
-		AppID:                       1,
-		AppInstallationID:           10,
-		AppPrivateKey:               "asdf",
-		Token:                       "asdf",
-		ConfigureUrl:                "github.com/some_org/some_repo",
-		EphemeralRunnerSetNamespace: "namespace",
-		EphemeralRunnerSetName:      "deployment",
-		RunnerScaleSetId:            1,
-	}
-	err := validateConfig(config)
-	expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
-	assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
-}
-
-func TestConfigValidation(t *testing.T) {
-	config := &RunnerScaleSetListenerConfig{
-		ConfigureUrl:                "https://github.com/actions",
-		EphemeralRunnerSetNamespace: "namespace",
-		EphemeralRunnerSetName:      "deployment",
-		RunnerScaleSetId:            1,
-		MinRunners:                  1,
-		MaxRunners:                  5,
-		Token:                       "asdf",
-	}
-
-	err := validateConfig(config)
-
-	assert.NoError(t, err, "Expected no error")
-}
-
-func TestConfigValidationConfigUrl(t *testing.T) {
-	config := &RunnerScaleSetListenerConfig{
-		EphemeralRunnerSetNamespace: "namespace",
-		EphemeralRunnerSetName:      "deployment",
-		RunnerScaleSetId:            1,
-	}
-
-	err := validateConfig(config)
-
-	assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
-}
-
 func TestCustomerServerRootCA(t *testing.T) {
 	ctx := context.Background()
 	certsFolder := filepath.Join(
@@ -134,7 +50,7 @@ func TestCustomerServerRootCA(t *testing.T) {
 	require.NoError(t, err)
 	certsString = certsString + string(intermediate)
 
-	config := RunnerScaleSetListenerConfig{
+	config := config.Config{
 		ConfigureUrl: server.ConfigURLForOrg("myorg"),
 		ServerRootCA: certsString,
 	}
@@ -164,7 +80,7 @@ func TestProxySettings(t *testing.T) {
 		os.Setenv("http_proxy", proxy.URL)
 		defer os.Setenv("http_proxy", prevProxy)
 
-		config := RunnerScaleSetListenerConfig{
+		config := config.Config{
 			ConfigureUrl: "https://github.com/org/repo",
 		}
 		creds := &actions.ActionsAuth{
@@ -196,7 +112,7 @@ func TestProxySettings(t *testing.T) {
 		os.Setenv("https_proxy", proxy.URL)
 		defer os.Setenv("https_proxy", prevProxy)
 
-		config := RunnerScaleSetListenerConfig{
+		config := config.Config{
 			ConfigureUrl: "https://github.com/org/repo",
 		}
 		creds := &actions.ActionsAuth{
@@ -233,7 +149,7 @@ func TestProxySettings(t *testing.T) {
 		os.Setenv("no_proxy", "example.com")
 		defer os.Setenv("no_proxy", prevNoProxy)
 
-		config := RunnerScaleSetListenerConfig{
+		config := config.Config{
 			ConfigureUrl: "https://github.com/org/repo",
 		}
 		creds := &actions.ActionsAuth{
@@ -8,6 +8,6 @@ import (

//go:generate mockery --inpackage --name=RunnerScaleSetClient
type RunnerScaleSetClient interface {
-GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error) error
+GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error
AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error
}
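The signature change above threads the listener's remaining capacity through every message poll. A minimal sketch of a caller of the updated interface (illustrative only; the handler body is a placeholder, not code from this diff):

    // poll asks the scale-set service for at most maxCapacity jobs and hands the
    // batched message to the supplied handler. Illustrative sketch only.
    func poll(ctx context.Context, client RunnerScaleSetClient, maxCapacity int) error {
        return client.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
            // Real handling of msg (statistics, job started/completed events) is elided.
            return nil
        }, maxCapacity)
    }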
@@ -1,8 +1,6 @@
package main

import (
-"strconv"

"github.com/actions/actions-runner-controller/github/actions"
"github.com/prometheus/client_golang/prometheus"
)

@@ -18,8 +16,6 @@ const (
labelKeyJobWorkflowRef = "job_workflow_ref"
labelKeyEventName = "event_name"
labelKeyJobResult = "job_result"
-labelKeyRunnerID = "runner_id"
-labelKeyRunnerName = "runner_name"
)

const githubScaleSetSubsystem = "gha"

@@ -43,10 +39,15 @@ var (
labelKeyEventName,
}

-completedJobsTotalLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
+completedJobsTotalLabels = append(jobLabels, labelKeyJobResult)
-jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
+jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult)
-startedJobsTotalLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
+startedJobsTotalLabels = jobLabels
-jobStartupDurationLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
+jobStartupDurationLabels = []string{
+labelKeyRepository,
+labelKeyOrganization,
+labelKeyEnterprise,
+labelKeyEventName,
+}
)

// metrics

@@ -274,23 +275,34 @@ func (b *baseLabels) scaleSetLabels() prometheus.Labels {

func (b *baseLabels) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
l := b.jobLabels(&msg.JobMessageBase)
-l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
l[labelKeyJobResult] = msg.Result
-l[labelKeyRunnerName] = msg.RunnerName
return l
}

func (b *baseLabels) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
l := b.jobLabels(&msg.JobMessageBase)
-l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
-l[labelKeyRunnerName] = msg.RunnerName
return l
}

+func (b *baseLabels) jobStartupDurationLabels(msg *actions.JobStarted) prometheus.Labels {
+return prometheus.Labels{
+labelKeyEnterprise: b.enterprise,
+labelKeyOrganization: b.organization,
+labelKeyRepository: b.repository,
+labelKeyEventName: msg.EventName,
+}
+}

func (m *metricsExporter) withBaseLabels(base baseLabels) {
m.baseLabels = base
}

+func (m *metricsExporter) publishStatic(max, min int) {
+l := m.scaleSetLabels()
+maxRunners.With(l).Set(float64(max))
+minRunners.With(l).Set(float64(min))
+}

func (m *metricsExporter) publishStatistics(stats *actions.RunnerScaleSetStatistic) {
l := m.scaleSetLabels()

@@ -307,6 +319,7 @@ func (m *metricsExporter) publishJobStarted(msg *actions.JobStarted) {
l := m.startedJobLabels(msg)
startedJobsTotal.With(l).Inc()

+l = m.jobStartupDurationLabels(msg)
startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
jobStartupDurationSeconds.With(l).Observe(float64(startupDuration))
}
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.33.2. DO NOT EDIT.
+// Code generated by mockery v2.36.1. DO NOT EDIT.

package main

@@ -1,4 +1,4 @@
-// Code generated by mockery v2.33.2. DO NOT EDIT.
+// Code generated by mockery v2.36.1. DO NOT EDIT.

package main

@@ -29,13 +29,13 @@ func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Con
return r0
}

-// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler
+// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler, maxCapacity
-func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error) error {
+func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error, maxCapacity int) error {
-ret := _m.Called(ctx, handler)
+ret := _m.Called(ctx, handler, maxCapacity)

var r0 error
-if rf, ok := ret.Get(0).(func(context.Context, func(*actions.RunnerScaleSetMessage) error) error); ok {
+if rf, ok := ret.Get(0).(func(context.Context, func(*actions.RunnerScaleSetMessage) error, int) error); ok {
-r0 = rf(ctx, handler)
+r0 = rf(ctx, handler, maxCapacity)
} else {
r0 = ret.Error(0)
}
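Because the regenerated mock now records the extra maxCapacity argument, test expectations have to supply it as well. A hedged sketch using testify's mock package (the values are illustrative, not taken from this diff):

    mockClient := &MockRunnerScaleSetClient{}
    // Expect a single poll that may acquire at most 10 jobs; any handler is accepted.
    mockClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, 10).Return(nil).Once()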
@@ -24,8 +24,12 @@ func newSessionClient(client actions.ActionsService, logger *logr.Logger, sessio
}
}

-func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) {
+func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
-message, err := m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId)
+if maxCapacity < 0 {
+return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0")
+}

+message, err := m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId, maxCapacity)
if err == nil {
return message, nil
}

@@ -42,7 +46,7 @@ func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId
}

m.session = session
-message, err = m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId)
+message, err = m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId, maxCapacity)
if err != nil {
return nil, fmt.Errorf("delete message failed after refresh message session. %w", err)
}
@@ -31,17 +31,17 @@ func TestGetMessage(t *testing.T) {
},
}

-mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, nil).Once()
+mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, nil).Once()
-mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(&actions.RunnerScaleSetMessage{MessageId: 1}, nil).Once()
+mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(&actions.RunnerScaleSetMessage{MessageId: 1}, nil).Once()

client := newSessionClient(mockActionsClient, &logger, session)

-msg, err := client.GetMessage(ctx, 0)
+msg, err := client.GetMessage(ctx, 0, 10)
require.NoError(t, err, "GetMessage should not return an error")

assert.Nil(t, msg, "GetMessage should return nil message")

-msg, err = client.GetMessage(ctx, 0)
+msg, err = client.GetMessage(ctx, 0, 10)
require.NoError(t, err, "GetMessage should not return an error")

assert.Equal(t, int64(1), msg.MessageId, "GetMessage should return a message with id 1")

@@ -146,11 +146,11 @@ func TestGetMessage_Error(t *testing.T) {
},
}

-mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, fmt.Errorf("error")).Once()
+mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, fmt.Errorf("error")).Once()

client := newSessionClient(mockActionsClient, &logger, session)

-msg, err := client.GetMessage(ctx, 0)
+msg, err := client.GetMessage(ctx, 0, 10)
assert.ErrorContains(t, err, "get message failed. error", "GetMessage should return an error")
assert.Nil(t, msg, "GetMessage should return nil message")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made")

@@ -227,8 +227,8 @@ func TestGetMessage_RefreshToken(t *testing.T) {
Id: 1,
},
}
-mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
+mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
-mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, "token2", int64(0)).Return(&actions.RunnerScaleSetMessage{
+mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, "token2", int64(0), 10).Return(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "test",
Body: "test",

@@ -243,7 +243,7 @@ func TestGetMessage_RefreshToken(t *testing.T) {
}, nil).Once()

client := newSessionClient(mockActionsClient, &logger, session)
-msg, err := client.GetMessage(ctx, 0)
+msg, err := client.GetMessage(ctx, 0, 10)
assert.NoError(t, err, "Error getting message")
assert.Equal(t, int64(1), msg.MessageId, "message id should be updated")
assert.Equal(t, "token2", client.session.MessageQueueAccessToken, "Message queue access token should be updated")

@@ -340,11 +340,11 @@ func TestGetMessage_RefreshToken_Failed(t *testing.T) {
Id: 1,
},
}
-mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
+mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(nil, fmt.Errorf("error"))

client := newSessionClient(mockActionsClient, &logger, session)
-msg, err := client.GetMessage(ctx, 0)
+msg, err := client.GetMessage(ctx, 0, 10)
assert.ErrorContains(t, err, "refresh message session failed. error", "Error should be returned")
assert.Nil(t, msg, "Message should be nil")
assert.Equal(t, "token", client.session.MessageQueueAccessToken, "Message queue access token should not be updated")
@@ -39,6 +39,9 @@ import (
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
ctrl "sigs.k8s.io/controller-runtime"
+"sigs.k8s.io/controller-runtime/pkg/cache"
+metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+"sigs.k8s.io/controller-runtime/pkg/webhook"
// +kubebuilder:scaffold:imports
)

@@ -149,11 +152,19 @@ func main() {

syncPeriod := 10 * time.Minute
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
-SyncPeriod: &syncPeriod,
-Namespace: watchNamespace,
-MetricsBindAddress: metricsAddr,
-Port: 9443,
+Cache: cache.Options{
+SyncPeriod: &syncPeriod,
+DefaultNamespaces: map[string]cache.Config{
+watchNamespace: {},
+},
+},
+Metrics: metricsserver.Options{
+BindAddress: metricsAddr,
+},
+WebhookServer: webhook.NewServer(webhook.Options{
+Port: 9443,
+}),
})
if err != nil {
logger.Error(err, "unable to start manager")
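For readability, the + lines of the hunk above assemble into the options layout used by newer controller-runtime releases (the field names come straight from the diff; the surrounding variables are assumed to be defined earlier in the same main.go, as before):

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
        Scheme: scheme,
        Cache: cache.Options{
            SyncPeriod:        &syncPeriod,
            DefaultNamespaces: map[string]cache.Config{watchNamespace: {}},
        },
        Metrics:       metricsserver.Options{BindAddress: metricsAddr},
        WebhookServer: webhook.NewServer(webhook.Options{Port: 9443}),
    })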
Four file diffs suppressed because they are too large.
@@ -1,9 +1,9 @@
|
|||||||
|
---
|
||||||
apiVersion: apiextensions.k8s.io/v1
|
apiVersion: apiextensions.k8s.io/v1
|
||||||
kind: CustomResourceDefinition
|
kind: CustomResourceDefinition
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
annotations:
|
||||||
controller-gen.kubebuilder.io/version: v0.7.0
|
controller-gen.kubebuilder.io/version: v0.14.0
|
||||||
creationTimestamp: null
|
|
||||||
name: horizontalrunnerautoscalers.actions.summerwind.dev
|
name: horizontalrunnerautoscalers.actions.summerwind.dev
|
||||||
spec:
|
spec:
|
||||||
group: actions.summerwind.dev
|
group: actions.summerwind.dev
|
||||||
@@ -35,10 +35,19 @@ spec:
|
|||||||
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
|
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
|
||||||
properties:
|
properties:
|
||||||
apiVersion:
|
apiVersion:
|
||||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
description: |-
|
||||||
|
APIVersion defines the versioned schema of this representation of an object.
|
||||||
|
Servers should convert recognized schemas to the latest internal value, and
|
||||||
|
may reject unrecognized values.
|
||||||
|
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||||
type: string
|
type: string
|
||||||
kind:
|
kind:
|
||||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
description: |-
|
||||||
|
Kind is a string value representing the REST resource this object represents.
|
||||||
|
Servers may infer this from the endpoint the client submits requests to.
|
||||||
|
Cannot be updated.
|
||||||
|
In CamelCase.
|
||||||
|
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||||
type: string
|
type: string
|
||||||
metadata:
|
metadata:
|
||||||
type: object
|
type: object
|
||||||
@@ -47,7 +56,9 @@ spec:
|
|||||||
properties:
|
properties:
|
||||||
capacityReservations:
|
capacityReservations:
|
||||||
items:
|
items:
|
||||||
description: CapacityReservation specifies the number of replicas temporarily added to the scale target until ExpirationTime.
|
description: |-
|
||||||
|
CapacityReservation specifies the number of replicas temporarily added
|
||||||
|
to the scale target until ExpirationTime.
|
||||||
properties:
|
properties:
|
||||||
effectiveTime:
|
effectiveTime:
|
||||||
format: date-time
|
format: date-time
|
||||||
@@ -79,30 +90,46 @@ spec:
|
|||||||
items:
|
items:
|
||||||
properties:
|
properties:
|
||||||
repositoryNames:
|
repositoryNames:
|
||||||
description: RepositoryNames is the list of repository names to be used for calculating the metric. For example, a repository name is the REPO part of `github.com/USER/REPO`.
|
description: |-
|
||||||
|
RepositoryNames is the list of repository names to be used for calculating the metric.
|
||||||
|
For example, a repository name is the REPO part of `github.com/USER/REPO`.
|
||||||
items:
|
items:
|
||||||
type: string
|
type: string
|
||||||
type: array
|
type: array
|
||||||
scaleDownAdjustment:
|
scaleDownAdjustment:
|
||||||
description: ScaleDownAdjustment is the number of runners removed on scale-down. You can only specify either ScaleDownFactor or ScaleDownAdjustment.
|
description: |-
|
||||||
|
ScaleDownAdjustment is the number of runners removed on scale-down.
|
||||||
|
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
|
||||||
type: integer
|
type: integer
|
||||||
scaleDownFactor:
|
scaleDownFactor:
|
||||||
description: ScaleDownFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be removed.
|
description: |-
|
||||||
|
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
|
||||||
|
to determine how many pods should be removed.
|
||||||
type: string
|
type: string
|
||||||
scaleDownThreshold:
|
scaleDownThreshold:
|
||||||
description: ScaleDownThreshold is the percentage of busy runners less than which will trigger the hpa to scale the runners down.
|
description: |-
|
||||||
|
ScaleDownThreshold is the percentage of busy runners less than which will
|
||||||
|
trigger the hpa to scale the runners down.
|
||||||
type: string
|
type: string
|
||||||
scaleUpAdjustment:
|
scaleUpAdjustment:
|
||||||
description: ScaleUpAdjustment is the number of runners added on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
|
description: |-
|
||||||
|
ScaleUpAdjustment is the number of runners added on scale-up.
|
||||||
|
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
|
||||||
type: integer
|
type: integer
|
||||||
scaleUpFactor:
|
scaleUpFactor:
|
||||||
description: ScaleUpFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be added.
|
description: |-
|
||||||
|
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
|
||||||
|
to determine how many pods should be added.
|
||||||
type: string
|
type: string
|
||||||
scaleUpThreshold:
|
scaleUpThreshold:
|
||||||
description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up.
|
description: |-
|
||||||
|
ScaleUpThreshold is the percentage of busy runners greater than which will
|
||||||
|
trigger the hpa to scale runners up.
|
||||||
type: string
|
type: string
|
||||||
type:
|
type:
|
||||||
description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
|
description: |-
|
||||||
|
Type is the type of metric to be used for autoscaling.
|
||||||
|
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
|
||||||
type: string
|
type: string
|
||||||
type: object
|
type: object
|
||||||
type: array
|
type: array
|
||||||
@@ -110,7 +137,9 @@ spec:
|
|||||||
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
|
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
|
||||||
type: integer
|
type: integer
|
||||||
scaleDownDelaySecondsAfterScaleOut:
|
scaleDownDelaySecondsAfterScaleOut:
|
||||||
description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
|
description: |-
|
||||||
|
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
|
||||||
|
Used to prevent flapping (down->up->down->... loop)
|
||||||
type: integer
|
type: integer
|
||||||
scaleTargetRef:
|
scaleTargetRef:
|
||||||
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
|
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
|
||||||
@@ -126,7 +155,18 @@ spec:
|
|||||||
type: string
|
type: string
|
||||||
type: object
|
type: object
|
||||||
scaleUpTriggers:
|
scaleUpTriggers:
|
||||||
description: "ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 on each webhook requested received by the webhookBasedAutoscaler. \n This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster. \n Note that the added runners remain until the next sync period at least, and they may or may not be used by GitHub Actions depending on the timing. They are intended to be used to gain \"resource slack\" immediately after you receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available."
|
description: |-
|
||||||
|
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
|
||||||
|
on each webhook requested received by the webhookBasedAutoscaler.
|
||||||
|
|
||||||
|
|
||||||
|
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
|
||||||
|
|
||||||
|
|
||||||
|
Note that the added runners remain until the next sync period at least,
|
||||||
|
and they may or may not be used by GitHub Actions depending on the timing.
|
||||||
|
They are intended to be used to gain "resource slack" immediately after you
|
||||||
|
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
|
||||||
items:
|
items:
|
||||||
properties:
|
properties:
|
||||||
amount:
|
amount:
|
||||||
@@ -139,12 +179,18 @@ spec:
|
|||||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||||
properties:
|
properties:
|
||||||
names:
|
names:
|
||||||
description: Names is a list of GitHub Actions glob patterns. Any check_run event whose name matches one of patterns in the list can trigger autoscaling. Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file. So it is very likely that you can utilize this to trigger depending on the job.
|
description: |-
|
||||||
|
Names is a list of GitHub Actions glob patterns.
|
||||||
|
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
|
||||||
|
Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
|
||||||
|
So it is very likely that you can utilize this to trigger depending on the job.
|
||||||
items:
|
items:
|
||||||
type: string
|
type: string
|
||||||
type: array
|
type: array
|
||||||
repositories:
|
repositories:
|
||||||
description: Repositories is a list of GitHub repositories. Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
|
description: |-
|
||||||
|
Repositories is a list of GitHub repositories.
|
||||||
|
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
|
||||||
items:
|
items:
|
||||||
type: string
|
type: string
|
||||||
type: array
|
type: array
|
||||||
@@ -169,7 +215,9 @@ spec:
|
|||||||
type: array
|
type: array
|
||||||
type: object
|
type: object
|
||||||
push:
|
push:
|
||||||
description: PushSpec is the condition for triggering scale-up on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
description: |-
|
||||||
|
PushSpec is the condition for triggering scale-up on push event
|
||||||
|
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||||
type: object
|
type: object
|
||||||
workflowJob:
|
workflowJob:
|
||||||
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
|
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
|
||||||
@@ -178,23 +226,33 @@ spec:
|
|||||||
type: object
|
type: object
|
||||||
type: array
|
type: array
|
||||||
scheduledOverrides:
|
scheduledOverrides:
|
||||||
description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized.
|
description: |-
|
||||||
|
ScheduledOverrides is the list of ScheduledOverride.
|
||||||
|
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||||
|
The earlier a scheduled override is, the higher it is prioritized.
|
||||||
items:
|
items:
|
||||||
description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
|
description: |-
|
||||||
|
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||||
|
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
|
||||||
properties:
|
properties:
|
||||||
endTime:
|
endTime:
|
||||||
description: EndTime is the time at which the first override ends.
|
description: EndTime is the time at which the first override ends.
|
||||||
format: date-time
|
format: date-time
|
||||||
type: string
|
type: string
|
||||||
minReplicas:
|
minReplicas:
|
||||||
description: MinReplicas is the number of runners while overriding. If omitted, it doesn't override minReplicas.
|
description: |-
|
||||||
|
MinReplicas is the number of runners while overriding.
|
||||||
|
If omitted, it doesn't override minReplicas.
|
||||||
minimum: 0
|
minimum: 0
|
||||||
nullable: true
|
nullable: true
|
||||||
type: integer
|
type: integer
|
||||||
recurrenceRule:
|
recurrenceRule:
|
||||||
properties:
|
properties:
|
||||||
frequency:
|
frequency:
|
||||||
description: Frequency is the name of a predefined interval of each recurrence. The valid values are "Daily", "Weekly", "Monthly", and "Yearly". If empty, the corresponding override happens only once.
|
description: |-
|
||||||
|
Frequency is the name of a predefined interval of each recurrence.
|
||||||
|
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
|
||||||
|
If empty, the corresponding override happens only once.
|
||||||
enum:
|
enum:
|
||||||
- Daily
|
- Daily
|
||||||
- Weekly
|
- Weekly
|
||||||
@@ -202,7 +260,9 @@ spec:
|
|||||||
- Yearly
|
- Yearly
|
||||||
type: string
|
type: string
|
||||||
untilTime:
|
untilTime:
|
||||||
description: UntilTime is the time of the final recurrence. If empty, the schedule recurs forever.
|
description: |-
|
||||||
|
UntilTime is the time of the final recurrence.
|
||||||
|
If empty, the schedule recurs forever.
|
||||||
format: date-time
|
format: date-time
|
||||||
type: string
|
type: string
|
||||||
type: object
|
type: object
|
||||||
@@ -231,18 +291,24 @@ spec:
|
|||||||
type: object
|
type: object
|
||||||
type: array
|
type: array
|
||||||
desiredReplicas:
|
desiredReplicas:
|
||||||
description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
|
description: |-
|
||||||
|
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
|
||||||
|
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
|
||||||
type: integer
|
type: integer
|
||||||
lastSuccessfulScaleOutTime:
|
lastSuccessfulScaleOutTime:
|
||||||
format: date-time
|
format: date-time
|
||||||
nullable: true
|
nullable: true
|
||||||
type: string
|
type: string
|
||||||
observedGeneration:
|
observedGeneration:
|
||||||
description: ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g. RunnerDeployment's generation, which is updated on mutation by the API Server.
|
description: |-
|
||||||
|
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
|
||||||
|
RunnerDeployment's generation, which is updated on mutation by the API Server.
|
||||||
format: int64
|
format: int64
|
||||||
type: integer
|
type: integer
|
||||||
scheduledOverridesSummary:
|
scheduledOverridesSummary:
|
||||||
description: ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output for observability.
|
description: |-
|
||||||
|
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
|
||||||
|
for observability.
|
||||||
type: string
|
type: string
|
||||||
type: object
|
type: object
|
||||||
type: object
|
type: object
|
||||||
@@ -251,9 +317,3 @@ spec:
|
|||||||
subresources:
|
subresources:
|
||||||
status: {}
|
status: {}
|
||||||
preserveUnknownFields: false
|
preserveUnknownFields: false
|
||||||
status:
|
|
||||||
acceptedNames:
|
|
||||||
kind: ""
|
|
||||||
plural: ""
|
|
||||||
conditions: []
|
|
||||||
storedVersions: []
|
|
||||||
|
|||||||
Four more file diffs suppressed because they are too large.
@@ -1,9 +1,7 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
kind: ClusterRole
|
kind: ClusterRole
|
||||||
metadata:
|
metadata:
|
||||||
creationTimestamp: null
|
|
||||||
name: manager-role
|
name: manager-role
|
||||||
rules:
|
rules:
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
|
|||||||
@@ -1,9 +1,7 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
apiVersion: admissionregistration.k8s.io/v1
|
apiVersion: admissionregistration.k8s.io/v1
|
||||||
kind: MutatingWebhookConfiguration
|
kind: MutatingWebhookConfiguration
|
||||||
metadata:
|
metadata:
|
||||||
creationTimestamp: null
|
|
||||||
name: mutating-webhook-configuration
|
name: mutating-webhook-configuration
|
||||||
webhooks:
|
webhooks:
|
||||||
- admissionReviewVersions:
|
- admissionReviewVersions:
|
||||||
@@ -85,12 +83,10 @@ webhooks:
|
|||||||
resources:
|
resources:
|
||||||
- pods
|
- pods
|
||||||
sideEffects: None
|
sideEffects: None
|
||||||
|
|
||||||
---
|
---
|
||||||
apiVersion: admissionregistration.k8s.io/v1
|
apiVersion: admissionregistration.k8s.io/v1
|
||||||
kind: ValidatingWebhookConfiguration
|
kind: ValidatingWebhookConfiguration
|
||||||
metadata:
|
metadata:
|
||||||
creationTimestamp: null
|
|
||||||
name: validating-webhook-configuration
|
name: validating-webhook-configuration
|
||||||
webhooks:
|
webhooks:
|
||||||
- admissionReviewVersions:
|
- admissionReviewVersions:
|
||||||
|
|||||||
@@ -30,7 +30,6 @@ import (
|
|||||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
|
||||||
|
|
||||||
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||||
"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
|
"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
|
||||||
@@ -42,7 +41,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
autoscalingListenerContainerName = "autoscaler"
|
autoscalingListenerContainerName = "listener"
|
||||||
autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
|
autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -287,6 +286,21 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
|||||||
}
|
}
|
||||||
logger.Info("Listener pod is deleted")
|
logger.Info("Listener pod is deleted")
|
||||||
|
|
||||||
|
var secret corev1.Secret
|
||||||
|
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &secret)
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
if secret.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
logger.Info("Deleting the listener config secret")
|
||||||
|
if err := r.Delete(ctx, &secret); err != nil {
|
||||||
|
return false, fmt.Errorf("failed to delete listener config secret: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
case err != nil && !kerrors.IsNotFound(err):
|
||||||
|
return false, fmt.Errorf("failed to get listener config secret: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
if autoscalingListener.Spec.Proxy != nil {
|
if autoscalingListener.Spec.Proxy != nil {
|
||||||
logger.Info("Cleaning up the listener proxy secret")
|
logger.Info("Cleaning up the listener proxy secret")
|
||||||
proxySecret := new(corev1.Secret)
|
proxySecret := new(corev1.Secret)
|
||||||
@@ -306,6 +320,38 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
|||||||
logger.Info("Listener proxy secret is deleted")
|
logger.Info("Listener proxy secret is deleted")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
listenerRoleBinding := new(rbacv1.RoleBinding)
|
||||||
|
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding)
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
if listenerRoleBinding.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
logger.Info("Deleting the listener role binding")
|
||||||
|
if err := r.Delete(ctx, listenerRoleBinding); err != nil {
|
||||||
|
return false, fmt.Errorf("failed to delete listener role binding: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
case err != nil && !kerrors.IsNotFound(err):
|
||||||
|
return false, fmt.Errorf("failed to get listener role binding: %v", err)
|
||||||
|
}
|
||||||
|
logger.Info("Listener role binding is deleted")
|
||||||
|
|
||||||
|
listenerRole := new(rbacv1.Role)
|
||||||
|
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole)
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
if listenerRole.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
logger.Info("Deleting the listener role")
|
||||||
|
if err := r.Delete(ctx, listenerRole); err != nil {
|
||||||
|
return false, fmt.Errorf("failed to delete listener role: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
case err != nil && !kerrors.IsNotFound(err):
|
||||||
|
return false, fmt.Errorf("failed to get listener role: %v", err)
|
||||||
|
}
|
||||||
|
logger.Info("Listener role is deleted")
|
||||||
|
|
||||||
logger.Info("Cleaning up the listener service account")
|
logger.Info("Cleaning up the listener service account")
|
||||||
listenerSa := new(corev1.ServiceAccount)
|
listenerSa := new(corev1.ServiceAccount)
|
||||||
err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa)
|
err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa)
|
||||||
@@ -386,13 +432,13 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cert := ""
|
||||||
if autoscalingListener.Spec.GitHubServerTLS != nil {
|
if autoscalingListener.Spec.GitHubServerTLS != nil {
|
||||||
env, err := r.certificateEnvVarForListener(ctx, autoscalingRunnerSet, autoscalingListener)
|
var err error
|
||||||
|
cert, err = r.certificate(ctx, autoscalingRunnerSet, autoscalingListener)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ctrl.Result{}, fmt.Errorf("failed to create certificate env var for listener: %v", err)
|
return ctrl.Result{}, fmt.Errorf("failed to create certificate env var for listener: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
envs = append(envs, env)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var metricsConfig *listenerMetricsServerConfig
|
var metricsConfig *listenerMetricsServerConfig
|
||||||
@@ -403,7 +449,35 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret, metricsConfig, envs...)
|
var podConfig corev1.Secret
|
||||||
|
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &podConfig); err != nil {
|
||||||
|
if !kerrors.IsNotFound(err) {
|
||||||
|
logger.Error(err, "Unable to get listener config secret", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerConfigName(autoscalingListener))
|
||||||
|
return ctrl.Result{Requeue: true}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Info("Creating listener config secret")
|
||||||
|
|
||||||
|
podConfig, err := r.resourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(err, "Failed to build listener config secret")
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ctrl.SetControllerReference(autoscalingListener, podConfig, r.Scheme); err != nil {
|
||||||
|
logger.Error(err, "Failed to set controller reference")
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := r.Create(ctx, podConfig); err != nil {
|
||||||
|
logger.Error(err, "Unable to create listener config secret", "namespace", podConfig.Namespace, "name", podConfig.Name)
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{Requeue: true}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error(err, "Failed to build listener pod")
|
logger.Error(err, "Failed to build listener pod")
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
@@ -424,13 +498,13 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
|
|||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) certificateEnvVarForListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener) (corev1.EnvVar, error) {
|
func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener) (string, error) {
|
||||||
if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom == nil {
|
if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom == nil {
|
||||||
return corev1.EnvVar{}, fmt.Errorf("githubServerTLS.certificateFrom is not specified")
|
return "", fmt.Errorf("githubServerTLS.certificateFrom is not specified")
|
||||||
}
|
}
|
||||||
|
|
||||||
if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef == nil {
|
if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef == nil {
|
||||||
return corev1.EnvVar{}, fmt.Errorf("githubServerTLS.certificateFrom.configMapKeyRef is not specified")
|
return "", fmt.Errorf("githubServerTLS.certificateFrom.configMapKeyRef is not specified")
|
||||||
}
|
}
|
||||||
|
|
||||||
var configmap corev1.ConfigMap
|
var configmap corev1.ConfigMap
|
||||||
@@ -443,7 +517,7 @@ func (r *AutoscalingListenerReconciler) certificateEnvVarForListener(ctx context
|
|||||||
&configmap,
|
&configmap,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return corev1.EnvVar{}, fmt.Errorf(
|
return "", fmt.Errorf(
|
||||||
"failed to get configmap %s: %w",
|
"failed to get configmap %s: %w",
|
||||||
autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Name,
|
autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Name,
|
||||||
err,
|
err,
|
||||||
@@ -452,17 +526,14 @@ func (r *AutoscalingListenerReconciler) certificateEnvVarForListener(ctx context
|
|||||||
|
|
||||||
certificate, ok := configmap.Data[autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Key]
|
certificate, ok := configmap.Data[autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Key]
|
||||||
if !ok {
|
if !ok {
|
||||||
return corev1.EnvVar{}, fmt.Errorf(
|
return "", fmt.Errorf(
|
||||||
"key %s is not found in configmap %s",
|
"key %s is not found in configmap %s",
|
||||||
autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Key,
|
autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Key,
|
||||||
autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Name,
|
autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Name,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
return corev1.EnvVar{
|
return certificate, nil
|
||||||
Name: "GITHUB_SERVER_ROOT_CA",
|
|
||||||
Value: certificate,
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
||||||
@@ -643,7 +714,7 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
|
labelBasedWatchFunc := func(_ context.Context, obj client.Object) []reconcile.Request {
|
||||||
var requests []reconcile.Request
|
var requests []reconcile.Request
|
||||||
labels := obj.GetLabels()
|
labels := obj.GetLabels()
|
||||||
namespace, ok := labels["auto-scaling-listener-namespace"]
|
namespace, ok := labels["auto-scaling-listener-namespace"]
|
||||||
@@ -670,8 +741,8 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error
|
|||||||
For(&v1alpha1.AutoscalingListener{}).
|
For(&v1alpha1.AutoscalingListener{}).
|
||||||
Owns(&corev1.Pod{}).
|
Owns(&corev1.Pod{}).
|
||||||
Owns(&corev1.ServiceAccount{}).
|
Owns(&corev1.ServiceAccount{}).
|
||||||
Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
|
Watches(&rbacv1.Role{}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
|
||||||
Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
|
Watches(&rbacv1.RoleBinding{}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
|
||||||
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
|
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
|
||||||
Complete(r)
|
Complete(r)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package actionsgithubcom
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -13,6 +14,7 @@ import (
|
|||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
|
|
||||||
|
listenerconfig "github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
|
||||||
. "github.com/onsi/ginkgo/v2"
|
. "github.com/onsi/ginkgo/v2"
|
||||||
. "github.com/onsi/gomega"
|
. "github.com/onsi/gomega"
|
||||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
@@ -103,6 +105,19 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
|||||||
|
|
||||||
Context("When creating a new AutoScalingListener", func() {
|
Context("When creating a new AutoScalingListener", func() {
|
||||||
It("It should create/add all required resources for a new AutoScalingListener (finalizer, secret, service account, role, rolebinding, pod)", func() {
|
It("It should create/add all required resources for a new AutoScalingListener (finalizer, secret, service account, role, rolebinding, pod)", func() {
|
||||||
|
config := new(corev1.Secret)
|
||||||
|
Eventually(
|
||||||
|
func() error {
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: configSecret.Namespace}, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
autoscalingListenerTestTimeout,
|
||||||
|
autoscalingListenerTestInterval,
|
||||||
|
).Should(Succeed(), "Config secret should be created")
|
||||||
|
|
||||||
// Check if finalizer is added
|
// Check if finalizer is added
|
||||||
created := new(actionsv1alpha1.AutoscalingListener)
|
created := new(actionsv1alpha1.AutoscalingListener)
|
||||||
Eventually(
|
Eventually(
|
||||||
@@ -203,7 +218,8 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
|||||||
return pod.Name, nil
|
return pod.Name, nil
|
||||||
},
|
},
|
||||||
autoscalingListenerTestTimeout,
|
autoscalingListenerTestTimeout,
|
||||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
|
autoscalingListenerTestInterval,
|
||||||
|
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
|
||||||
|
|
||||||
// Delete the AutoScalingListener
|
// Delete the AutoScalingListener
|
||||||
err := k8sClient.Delete(ctx, autoscalingListener)
|
err := k8sClient.Delete(ctx, autoscalingListener)
|
||||||
@@ -225,7 +241,41 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
|||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
autoscalingListenerTestTimeout,
|
autoscalingListenerTestTimeout,
|
||||||
autoscalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete pod")
|
autoscalingListenerTestInterval,
|
||||||
|
).ShouldNot(Succeed(), "failed to delete pod")
|
||||||
|
|
||||||
|
// Cleanup the listener role binding
|
||||||
|
Eventually(
|
||||||
|
func() bool {
|
||||||
|
roleBinding := new(rbacv1.RoleBinding)
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
|
||||||
|
return kerrors.IsNotFound(err)
|
||||||
|
},
|
||||||
|
autoscalingListenerTestTimeout,
|
||||||
|
autoscalingListenerTestInterval,
|
||||||
|
).Should(BeTrue(), "failed to delete role binding")
|
||||||
|
|
||||||
|
// Cleanup the listener role
|
||||||
|
Eventually(
|
||||||
|
func() bool {
|
||||||
|
role := new(rbacv1.Role)
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
|
||||||
|
return kerrors.IsNotFound(err)
|
||||||
|
},
|
||||||
|
autoscalingListenerTestTimeout,
|
||||||
|
autoscalingListenerTestInterval,
|
||||||
|
).Should(BeTrue(), "failed to delete role")
|
||||||
|
|
||||||
|
// Cleanup the listener config
|
||||||
|
Eventually(
|
||||||
|
func() bool {
|
||||||
|
config := new(corev1.Secret)
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, config)
|
||||||
|
return kerrors.IsNotFound(err)
|
||||||
|
},
|
||||||
|
autoscalingListenerTestTimeout,
|
||||||
|
autoscalingListenerTestInterval,
|
||||||
|
).Should(BeTrue(), "failed to delete config secret")
|
||||||
|
|
||||||
// Cleanup the listener service account
|
// Cleanup the listener service account
|
||||||
Eventually(
|
Eventually(
|
||||||
@@ -375,7 +425,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
|
|||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
Name: "listener",
|
Name: autoscalingListenerContainerName,
|
||||||
ImagePullPolicy: corev1.PullAlways,
|
ImagePullPolicy: corev1.PullAlways,
|
||||||
SecurityContext: &corev1.SecurityContext{
|
SecurityContext: &corev1.SecurityContext{
|
||||||
RunAsUser: &runAsUser,
|
RunAsUser: &runAsUser,
|
||||||
@@ -477,6 +527,17 @@ var _ = Describe("Test AutoScalingListener customization", func() {
|
|||||||
autoscalingListenerTestTimeout,
|
autoscalingListenerTestTimeout,
|
||||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
|
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")

+// Check if config is created
+config := new(corev1.Secret)
+Eventually(
+func() error {
+err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, config)
+return err
+},
+autoscalingListenerTestTimeout,
+autoscalingListenerTestInterval,
+).Should(Succeed(), "Config secret should be created")

// Check if pod is created
pod := new(corev1.Pod)
Eventually(

@@ -494,7 +555,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
Expect(pod.Spec.SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")

-Expect(pod.Spec.Containers[0].Name).NotTo(Equal("listener"), "Pod should have the correct container name")
+Expect(pod.Spec.Containers[0].Name).To(Equal(autoscalingListenerContainerName), "Pod should have the correct container name")
Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")

@@ -701,6 +762,155 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
})
})

+var _ = Describe("Test AutoScalingListener controller with template modification", func() {
+var ctx context.Context
+var mgr ctrl.Manager
+var autoscalingNS *corev1.Namespace
+var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet
+var configSecret *corev1.Secret
+var autoscalingListener *actionsv1alpha1.AutoscalingListener
+
+createRunnerSetAndListener := func(listenerTemplate *corev1.PodTemplateSpec) {
+min := 1
+max := 10
+autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{
+ObjectMeta: metav1.ObjectMeta{
+Name: "test-asrs",
+Namespace: autoscalingNS.Name,
+},
+Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{
+GitHubConfigUrl: "https://github.com/owner/repo",
+GitHubConfigSecret: configSecret.Name,
+MaxRunners: &max,
+MinRunners: &min,
+Template: corev1.PodTemplateSpec{
+Spec: corev1.PodSpec{
+Containers: []corev1.Container{
+{
+Name: "runner",
+Image: "ghcr.io/actions/runner",
+},
+},
+},
+},
+ListenerTemplate: listenerTemplate,
+},
+}
+
+err := k8sClient.Create(ctx, autoscalingRunnerSet)
+Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
+
+autoscalingListener = &actionsv1alpha1.AutoscalingListener{
+ObjectMeta: metav1.ObjectMeta{
+Name: "test-asl",
+Namespace: autoscalingNS.Name,
+},
+Spec: actionsv1alpha1.AutoscalingListenerSpec{
+GitHubConfigUrl: "https://github.com/owner/repo",
+GitHubConfigSecret: configSecret.Name,
+RunnerScaleSetId: 1,
+AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
+AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
+EphemeralRunnerSetName: "test-ers",
+MaxRunners: 10,
+MinRunners: 1,
+Image: "ghcr.io/owner/repo",
+Template: listenerTemplate,
+},
+}
+
+err = k8sClient.Create(ctx, autoscalingListener)
+Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingListener")
+}
+
+BeforeEach(func() {
+ctx = context.Background()
+autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
+configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
+
+controller := &AutoscalingListenerReconciler{
+Client: mgr.GetClient(),
+Scheme: mgr.GetScheme(),
+Log: logf.Log,
+}
+err := controller.SetupWithManager(mgr)
+Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
+
+startManagers(GinkgoT(), mgr)
+})
+
+It("Should create listener pod with modified spec", func() {
+runAsUser1001 := int64(1001)
+runAsUser1000 := int64(1000)
+tmpl := &corev1.PodTemplateSpec{
+ObjectMeta: metav1.ObjectMeta{
+Annotations: map[string]string{
+"test-annotation-key": "test-annotation-value",
+},
+Labels: map[string]string{
+"test-label-key": "test-label-value",
+},
+},
+Spec: corev1.PodSpec{
+Containers: []corev1.Container{
+{
+Name: autoscalingListenerContainerName,
+ImagePullPolicy: corev1.PullAlways,
+SecurityContext: &corev1.SecurityContext{
+RunAsUser: &runAsUser1001,
+},
+},
+{
+Name: "sidecar",
+ImagePullPolicy: corev1.PullIfNotPresent,
+Image: "busybox",
+},
+},
+SecurityContext: &corev1.PodSecurityContext{
+RunAsUser: &runAsUser1000,
+},
+},
+}
+
+createRunnerSetAndListener(tmpl)
+
+// wait for listener pod to be created
+Eventually(
+func(g Gomega) {
+pod := new(corev1.Pod)
+err := k8sClient.Get(
+ctx,
+client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace},
+pod,
+)
+g.Expect(err).NotTo(HaveOccurred(), "failed to get pod")
+
+g.Expect(pod.ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation-key", "test-annotation-value"), "pod annotations should be copied from runner set template")
+g.Expect(pod.ObjectMeta.Labels).To(HaveKeyWithValue("test-label-key", "test-label-value"), "pod labels should be copied from runner set template")
+
+},
+autoscalingListenerTestTimeout,
+autoscalingListenerTestInterval).Should(Succeed(), "failed to create listener pod with proxy details")
+
+// Delete the AutoScalingListener
+err := k8sClient.Delete(ctx, autoscalingListener)
+Expect(err).NotTo(HaveOccurred(), "failed to delete test AutoScalingListener")
+
+Eventually(
+func(g Gomega) {
+var proxySecret corev1.Secret
+err := k8sClient.Get(
+ctx,
+types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingNS.Name},
+&proxySecret,
+)
+g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
+},
+autoscalingListenerTestTimeout,
+autoscalingListenerTestInterval).Should(Succeed(), "failed to delete secret with proxy details")
+})
+})

var _ = Describe("Test GitHub Server TLS configuration", func() {
var ctx context.Context
var mgr ctrl.Manager

@@ -816,31 +1026,26 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
})

Context("When creating a new AutoScalingListener", func() {
-It("It should set the certificates as an environment variable on the pod", func() {
-pod := new(corev1.Pod)
+It("It should set the certificates in the config of the pod", func() {
+config := new(corev1.Secret)
Eventually(
func(g Gomega) {
err := k8sClient.Get(
ctx,
client.ObjectKey{
-Name: autoscalingListener.Name,
+Name: scaleSetListenerConfigName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
},
-pod,
+config,
)

g.Expect(err).NotTo(HaveOccurred(), "failed to get pod")
-g.Expect(pod.Spec.Containers).NotTo(BeEmpty(), "pod should have containers")
-g.Expect(pod.Spec.Containers[0].Env).NotTo(BeEmpty(), "pod should have env variables")

-var env *corev1.EnvVar
-for _, e := range pod.Spec.Containers[0].Env {
-if e.Name == "GITHUB_SERVER_ROOT_CA" {
-env = &e
-break
-}
-}
-g.Expect(env).NotTo(BeNil(), "pod should have an env variable named GITHUB_SERVER_ROOT_CA_PATH")
+g.Expect(config.Data["config.json"]).ToNot(BeEmpty(), "listener configuration file should not be empty")
+var listenerConfig listenerconfig.Config
+err = json.Unmarshal(config.Data["config.json"], &listenerConfig)
+g.Expect(err).NotTo(HaveOccurred(), "failed to parse listener configuration file")

cert, err := os.ReadFile(filepath.Join(
"../../",

@@ -851,7 +1056,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
))
g.Expect(err).NotTo(HaveOccurred(), "failed to read rootCA.crt")

-g.Expect(env.Value).To(
+g.Expect(listenerConfig.ServerRootCA).To(
BeEquivalentTo(string(cert)),
"GITHUB_SERVER_ROOT_CA should be the rootCA.crt",
)

@@ -39,14 +39,17 @@ import (
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
-"sigs.k8s.io/controller-runtime/pkg/source"
)

const (
-labelKeyRunnerSpecHash = "runner-spec-hash"
+annotationKeyRunnerSpecHash = "actions.github.com/runner-spec-hash"
+// annotationKeyValuesHash is hash of the entire values json.
+// This is used to determine if the values have changed, so we can
+// re-create listener.
+annotationKeyValuesHash = "actions.github.com/values-hash"
+
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
runnerScaleSetIdAnnotationKey = "runner-scale-set-id"
-runnerScaleSetNameAnnotationKey = "runner-scale-set-name"
)

type UpdateStrategy string

@@ -206,7 +209,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
}

// Make sure the runner scale set name is up to date
-currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameAnnotationKey]
+currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) {
log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.")
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)

@@ -232,9 +235,8 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
}

-desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
for _, runnerSet := range existingRunnerSets.all() {
-log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash])
+log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Annotations[annotationKeyRunnerSpecHash])
}

// Make sure the AutoscalingListener is up and running in the controller namespace

@@ -251,7 +253,9 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
}

// Our listener pod is out of date, so we need to delete it to get a new recreate.
-if listenerFound && (listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()) {
+listenerValuesHashChanged := listener.Annotations[annotationKeyValuesHash] != autoscalingRunnerSet.Annotations[annotationKeyValuesHash]
+listenerSpecHashChanged := listener.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()
+if listenerFound && (listenerValuesHashChanged || listenerSpecHashChanged) {
log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
if err := r.Delete(ctx, listener); err != nil {
if kerrors.IsNotFound(err) {

@@ -265,7 +269,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil
}

-if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] {
+if latestRunnerSet.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.RunnerSetSpecHash() {
if r.drainingJobs(&latestRunnerSet.Status) {
log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners)
log.Info("Scaling down the number of desired replicas to 0")

@@ -273,6 +277,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
// need to scale down to 0
err := patch(ctx, r.Client, latestRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
obj.Spec.Replicas = 0
+obj.Spec.PatchID = 0
})
if err != nil {
log.Error(err, "Failed to patch runner set to set desired count to 0")

@@ -467,6 +472,8 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
Version: build.Version,
CommitSHA: build.CommitSHA,
ScaleSetID: runnerScaleSet.Id,
+HasProxy: autoscalingRunnerSet.Spec.Proxy != nil,
+Subsystem: "controller",
})

logger.Info("Created/Reused a runner scale set", "id", runnerScaleSet.Id, "runnerGroupName", runnerScaleSet.RunnerGroupName)

@@ -479,7 +486,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex

logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels")
if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
-obj.Annotations[runnerScaleSetNameAnnotationKey] = runnerScaleSet.Name
+obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = runnerScaleSet.Name
obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id)
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName
if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen

@@ -527,9 +534,10 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
return ctrl.Result{}, err
}

-logger.Info("Updating runner scale set runner group name as an annotation")
+logger.Info("Updating runner scale set name and runner group name as annotations")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = updatedRunnerScaleSet.RunnerGroupName
+obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = updatedRunnerScaleSet.Name
}); err != nil {
logger.Error(err, "Failed to update runner group name annotation")
return ctrl.Result{}, err

@@ -565,7 +573,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co

logger.Info("Updating runner scale set name as an annotation")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
-obj.Annotations[runnerScaleSetNameAnnotationKey] = updatedRunnerScaleSet.Name
+obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = updatedRunnerScaleSet.Name
}); err != nil {
logger.Error(err, "Failed to update runner scale set name annotation")
return ctrl.Result{}, err

@@ -774,8 +782,8 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.AutoscalingRunnerSet{}).
Owns(&v1alpha1.EphemeralRunnerSet{}).
-Watches(&source.Kind{Type: &v1alpha1.AutoscalingListener{}}, handler.EnqueueRequestsFromMapFunc(
-func(o client.Object) []reconcile.Request {
+Watches(&v1alpha1.AutoscalingListener{}, handler.EnqueueRequestsFromMapFunc(
+func(_ context.Context, o client.Object) []reconcile.Request {
autoscalingListener := o.(*v1alpha1.AutoscalingListener)
return []reconcile.Request{
{

@@ -280,6 +280,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// This should trigger re-creation of EphemeralRunnerSet and Listener
patched := autoscalingRunnerSet.DeepCopy()
patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
+if patched.ObjectMeta.Annotations == nil {
+patched.ObjectMeta.Annotations = make(map[string]string)
+}
+patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "test-hash"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
autoscalingRunnerSet = patched.DeepCopy()

@@ -297,10 +301,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
}

-return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil
+return runnerSetList.Items[0].Annotations[annotationKeyRunnerSpecHash], nil
},
autoscalingRunnerSetTestTimeout,
-autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
+autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Annotations[annotationKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")

// We should create a new listener
Eventually(

@@ -334,6 +338,55 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")

+// We should not re-create a new EphemeralRunnerSet
+Consistently(
+func() (string, error) {
+runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
+err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
+if err != nil {
+return "", err
+}
+
+if len(runnerSetList.Items) != 1 {
+return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
+}
+
+return string(runnerSetList.Items[0].UID), nil
+},
+autoscalingRunnerSetTestTimeout,
+autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(string(runnerSet.UID)), "New EphemeralRunnerSet should not be created")
+
+// We should only re-create a new listener
+Eventually(
+func() (string, error) {
+listener := new(v1alpha1.AutoscalingListener)
+err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
+if err != nil {
+return "", err
+}
+
+return string(listener.UID), nil
+},
+autoscalingRunnerSetTestTimeout,
+autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(string(listener.UID)), "New Listener should be created")
+
+// Only update the values hash for the autoscaling runner set
+// This should trigger re-creation of the Listener only
+runnerSetList = new(v1alpha1.EphemeralRunnerSetList)
+err = k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
+Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")
+Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet")
+runnerSet = runnerSetList.Items[0]
+
+listener = new(v1alpha1.AutoscalingListener)
+err = k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
+Expect(err).NotTo(HaveOccurred(), "failed to get Listener")
+
+patched = autoscalingRunnerSet.DeepCopy()
+patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "hash-changes"
+err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
+Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
+
// We should not re-create a new EphemeralRunnerSet
Consistently(
func() (string, error) {

@@ -493,6 +546,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// Patch the AutoScalingRunnerSet image which should trigger
// the recreation of the Listener and EphemeralRunnerSet
patched := autoscalingRunnerSet.DeepCopy()
+if patched.ObjectMeta.Annotations == nil {
+patched.ObjectMeta.Annotations = make(map[string]string)
+}
+patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "testgroup2"
patched.Spec.Template.Spec = corev1.PodSpec{
Containers: []corev1.Container{
{

@@ -501,7 +558,6 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
},
},
}
-// patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
autoscalingRunnerSet = patched.DeepCopy()

@@ -698,7 +754,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
return "", err
}

-if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok {
+if val, ok := ars.Annotations[AnnotationKeyGitHubRunnerScaleSetName]; ok {
return val, nil
}

@@ -722,7 +778,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
return "", err
}

-if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok {
+if val, ok := ars.Annotations[AnnotationKeyGitHubRunnerScaleSetName]; ok {
return val, nil
}

@@ -2,7 +2,6 @@ package actionsgithubcom

import (
"github.com/actions/actions-runner-controller/logging"
-corev1 "k8s.io/api/core/v1"
)

const (

@@ -40,7 +39,11 @@ const (
// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"

-const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
+const (
+AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
+AnnotationKeyGitHubRunnerScaleSetName = "actions.github.com/runner-scale-set-name"
+AnnotationKeyPatchID = "actions.github.com/patch-id"
+)

// Labels applied to listener roles
const (

@@ -59,10 +62,6 @@ const (
AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name"
)

-// DefaultScaleSetListenerImagePullPolicy is the default pull policy applied
-// to the listener when ImagePullPolicy is not specified
-const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent
-
// DefaultScaleSetListenerLogLevel is the default log level applied
const DefaultScaleSetListenerLogLevel = string(logging.LogLevelDebug)

@@ -71,3 +70,9 @@ const DefaultScaleSetListenerLogFormat = string(logging.LogFormatText)

// ownerKey is field selector matching the owner name of a particular resource
const resourceOwnerKey = ".metadata.controller"
+
+// EphemeralRunner pod creation failure reasons
+const (
+ReasonTooManyPodFailures = "TooManyPodFailures"
+ReasonInvalidPodFailure = "InvalidPod"
+)

Some files were not shown because too many files have changed in this diff.