Mirror of https://github.com/actions/runner-container-hooks.git (synced 2025-12-16 09:46:43 +00:00)

Compare commits: 41 commits
| Author | SHA1 | Date |
|---|---|---|
| | 46c92fe43e | |
| | 56208347f1 | |
| | c093f87779 | |
| | c47c74ad9e | |
| | 90a6236466 | |
| | 496287d61d | |
| | 5264b6cd7d | |
| | b58b13134a | |
| | 8ea7e21dec | |
| | 64000d716a | |
| | 4ff4b552a6 | |
| | 4cdcf09c43 | |
| | 5107bb1d41 | |
| | 547ed30dc3 | |
| | 17fb66892c | |
| | 9319a8566a | |
| | 669ec6f706 | |
| | aa658859f8 | |
| | 8b83223a2b | |
| | 586a052286 | |
| | 730509f702 | |
| | 3fc91e4132 | |
| | ebbe2bdaff | |
| | 17837d25d2 | |
| | c37c5ca584 | |
| | 04b58be49a | |
| | 89ff7d1155 | |
| | 6dbb0b61b7 | |
| | c92bb5544e | |
| | 26f4a32c30 | |
| | 10c6c0aa70 | |
| | d735152125 | |
| | ae31f04223 | |
| | 7754cb80eb | |
| | ae432db512 | |
| | 4448b61e00 | |
| | bf39b9bf16 | |
| | 5b597b0fe2 | |
| | 0e1ba7bdc8 | |
| | 73914b840c | |
| | b537fd4c92 | |
.gitattributes (vendored, new file): 1 line
@@ -0,0 +1 @@
*.png filter=lfs diff=lfs merge=lfs -text
.github/workflows/release.yaml (vendored): 45 lines changed
@@ -13,28 +13,47 @@ jobs:
- run: npm run build-all
name: Build packages
- uses: actions/github-script@v6
id: releaseNotes
id: releaseVersion
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
const hookVersion = require('./package.json').version
var releaseNotes = fs.readFileSync('${{ github.workspace }}/releaseNotes.md', 'utf8').replace(/<HOOK_VERSION>/g, hookVersion)
console.log(releaseNotes)
core.setOutput('version', hookVersion);
core.setOutput('note', releaseNotes);
- name: Zip up releases
run: |
zip -r -j actions-runner-hooks-docker-${{ steps.releaseNotes.outputs.version }}.zip packages/docker/dist
zip -r -j actions-runner-hooks-k8s-${{ steps.releaseNotes.outputs.version }}.zip packages/k8s/dist
zip -r -j actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.version }}.zip packages/docker/dist
zip -r -j actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.version }}.zip packages/k8s/dist
- name: Calculate SHA
id: sha
shell: bash
run: |
sha_docker=$(sha256sum actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.version }}.zip | awk '{print $1}')
echo "Docker SHA: $sha_docker"
echo "docker-sha=$sha_docker" >> $GITHUB_OUTPUT
sha_k8s=$(sha256sum actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.version }}.zip | awk '{print $1}')
echo "K8s SHA: $sha_k8s"
echo "k8s-sha=$sha_k8s" >> $GITHUB_OUTPUT
- name: replace SHA
id: releaseNotes
uses: actions/github-script@v6
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
var releaseNotes = fs.readFileSync('${{ github.workspace }}/releaseNotes.md', 'utf8').replace(/<HOOK_VERSION>/g, '${{ steps.releaseVersion.outputs.version }}')
releaseNotes = releaseNotes.replace(/<DOCKER_SHA>/g, '${{ steps.sha.outputs.docker-sha }}')
releaseNotes = releaseNotes.replace(/<K8S_SHA>/g, '${{ steps.sha.outputs.k8s-sha }}')
console.log(releaseNotes)
core.setOutput('note', releaseNotes);
- uses: actions/create-release@v1
id: createRelease
name: Create ${{ steps.releaseNotes.outputs.version }} Hook Release
name: Create ${{ steps.releaseVersion.outputs.version }} Hook Release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: "v${{ steps.releaseNotes.outputs.version }}"
release_name: "v${{ steps.releaseNotes.outputs.version }}"
tag_name: "v${{ steps.releaseVersion.outputs.version }}"
release_name: "v${{ steps.releaseVersion.outputs.version }}"
body: |
${{ steps.releaseNotes.outputs.note }}
- name: Upload K8s hooks
@@ -43,8 +62,8 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/actions-runner-hooks-k8s-${{ steps.releaseNotes.outputs.version }}.zip
asset_name: actions-runner-hooks-k8s-${{ steps.releaseNotes.outputs.version }}.zip
asset_path: ${{ github.workspace }}/actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.version }}.zip
asset_name: actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.version }}.zip
asset_content_type: application/octet-stream
- name: Upload docker hooks
uses: actions/upload-release-asset@v1
@@ -52,6 +71,6 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/actions-runner-hooks-docker-${{ steps.releaseNotes.outputs.version }}.zip
asset_name: actions-runner-hooks-docker-${{ steps.releaseNotes.outputs.version }}.zip
asset_path: ${{ github.workspace }}/actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.version }}.zip
asset_name: actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.version }}.zip
asset_content_type: application/octet-stream

@@ -1 +1 @@
* @actions/actions-runtime @actions/runner-akvelon
* @actions/actions-launch @actions/runner-akvelon
docs/adrs/0072-using-ephemeral-containers.md (new file): 184 lines
@@ -0,0 +1,184 @@

# ADR 0072: Using Ephemeral Containers

**Date:** 27 March 2023

**Status**: Rejected <!--Accepted|Rejected|Superseded|Deprecated-->

## Context

We are evaluating using Kubernetes [ephemeral containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/) as a drop-in replacement for creating pods for [jobs that run in containers](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) and [service containers](https://docs.github.com/en/actions/using-containerized-services/about-service-containers).

The main motivator behind using ephemeral containers is to eliminate the need for [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). Persistent Volume implementations vary depending on the provider, and we want to avoid building a dependency on them in order to provide our end users a consistent experience.

With ephemeral containers we could leverage [emptyDir volumes](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), which fit our use case better and behave consistently across providers.

However, it's important to acknowledge that ephemeral containers were not designed to handle workloads, but rather to provide a mechanism to inspect running containers for debugging and troubleshooting purposes.

## Evaluation

The criteria we are using to evaluate whether ephemeral containers are fit for purpose are:

- Networking
- Storage
- Security
- Resource limits
- Logs
- Customizability

### Networking

Ephemeral containers share the networking namespace of the pod they are attached to. This means that ephemeral containers can access the same network interfaces as the pod and can communicate with other containers in the same pod. However, ephemeral containers cannot have ports configured, and as such the fields `ports`, `livenessProbe`, and `readinessProbe` are not available. [^1][^2]

In this scenario we have 3 containers in a pod:

- `runner`: the main container that runs the GitHub Actions job
- `debugger`: the first ephemeral container
- `debugger2`: the second ephemeral container

By sequentially opening ports on each of these containers and connecting to them, we can demonstrate that the communication flow between the runner and the debuggers is feasible.

<details>
<summary>1. Runner -> Debugger communication</summary>

*(screenshot: docs/adrs/images/runner-debugger.png, stored with Git LFS)*
</details>

<details>
<summary>2. Debugger -> Runner communication</summary>

*(screenshot: docs/adrs/images/debugger-runner.png, stored with Git LFS)*
</details>

<details>
<summary>3. Debugger2 -> Debugger communication</summary>

*(screenshot: docs/adrs/images/debugger2-debugger.png, stored with Git LFS)*
</details>

### Storage

An emptyDir volume can be successfully mounted (read/write) by the runner as well as by the ephemeral containers. This means that ephemeral containers can share data with the runner and with other ephemeral containers.

<details>
<summary>Configuration</summary>

```yaml
# Extracted from the values.yaml for the gha-runner-scale-set helm chart
spec:
  containers:
    - name: runner
      image: ghcr.io/actions/actions-runner:latest
      command: ["/home/runner/run.sh"]
      volumeMounts:
        - mountPath: /workspace
          name: work-volume
  volumes:
    - name: work-volume
      emptyDir:
        sizeLimit: 1Gi
```

```bash
# The API call to the Kubernetes API used to create the ephemeral containers

POD_NAME="arc-runner-set-6sfwd-runner-k7qq6"
NAMESPACE="arc-runners"

curl -v "https://<IP>:<PORT>/api/v1/namespaces/$NAMESPACE/pods/$POD_NAME/ephemeralcontainers" \
  -X PATCH \
  -H 'Content-Type: application/strategic-merge-patch+json' \
  --cacert <PATH_TO_CACERT> \
  --cert <PATH_TO_CERT> \
  --key <PATH_TO_CLIENT_KEY> \
  -d '
{
  "spec":
  {
    "ephemeralContainers":
    [
      {
        "name": "debugger",
        "command": ["sh"],
        "image": "ghcr.io/actions/actions-runner:latest",
        "targetContainerName": "runner",
        "stdin": true,
        "tty": true,
        "volumeMounts": [{
          "mountPath": "/workspace",
          "name": "work-volume",
          "readOnly": false
        }]
      },
      {
        "name": "debugger2",
        "command": ["sh"],
        "image": "ghcr.io/actions/actions-runner:latest",
        "targetContainerName": "runner",
        "stdin": true,
        "tty": true,
        "volumeMounts": [{
          "mountPath": "/workspace",
          "name": "work-volume",
          "readOnly": false
        }]
      }
    ]
  }
}'
```

</details>

<details>
<summary>emptyDir volume mount</summary>

*(screenshot: docs/adrs/images/emptyDir_volume.png, stored with Git LFS)*

</details>
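For reference, the same patch body can be constructed programmatically with the typed models from `@kubernetes/client-node`, which the k8s hook already depends on. The sketch below is illustrative only: the helper name is invented for this example, and submitting the patch (via curl as above, `kubectl`, or a client call) is deliberately left out, since the generated method names differ between client versions.

```typescript
// Illustrative sketch: build the strategic-merge-patch body used in the curl call above
// with the typed models from @kubernetes/client-node. Submission of the patch is omitted.
import * as k8s from '@kubernetes/client-node'

// buildDebuggerPatch is a hypothetical helper name used only for this example.
function buildDebuggerPatch(targetContainer: string): object {
  const debuggerContainer: k8s.V1EphemeralContainer = {
    name: 'debugger',
    image: 'ghcr.io/actions/actions-runner:latest',
    command: ['sh'],
    targetContainerName: targetContainer,
    stdin: true,
    tty: true,
    volumeMounts: [
      { mountPath: '/workspace', name: 'work-volume', readOnly: false }
    ]
  }

  // The ephemeralcontainers subresource expects a Pod-shaped body that only
  // carries spec.ephemeralContainers.
  return { spec: { ephemeralContainers: [debuggerContainer] } }
}

// JSON.stringify(buildDebuggerPatch('runner')) reproduces the shape of the
// payload passed to -d above (here for a single debugger container).
```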
### Security

According to the [ephemeral containers API specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#ephemeralcontainer-v1-core), the `securityContext` field can be configured.

Ephemeral containers share the same network namespace as the pod they are attached to. This means that ephemeral containers can access the same network interfaces as the pod and can communicate with other containers in the same pod.

It is also possible for ephemeral containers to [share the process namespace](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) with the other containers in the pod. This is disabled by default.

The above could have unpredictable security implications.

### Resource limits

Resources are not allowed for ephemeral containers; ephemeral containers use spare resources already allocated to the pod. [^1] This is a major drawback, as it means that ephemeral containers cannot be configured with resource limits.

There are no guaranteed resources for ad-hoc troubleshooting. If troubleshooting causes a pod to exceed its resource limit, it may be evicted. [^3]

### Logs

Since ephemeral containers can share volumes with the runner container, it's possible to write logs to the same volume and have them available to the runner container.

### Customizability

Ephemeral containers can run any image and tag provided, so they can be customized to run arbitrary jobs. However, it's important to note that the following are not feasible:

- Lifecycle is not allowed for ephemeral containers.
- Ephemeral containers will stop when their command exits, such as exiting a shell, and they will not be restarted. Unlike `kubectl exec`, processes in ephemeral containers will not receive an `EOF` if their connections are interrupted, so shells won't automatically exit on disconnect. There is no API support for killing or restarting an ephemeral container; the only way to exit the container is to send it an OS signal. [^4]
- Probes are not allowed for ephemeral containers.
- Ports are not allowed for ephemeral containers.

## Decision

While the evaluation shows that ephemeral containers can be used to run jobs in containers, it's important to acknowledge that ephemeral containers were not designed to handle workloads, but rather to provide a mechanism to inspect running containers for debugging and troubleshooting purposes.

Given the limitations of ephemeral containers, we decided not to use them outside of their intended purpose.

## Consequences

Proposal rejected; no further action required. This document will be used as a reference for future discussions.

[^1]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#ephemeralcontainer-v1-core
[^2]: https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/
[^3]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/277-ephemeral-containers/README.md#notesconstraintscaveats
[^4]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/277-ephemeral-containers/README.md#ephemeral-container-lifecycle
docs/adrs/0096-hook-extensions.md (new file): 32 lines
@@ -0,0 +1,32 @@

# ADR 0096: Hook extensions

**Date:** 3 August 2023

**Status**: Accepted <!--Accepted|Rejected|Superseded|Deprecated-->

## Context

The current implementation of container hooks does not allow users to customize the pods created by the hook. While the implementation is designed to be used as is or as a starting point, building and maintaining a custom hook implementation just to specify additional fields is not a good user experience.

## Decision

We have decided to add hook extensions to the container hook implementation. This will allow users to customize the pods created by the hook by specifying additional fields. The hook extensions will be implemented in a way that is backwards compatible with the existing hook implementation.

To allow customization, the runner executing the hook should have the `ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE` environment variable pointing to a YAML file on the runner system. The extension specified in that file will be applied to both job pods and container steps.

If the environment variable is set but the file can't be read, the hook will fail, signaling incorrect configuration.

If the environment variable does not exist, the hook will apply the default spec.

If the hook is able to read the extended spec, it will first create a default configuration and then merge the modified fields in the following way (a sketch of this flow follows the list):

1. The `.metadata` fields that will be appended, if they are not reserved, are `labels` and `annotations`.
2. The pod spec fields, except for `containers` and `volumes`, are applied from the template, possibly overwriting the field.
3. The volumes are applied by appending the additional volumes to the default volumes.
4. The containers are merged based on the name assigned to them:
   1. If the name of the container *is not* "$job", the entire spec of the container will be added to the pod definition.
   2. If the name of the container *is* "$job", the `name` and `image` fields are ignored, and the spec is applied so that `env`, `volumeMounts`, and `ports` are appended to the default container spec created by the hook, while the rest of the fields are applied to the newly created container spec.
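As a rough illustration of the merge order above, the sketch below shows how a hook implementation might apply such a template, using the helper names introduced elsewhere in this change set (`readExtensionFromFile`, `mergeObjectMeta`, `mergePodSpecWithOptions`, `mergeContainerWithOptions`). The surrounding glue code and import path are simplifying assumptions for the example, not the exact hook code.

```typescript
// Simplified sketch of the merge flow, assuming the helpers added in
// packages/k8s/src/k8s/utils.ts in this change set.
import * as k8s from '@kubernetes/client-node'
import {
  readExtensionFromFile,      // reads ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE, throws if unreadable
  mergeObjectMeta,            // appends labels/annotations (rule 1)
  mergePodSpecWithOptions,    // overwrites pod spec fields, appends volumes and extra containers (rules 2-4.1)
  mergeContainerWithOptions   // appends env/volumeMounts/ports on the "$job" container (rule 4.2)
} from './k8s/utils'

export function applyExtension(pod: k8s.V1Pod, jobContainer: k8s.V1Container): void {
  const extension = readExtensionFromFile()
  if (!extension) {
    return // no template configured: keep the default spec
  }

  if (extension.metadata) {
    mergeObjectMeta(pod, extension.metadata)
  }
  if (extension.spec) {
    mergePodSpecWithOptions(pod.spec!, extension.spec)
    const from = extension.spec.containers?.find(c => c.name === '$job')
    if (from) {
      mergeContainerWithOptions(jobContainer, from)
    }
  }
}
```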
## Consequences

The addition of hook extensions will provide a better user experience for users who need to customize the pods created by the container hook. However, it will require additional effort to provide the template to the runner pod and to configure it properly.
BIN docs/adrs/images/debugger-runner.png (stored with Git LFS, new file): binary file not shown
BIN docs/adrs/images/debugger2-debugger.png (stored with Git LFS, new file): binary file not shown
BIN docs/adrs/images/emptyDir_volume.png (stored with Git LFS, new file): binary file not shown
BIN docs/adrs/images/runner-debugger.png (stored with Git LFS, new file): binary file not shown
examples/extension.yaml (new file): 30 lines
@@ -0,0 +1,30 @@
metadata:
  annotations:
    annotated-by: "extension"
  labels:
    labeled-by: "extension"
spec:
  securityContext:
    runAsUser: 1000
    runAsGroup: 3000
  restartPolicy: Never
  containers:
    - name: $job # overwrites the job container
      env:
        - name: ENV1
          value: "value1"
      imagePullPolicy: Always
      image: "busybox:1.28" # ignored
      command:
        - sh
      args:
        - -c
        - sleep 50
    - name: side-car
      image: "ubuntu:latest" # required
      command:
        - sh
      args:
        - -c
        - sleep 60
@@ -73,6 +73,8 @@
"contextName": "redis",
"image": "redis",
"createOptions": "--cpus 1",
"entrypoint": null,
"entryPointArgs": [],
"environmentVariables": {},
"userMountVolumes": [
{
28
package-lock.json
generated
28
package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "hooks",
|
||||
"version": "0.1.3",
|
||||
"version": "0.5.0",
|
||||
"lockfileVersion": 2,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "hooks",
|
||||
"version": "0.1.3",
|
||||
"version": "0.5.0",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@types/jest": "^27.5.1",
|
||||
@@ -1800,9 +1800,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/json5": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
|
||||
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
|
||||
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"minimist": "^1.2.0"
|
||||
@@ -2625,9 +2625,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/word-wrap": {
|
||||
"version": "1.2.3",
|
||||
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
|
||||
"integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
|
||||
"version": "1.2.4",
|
||||
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz",
|
||||
"integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==",
|
||||
"dev": true,
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
@@ -3926,9 +3926,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"json5": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
|
||||
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
|
||||
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"minimist": "^1.2.0"
|
||||
@@ -4509,9 +4509,9 @@
|
||||
}
|
||||
},
|
||||
"word-wrap": {
|
||||
"version": "1.2.3",
|
||||
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
|
||||
"integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
|
||||
"version": "1.2.4",
|
||||
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz",
|
||||
"integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==",
|
||||
"dev": true
|
||||
},
|
||||
"wrappy": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "hooks",
|
||||
"version": "0.2.0",
|
||||
"version": "0.5.0",
|
||||
"description": "Three projects are included - k8s: a kubernetes hook implementation that spins up pods dynamically to run a job - docker: A hook implementation of the runner's docker implementation - A hook lib, which contains shared typescript definitions and utilities that the other packages consume",
|
||||
"main": "",
|
||||
"directories": {
|
||||
|
||||
724
packages/docker/package-lock.json
generated
724
packages/docker/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -13,6 +13,7 @@
|
||||
"@actions/core": "^1.9.1",
|
||||
"@actions/exec": "^1.1.1",
|
||||
"hooklib": "file:../hooklib",
|
||||
"shlex": "^2.1.2",
|
||||
"uuid": "^8.3.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
@@ -91,11 +91,12 @@ export async function containerPull(
image: string,
configLocation: string
): Promise<void> {
const dockerArgs: string[] = ['pull']
const dockerArgs: string[] = []
if (configLocation) {
dockerArgs.push('--config')
dockerArgs.push(configLocation)
}
dockerArgs.push('pull')
dockerArgs.push(image)
for (let i = 0; i < 3; i++) {
try {
@@ -443,7 +444,7 @@ export async function isContainerAlpine(containerId: string): Promise<boolean> {
containerId,
'sh',
'-c',
"[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1"
`'[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1'`
]
try {
await runDockerCommand(dockerArgs)
@@ -40,7 +40,7 @@ export async function prepareJob(
if (!container?.image) {
core.info('No job container provided, skipping')
} else {
setupContainer(container)
setupContainer(container, true)

const configLocation = await registryLogin(container.registry)
try {
@@ -174,9 +174,11 @@ function generateResponseFile(
writeToResponseFile(responseFile, JSON.stringify(response))
}

function setupContainer(container): void {
container.entryPointArgs = [`-f`, `/dev/null`]
container.entryPoint = 'tail'
function setupContainer(container, jobContainer = false): void {
if (!container.entryPoint && jobContainer) {
container.entryPointArgs = [`-f`, `/dev/null`]
container.entryPoint = 'tail'
}
}

function generateNetworkName(): string {
@@ -16,15 +16,14 @@ import {
|
||||
import { checkEnvironment } from './utils'
|
||||
|
||||
async function run(): Promise<void> {
|
||||
const input = await getInputFromStdin()
|
||||
|
||||
const args = input['args']
|
||||
const command = input['command']
|
||||
const responseFile = input['responseFile']
|
||||
const state = input['state']
|
||||
|
||||
try {
|
||||
checkEnvironment()
|
||||
const input = await getInputFromStdin()
|
||||
|
||||
const args = input['args']
|
||||
const command = input['command']
|
||||
const responseFile = input['responseFile']
|
||||
const state = input['state']
|
||||
switch (command) {
|
||||
case Command.PrepareJob:
|
||||
await prepareJob(args as PrepareJobArgs, responseFile)
|
||||
|
||||
@@ -5,6 +5,7 @@ import * as core from '@actions/core'
|
||||
import { env } from 'process'
|
||||
// Import this way otherwise typescript has errors
|
||||
const exec = require('@actions/exec')
|
||||
const shlex = require('shlex')
|
||||
|
||||
export interface RunDockerCommandOptions {
|
||||
workingDir?: string
|
||||
@@ -17,6 +18,7 @@ export async function runDockerCommand(
|
||||
options?: RunDockerCommandOptions
|
||||
): Promise<string> {
|
||||
options = optionsWithDockerEnvs(options)
|
||||
args = fixArgs(args)
|
||||
const pipes = await exec.getExecOutput('docker', args, options)
|
||||
if (pipes.exitCode !== 0) {
|
||||
core.error(`Docker failed with exit code ${pipes.exitCode}`)
|
||||
@@ -84,6 +86,10 @@ export function sanitize(val: string): string {
|
||||
return newNameBuilder.join('')
|
||||
}
|
||||
|
||||
export function fixArgs(args: string[]): string[] {
|
||||
return shlex.split(args.join(' '))
|
||||
}
|
||||
|
||||
export function checkEnvironment(): void {
|
||||
if (!env.GITHUB_WORKSPACE) {
|
||||
throw new Error('GITHUB_WORKSPACE is not set')
|
||||
|
||||
@@ -40,21 +40,36 @@ describe('run script step', () => {
|
||||
definitions.runScriptStep.args.entryPoint = '/bin/bash'
|
||||
definitions.runScriptStep.args.entryPointArgs = [
|
||||
'-c',
|
||||
`if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath}:"* ]]; then exit 1; fi`
|
||||
`'if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath}:"* ]]; then exit 1; fi'`
|
||||
]
|
||||
await expect(
|
||||
runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)
|
||||
).resolves.not.toThrow()
|
||||
})
|
||||
|
||||
it("Should fix expansion and print correctly in container's stdout", async () => {
|
||||
const spy = jest.spyOn(process.stdout, 'write').mockImplementation()
|
||||
|
||||
definitions.runScriptStep.args.entryPoint = 'echo'
|
||||
definitions.runScriptStep.args.entryPointArgs = ['"Mona', 'the', `Octocat"`]
|
||||
await expect(
|
||||
runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)
|
||||
).resolves.not.toThrow()
|
||||
expect(spy).toHaveBeenCalledWith(
|
||||
expect.stringContaining('Mona the Octocat')
|
||||
)
|
||||
|
||||
spy.mockRestore()
|
||||
})
|
||||
|
||||
it('Should have path variable changed in container with prepend path string array', async () => {
|
||||
definitions.runScriptStep.args.prependPath = ['/some/other/path']
|
||||
definitions.runScriptStep.args.entryPoint = '/bin/bash'
|
||||
definitions.runScriptStep.args.entryPointArgs = [
|
||||
'-c',
|
||||
`if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath.join(
|
||||
`'if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath.join(
|
||||
':'
|
||||
)}:"* ]]; then exit 1; fi`
|
||||
)}:"* ]]; then exit 1; fi'`
|
||||
]
|
||||
await expect(
|
||||
runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { optionsWithDockerEnvs, sanitize } from '../src/utils'
|
||||
import { optionsWithDockerEnvs, sanitize, fixArgs } from '../src/utils'
|
||||
|
||||
describe('Utilities', () => {
|
||||
it('should return sanitized image name', () => {
|
||||
@@ -10,6 +10,37 @@ describe('Utilities', () => {
|
||||
expect(sanitize(validStr)).toBe(validStr)
|
||||
})
|
||||
|
||||
test.each([
|
||||
[['"Hello', 'World"'], ['Hello World']],
|
||||
[
|
||||
[
|
||||
'sh',
|
||||
'-c',
|
||||
`'[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1'`
|
||||
],
|
||||
[
|
||||
'sh',
|
||||
'-c',
|
||||
`[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1`
|
||||
]
|
||||
],
|
||||
[
|
||||
[
|
||||
'sh',
|
||||
'-c',
|
||||
`'[ $(cat /etc/*release* | grep -i -e '\\''^ID=*alpine*'\\'' -c) != 0 ] || exit 1'`
|
||||
],
|
||||
[
|
||||
'sh',
|
||||
'-c',
|
||||
`[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1`
|
||||
]
|
||||
]
|
||||
])('should fix split arguments(%p, %p)', (args, expected) => {
|
||||
const got = fixArgs(args)
|
||||
expect(got).toStrictEqual(expected)
|
||||
})
|
||||
|
||||
describe('with docker options', () => {
|
||||
it('should augment options with docker environment variables', () => {
|
||||
process.env.DOCKER_HOST = 'unix:///run/user/1001/docker.sock'
|
||||
|
||||
36
packages/hooklib/package-lock.json
generated
36
packages/hooklib/package-lock.json
generated
@@ -1742,9 +1742,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/json5": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
|
||||
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
|
||||
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"minimist": "^1.2.0"
|
||||
@@ -2215,9 +2215,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/semver": {
|
||||
"version": "7.3.7",
|
||||
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz",
|
||||
"integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==",
|
||||
"version": "7.5.4",
|
||||
"resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
|
||||
"integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"lru-cache": "^6.0.0"
|
||||
@@ -2532,9 +2532,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/word-wrap": {
|
||||
"version": "1.2.3",
|
||||
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
|
||||
"integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
|
||||
"version": "1.2.4",
|
||||
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz",
|
||||
"integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==",
|
||||
"dev": true,
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
@@ -3789,9 +3789,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"json5": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
|
||||
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
|
||||
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"minimist": "^1.2.0"
|
||||
@@ -4119,9 +4119,9 @@
|
||||
}
|
||||
},
|
||||
"semver": {
|
||||
"version": "7.3.7",
|
||||
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz",
|
||||
"integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==",
|
||||
"version": "7.5.4",
|
||||
"resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
|
||||
"integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"lru-cache": "^6.0.0"
|
||||
@@ -4344,9 +4344,9 @@
|
||||
}
|
||||
},
|
||||
"word-wrap": {
|
||||
"version": "1.2.3",
|
||||
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
|
||||
"integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
|
||||
"version": "1.2.4",
|
||||
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz",
|
||||
"integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==",
|
||||
"dev": true
|
||||
},
|
||||
"wrappy": {
|
||||
|
||||
1551
packages/k8s/package-lock.json
generated
1551
packages/k8s/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -16,8 +16,10 @@
|
||||
"@actions/core": "^1.9.1",
|
||||
"@actions/exec": "^1.1.1",
|
||||
"@actions/io": "^1.1.2",
|
||||
"@kubernetes/client-node": "^0.16.3",
|
||||
"hooklib": "file:../hooklib"
|
||||
"@kubernetes/client-node": "^0.18.1",
|
||||
"hooklib": "file:../hooklib",
|
||||
"js-yaml": "^4.1.0",
|
||||
"shlex": "^2.1.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/jest": "^27.4.1",
|
||||
|
@@ -42,6 +42,7 @@ export function getSecretName(): string {
export const MAX_POD_NAME_LENGTH = 63
export const STEP_POD_NAME_SUFFIX_LENGTH = 8
export const JOB_CONTAINER_NAME = 'job'
export const JOB_CONTAINER_EXTENSION_NAME = '$job'

export class RunnerInstanceLabel {
private podName: string
@@ -1,25 +1,35 @@
|
||||
import * as core from '@actions/core'
|
||||
import * as io from '@actions/io'
|
||||
import * as k8s from '@kubernetes/client-node'
|
||||
import { ContextPorts, prepareJobArgs, writeToResponseFile } from 'hooklib'
|
||||
import {
|
||||
JobContainerInfo,
|
||||
ContextPorts,
|
||||
PrepareJobArgs,
|
||||
writeToResponseFile
|
||||
} from 'hooklib'
|
||||
import path from 'path'
|
||||
import {
|
||||
containerPorts,
|
||||
createPod,
|
||||
isPodContainerAlpine,
|
||||
prunePods,
|
||||
waitForPodPhases
|
||||
waitForPodPhases,
|
||||
getPrepareJobTimeoutSeconds
|
||||
} from '../k8s'
|
||||
import {
|
||||
containerVolumes,
|
||||
DEFAULT_CONTAINER_ENTRY_POINT,
|
||||
DEFAULT_CONTAINER_ENTRY_POINT_ARGS,
|
||||
PodPhase
|
||||
generateContainerName,
|
||||
mergeContainerWithOptions,
|
||||
readExtensionFromFile,
|
||||
PodPhase,
|
||||
fixArgs
|
||||
} from '../k8s/utils'
|
||||
import { JOB_CONTAINER_NAME } from './constants'
|
||||
import { JOB_CONTAINER_EXTENSION_NAME, JOB_CONTAINER_NAME } from './constants'
|
||||
|
||||
export async function prepareJob(
|
||||
args: prepareJobArgs,
|
||||
args: PrepareJobArgs,
|
||||
responseFile
|
||||
): Promise<void> {
|
||||
if (!args.container) {
|
||||
@@ -27,26 +37,46 @@ export async function prepareJob(
|
||||
}
|
||||
|
||||
await prunePods()
|
||||
|
||||
const extension = readExtensionFromFile()
|
||||
await copyExternalsToRoot()
|
||||
|
||||
let container: k8s.V1Container | undefined = undefined
|
||||
if (args.container?.image) {
|
||||
core.debug(`Using image '${args.container.image}' for job image`)
|
||||
container = createPodSpec(args.container, JOB_CONTAINER_NAME, true)
|
||||
container = createContainerSpec(
|
||||
args.container,
|
||||
JOB_CONTAINER_NAME,
|
||||
true,
|
||||
extension
|
||||
)
|
||||
}
|
||||
|
||||
let services: k8s.V1Container[] = []
|
||||
if (args.services?.length) {
|
||||
services = args.services.map(service => {
|
||||
core.debug(`Adding service '${service.image}' to pod definition`)
|
||||
return createPodSpec(service, service.image.split(':')[0])
|
||||
return createContainerSpec(
|
||||
service,
|
||||
generateContainerName(service.image),
|
||||
false,
|
||||
undefined
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
if (!container && !services?.length) {
|
||||
throw new Error('No containers exist, skipping hook invocation')
|
||||
}
|
||||
|
||||
let createdPod: k8s.V1Pod | undefined = undefined
|
||||
try {
|
||||
createdPod = await createPod(container, services, args.container.registry)
|
||||
createdPod = await createPod(
|
||||
container,
|
||||
services,
|
||||
args.container.registry,
|
||||
extension
|
||||
)
|
||||
} catch (err) {
|
||||
await prunePods()
|
||||
throw new Error(`failed to create job pod: ${err}`)
|
||||
@@ -63,7 +93,8 @@ export async function prepareJob(
|
||||
await waitForPodPhases(
|
||||
createdPod.metadata.name,
|
||||
new Set([PodPhase.RUNNING]),
|
||||
new Set([PodPhase.PENDING])
|
||||
new Set([PodPhase.PENDING]),
|
||||
getPrepareJobTimeoutSeconds()
|
||||
)
|
||||
} catch (err) {
|
||||
await prunePods()
|
||||
@@ -124,13 +155,11 @@ function generateResponseFile(
|
||||
)
|
||||
if (serviceContainers?.length) {
|
||||
response.context['services'] = serviceContainers.map(c => {
|
||||
if (!c.ports) {
|
||||
return
|
||||
}
|
||||
|
||||
const ctxPorts: ContextPorts = {}
|
||||
for (const port of c.ports) {
|
||||
ctxPorts[port.containerPort] = port.hostPort
|
||||
if (c.ports?.length) {
|
||||
for (const port of c.ports) {
|
||||
ctxPorts[port.containerPort] = port.hostPort
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
@@ -153,10 +182,11 @@ async function copyExternalsToRoot(): Promise<void> {
|
||||
}
|
||||
}
|
||||
|
||||
function createPodSpec(
|
||||
container,
|
||||
export function createContainerSpec(
|
||||
container: JobContainerInfo,
|
||||
name: string,
|
||||
jobContainer = false
|
||||
jobContainer = false,
|
||||
extension?: k8s.V1PodTemplateSpec
|
||||
): k8s.V1Container {
|
||||
if (!container.entryPoint && jobContainer) {
|
||||
container.entryPoint = DEFAULT_CONTAINER_ENTRY_POINT
|
||||
@@ -166,14 +196,20 @@ function createPodSpec(
|
||||
const podContainer = {
|
||||
name,
|
||||
image: container.image,
|
||||
command: [container.entryPoint],
|
||||
args: container.entryPointArgs,
|
||||
ports: containerPorts(container)
|
||||
} as k8s.V1Container
|
||||
if (container.workingDirectory) {
|
||||
podContainer.workingDir = container.workingDirectory
|
||||
}
|
||||
|
||||
if (container.entryPoint) {
|
||||
podContainer.command = [container.entryPoint]
|
||||
}
|
||||
|
||||
if (container.entryPointArgs?.length > 0) {
|
||||
podContainer.args = fixArgs(container.entryPointArgs)
|
||||
}
|
||||
|
||||
podContainer.env = []
|
||||
for (const [key, value] of Object.entries(
|
||||
container['environmentVariables']
|
||||
@@ -188,5 +224,17 @@ function createPodSpec(
|
||||
jobContainer
|
||||
)
|
||||
|
||||
if (!extension) {
|
||||
return podContainer
|
||||
}
|
||||
|
||||
const from = extension.spec?.containers?.find(
|
||||
c => c.name === JOB_CONTAINER_EXTENSION_NAME
|
||||
)
|
||||
|
||||
if (from) {
|
||||
mergeContainerWithOptions(podContainer, from)
|
||||
}
|
||||
|
||||
return podContainer
|
||||
}
|
||||
|
||||
@@ -12,12 +12,12 @@ import {
|
||||
} from '../k8s'
|
||||
import {
|
||||
containerVolumes,
|
||||
DEFAULT_CONTAINER_ENTRY_POINT,
|
||||
DEFAULT_CONTAINER_ENTRY_POINT_ARGS,
|
||||
PodPhase,
|
||||
writeEntryPointScript
|
||||
mergeContainerWithOptions,
|
||||
readExtensionFromFile,
|
||||
fixArgs
|
||||
} from '../k8s/utils'
|
||||
import { JOB_CONTAINER_NAME } from './constants'
|
||||
import { JOB_CONTAINER_EXTENSION_NAME, JOB_CONTAINER_NAME } from './constants'
|
||||
|
||||
export async function runContainerStep(
|
||||
stepContainer: RunContainerStepArgs
|
||||
@@ -31,10 +31,12 @@ export async function runContainerStep(
|
||||
secretName = await createSecretForEnvs(stepContainer.environmentVariables)
|
||||
}
|
||||
|
||||
core.debug(`Created secret ${secretName} for container job envs`)
|
||||
const container = createPodSpec(stepContainer, secretName)
|
||||
const extension = readExtensionFromFile()
|
||||
|
||||
const job = await createJob(container)
|
||||
core.debug(`Created secret ${secretName} for container job envs`)
|
||||
const container = createContainerSpec(stepContainer, secretName, extension)
|
||||
|
||||
const job = await createJob(container, extension)
|
||||
if (!job.metadata?.name) {
|
||||
throw new Error(
|
||||
`Expected job ${JSON.stringify(
|
||||
@@ -75,24 +77,21 @@ export async function runContainerStep(
|
||||
return Number(exitCode) || 1
|
||||
}
|
||||
|
||||
function createPodSpec(
|
||||
function createContainerSpec(
|
||||
container: RunContainerStepArgs,
|
||||
secretName?: string
|
||||
secretName?: string,
|
||||
extension?: k8s.V1PodTemplateSpec
|
||||
): k8s.V1Container {
|
||||
const podContainer = new k8s.V1Container()
|
||||
podContainer.name = JOB_CONTAINER_NAME
|
||||
podContainer.image = container.image
|
||||
|
||||
const { entryPoint, entryPointArgs } = container
|
||||
container.entryPoint = 'sh'
|
||||
|
||||
const { containerPath } = writeEntryPointScript(
|
||||
container.workingDirectory,
|
||||
entryPoint || DEFAULT_CONTAINER_ENTRY_POINT,
|
||||
entryPoint ? entryPointArgs || [] : DEFAULT_CONTAINER_ENTRY_POINT_ARGS
|
||||
)
|
||||
container.entryPointArgs = ['-e', containerPath]
|
||||
podContainer.command = [container.entryPoint, ...container.entryPointArgs]
|
||||
podContainer.workingDir = container.workingDirectory
|
||||
podContainer.command = container.entryPoint
|
||||
? [container.entryPoint]
|
||||
: undefined
|
||||
podContainer.args = container.entryPointArgs?.length
|
||||
? fixArgs(container.entryPointArgs)
|
||||
: undefined
|
||||
|
||||
if (secretName) {
|
||||
podContainer.envFrom = [
|
||||
@@ -106,5 +105,16 @@ function createPodSpec(
|
||||
}
|
||||
podContainer.volumeMounts = containerVolumes(undefined, false, true)
|
||||
|
||||
if (!extension) {
|
||||
return podContainer
|
||||
}
|
||||
|
||||
const from = extension.spec?.containers?.find(
|
||||
c => c.name === JOB_CONTAINER_EXTENSION_NAME
|
||||
)
|
||||
if (from) {
|
||||
mergeContainerWithOptions(podContainer, from)
|
||||
}
|
||||
|
||||
return podContainer
|
||||
}
|
||||
|
||||
@@ -9,15 +9,13 @@ import {
|
||||
import { isAuthPermissionsOK, namespace, requiredPermissions } from './k8s'
|
||||
|
||||
async function run(): Promise<void> {
|
||||
const input = await getInputFromStdin()
|
||||
|
||||
const args = input['args']
|
||||
const command = input['command']
|
||||
const responseFile = input['responseFile']
|
||||
const state = input['state']
|
||||
|
||||
let exitCode = 0
|
||||
try {
|
||||
const input = await getInputFromStdin()
|
||||
|
||||
const args = input['args']
|
||||
const command = input['command']
|
||||
const responseFile = input['responseFile']
|
||||
const state = input['state']
|
||||
if (!(await isAuthPermissionsOK())) {
|
||||
throw new Error(
|
||||
`The Service account needs the following permissions ${JSON.stringify(
|
||||
@@ -25,28 +23,28 @@ async function run(): Promise<void> {
|
||||
)} on the pod resource in the '${namespace()}' namespace. Please contact your self hosted runner administrator.`
|
||||
)
|
||||
}
|
||||
|
||||
let exitCode = 0
|
||||
switch (command) {
|
||||
case Command.PrepareJob:
|
||||
await prepareJob(args as prepareJobArgs, responseFile)
|
||||
break
|
||||
return process.exit(0)
|
||||
case Command.CleanupJob:
|
||||
await cleanupJob()
|
||||
break
|
||||
return process.exit(0)
|
||||
case Command.RunScriptStep:
|
||||
await runScriptStep(args, state, null)
|
||||
break
|
||||
return process.exit(0)
|
||||
case Command.RunContainerStep:
|
||||
exitCode = await runContainerStep(args)
|
||||
break
|
||||
case Command.runContainerStep:
|
||||
return process.exit(exitCode)
|
||||
default:
|
||||
throw new Error(`Command not recognized: ${command}`)
|
||||
}
|
||||
} catch (error) {
|
||||
core.error(error as Error)
|
||||
exitCode = 1
|
||||
process.exit(1)
|
||||
}
|
||||
process.exitCode = exitCode
|
||||
}
|
||||
|
||||
void run()
|
||||
|
||||
@@ -10,7 +10,12 @@ import {
|
||||
getVolumeClaimName,
|
||||
RunnerInstanceLabel
|
||||
} from '../hooks/constants'
|
||||
import { PodPhase } from './utils'
|
||||
import {
|
||||
PodPhase,
|
||||
mergePodSpecWithOptions,
|
||||
mergeObjectMeta,
|
||||
useKubeScheduler
|
||||
} from './utils'
|
||||
|
||||
const kc = new k8s.KubeConfig()
|
||||
|
||||
@@ -20,6 +25,8 @@ const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
|
||||
const k8sBatchV1Api = kc.makeApiClient(k8s.BatchV1Api)
|
||||
const k8sAuthorizationV1Api = kc.makeApiClient(k8s.AuthorizationV1Api)
|
||||
|
||||
const DEFAULT_WAIT_FOR_POD_TIME_SECONDS = 10 * 60 // 10 min
|
||||
|
||||
export const POD_VOLUME_NAME = 'work'
|
||||
|
||||
export const requiredPermissions = [
|
||||
@@ -58,7 +65,8 @@ export const requiredPermissions = [
|
||||
export async function createPod(
|
||||
jobContainer?: k8s.V1Container,
|
||||
services?: k8s.V1Container[],
|
||||
registry?: Registry
|
||||
registry?: Registry,
|
||||
extension?: k8s.V1PodTemplateSpec
|
||||
): Promise<k8s.V1Pod> {
|
||||
const containers: k8s.V1Container[] = []
|
||||
if (jobContainer) {
|
||||
@@ -80,11 +88,16 @@ export async function createPod(
|
||||
appPod.metadata.labels = {
|
||||
[instanceLabel.key]: instanceLabel.value
|
||||
}
|
||||
appPod.metadata.annotations = {}
|
||||
|
||||
appPod.spec = new k8s.V1PodSpec()
|
||||
appPod.spec.containers = containers
|
||||
appPod.spec.restartPolicy = 'Never'
|
||||
appPod.spec.nodeName = await getCurrentNodeName()
|
||||
|
||||
if (!useKubeScheduler()) {
|
||||
appPod.spec.nodeName = await getCurrentNodeName()
|
||||
}
|
||||
|
||||
const claimName = getVolumeClaimName()
|
||||
appPod.spec.volumes = [
|
||||
{
|
||||
@@ -103,12 +116,21 @@ export async function createPod(
|
||||
appPod.spec.imagePullSecrets = [secretReference]
|
||||
}
|
||||
|
||||
if (extension?.metadata) {
|
||||
mergeObjectMeta(appPod, extension.metadata)
|
||||
}
|
||||
|
||||
if (extension?.spec) {
|
||||
mergePodSpecWithOptions(appPod.spec, extension.spec)
|
||||
}
|
||||
|
||||
const { body } = await k8sApi.createNamespacedPod(namespace(), appPod)
|
||||
return body
|
||||
}
|
||||
|
||||
export async function createJob(
|
||||
container: k8s.V1Container
|
||||
container: k8s.V1Container,
|
||||
extension?: k8s.V1PodTemplateSpec
|
||||
): Promise<k8s.V1Job> {
|
||||
const runnerInstanceLabel = new RunnerInstanceLabel()
|
||||
|
||||
@@ -118,6 +140,7 @@ export async function createJob(
|
||||
job.metadata = new k8s.V1ObjectMeta()
|
||||
job.metadata.name = getStepPodName()
|
||||
job.metadata.labels = { [runnerInstanceLabel.key]: runnerInstanceLabel.value }
|
||||
job.metadata.annotations = {}
|
||||
|
||||
job.spec = new k8s.V1JobSpec()
|
||||
job.spec.ttlSecondsAfterFinished = 300
|
||||
@@ -125,9 +148,15 @@ export async function createJob(
|
||||
job.spec.template = new k8s.V1PodTemplateSpec()
|
||||
|
||||
job.spec.template.spec = new k8s.V1PodSpec()
|
||||
job.spec.template.metadata = new k8s.V1ObjectMeta()
|
||||
job.spec.template.metadata.labels = {}
|
||||
job.spec.template.metadata.annotations = {}
|
||||
job.spec.template.spec.containers = [container]
|
||||
job.spec.template.spec.restartPolicy = 'Never'
|
||||
job.spec.template.spec.nodeName = await getCurrentNodeName()
|
||||
|
||||
if (!useKubeScheduler()) {
|
||||
job.spec.template.spec.nodeName = await getCurrentNodeName()
|
||||
}
|
||||
|
||||
const claimName = getVolumeClaimName()
|
||||
job.spec.template.spec.volumes = [
|
||||
@@ -137,6 +166,17 @@ export async function createJob(
|
||||
}
|
||||
]
|
||||
|
||||
if (extension) {
|
||||
if (extension.metadata) {
|
||||
// apply metadata both to the job and the pod created by the job
|
||||
mergeObjectMeta(job, extension.metadata)
|
||||
mergeObjectMeta(job.spec.template, extension.metadata)
|
||||
}
|
||||
if (extension.spec) {
|
||||
mergePodSpecWithOptions(job.spec.template.spec, extension.spec)
|
||||
}
|
||||
}
|
||||
|
||||
const { body } = await k8sBatchV1Api.createNamespacedJob(namespace(), job)
|
||||
return body
|
||||
}
|
||||
@@ -320,7 +360,7 @@ export async function waitForPodPhases(
|
||||
podName: string,
|
||||
awaitingPhases: Set<PodPhase>,
|
||||
backOffPhases: Set<PodPhase>,
|
||||
maxTimeSeconds = 10 * 60 // 10 min
|
||||
maxTimeSeconds = DEFAULT_WAIT_FOR_POD_TIME_SECONDS
|
||||
): Promise<void> {
|
||||
const backOffManager = new BackOffManager(maxTimeSeconds)
|
||||
let phase: PodPhase = PodPhase.UNKNOWN
|
||||
@@ -343,6 +383,25 @@ export async function waitForPodPhases(
|
||||
}
|
||||
}
|
||||
|
||||
export function getPrepareJobTimeoutSeconds(): number {
|
||||
const envTimeoutSeconds =
|
||||
process.env['ACTIONS_RUNNER_PREPARE_JOB_TIMEOUT_SECONDS']
|
||||
|
||||
if (!envTimeoutSeconds) {
|
||||
return DEFAULT_WAIT_FOR_POD_TIME_SECONDS
|
||||
}
|
||||
|
||||
const timeoutSeconds = parseInt(envTimeoutSeconds, 10)
|
||||
if (!timeoutSeconds || timeoutSeconds <= 0) {
|
||||
core.warning(
|
||||
`Prepare job timeout is invalid ("${timeoutSeconds}"): use an int > 0`
|
||||
)
|
||||
return DEFAULT_WAIT_FOR_POD_TIME_SECONDS
|
||||
}
|
||||
|
||||
return timeoutSeconds
|
||||
}
|
||||
|
||||
async function getPodPhase(podName: string): Promise<PodPhase> {
|
||||
const podPhaseLookup = new Set<string>([
|
||||
PodPhase.PENDING,
|
||||
@@ -452,7 +511,7 @@ export async function isPodContainerAlpine(
|
||||
[
|
||||
'sh',
|
||||
'-c',
|
||||
"[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1"
|
||||
`'[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1'`
|
||||
],
|
||||
podName,
|
||||
containerName
|
||||
@@ -473,6 +532,7 @@ async function getCurrentNodeName(): Promise<string> {
|
||||
}
|
||||
return nodeName
|
||||
}
|
||||
|
||||
export function namespace(): string {
|
||||
if (process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']) {
|
||||
return process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']
|
||||
@@ -517,6 +577,9 @@ export function containerPorts(
|
||||
container: ContainerInfo
|
||||
): k8s.V1ContainerPort[] {
|
||||
const ports: k8s.V1ContainerPort[] = []
|
||||
if (!container.portMappings?.length) {
|
||||
return ports
|
||||
}
|
||||
for (const portDefinition of container.portMappings) {
|
||||
const portProtoSplit = portDefinition.split('/')
|
||||
if (portProtoSplit.length > 2) {
|
||||
@@ -551,3 +614,8 @@ export function containerPorts(
|
||||
}
|
||||
return ports
|
||||
}
|
||||
|
||||
export async function getPodByName(name): Promise<k8s.V1Pod> {
|
||||
const { body } = await k8sApi.readNamespacedPod(name, namespace())
|
||||
return body
|
||||
}
|
||||
|
||||
@@ -1,13 +1,20 @@
|
||||
import * as k8s from '@kubernetes/client-node'
|
||||
import * as fs from 'fs'
|
||||
import * as yaml from 'js-yaml'
|
||||
import * as core from '@actions/core'
|
||||
import { Mount } from 'hooklib'
|
||||
import * as path from 'path'
|
||||
import { v1 as uuidv4 } from 'uuid'
|
||||
import { POD_VOLUME_NAME } from './index'
|
||||
import { JOB_CONTAINER_EXTENSION_NAME } from '../hooks/constants'
|
||||
import * as shlex from 'shlex'
|
||||
|
||||
export const DEFAULT_CONTAINER_ENTRY_POINT_ARGS = [`-f`, `/dev/null`]
|
||||
export const DEFAULT_CONTAINER_ENTRY_POINT = 'tail'
|
||||
|
||||
export const ENV_HOOK_TEMPLATE_PATH = 'ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE'
|
||||
export const ENV_USE_KUBE_SCHEDULER = 'ACTIONS_RUNNER_USE_KUBE_SCHEDULER'
|
||||
|
||||
export function containerVolumes(
|
||||
userMountVolumes: Mount[] = [],
|
||||
jobContainer = true,
|
||||
@@ -111,11 +118,22 @@ export function writeEntryPointScript(
|
||||
if (environmentVariables && Object.entries(environmentVariables).length) {
|
||||
const envBuffer: string[] = []
|
||||
for (const [key, value] of Object.entries(environmentVariables)) {
|
||||
if (
|
||||
key.includes(`=`) ||
|
||||
key.includes(`'`) ||
|
||||
key.includes(`"`) ||
|
||||
key.includes(`$`)
|
||||
) {
|
||||
throw new Error(
|
||||
`environment key ${key} is invalid - the key must not contain =, $, ', or "`
|
||||
)
|
||||
}
|
||||
envBuffer.push(
|
||||
`"${key}=${value
|
||||
.replace(/\\/g, '\\\\')
|
||||
.replace(/"/g, '\\"')
|
||||
.replace(/=/g, '\\=')}"`
|
||||
.replace(/\$/g, '\\$')
|
||||
.replace(/`/g, '\\`')}"`
|
||||
)
|
||||
}
|
||||
environmentPrefix = `env ${envBuffer.join(' ')} `
|
||||
@@ -137,6 +155,117 @@ exec ${environmentPrefix} ${entryPoint} ${
|
||||
}
|
||||
}
|
||||
|
||||
export function generateContainerName(image: string): string {
|
||||
const nameWithTag = image.split('/').pop()
|
||||
const name = nameWithTag?.split(':').at(0)
|
||||
|
||||
if (!name) {
|
||||
throw new Error(`Image definition '${image}' is invalid`)
|
||||
}
|
||||
|
||||
return name
|
||||
}
|
||||
|
||||
// Overwrite or append based on container options
|
||||
//
|
||||
// Keep in mind, envs and volumes could be passed as fields in container definition
|
||||
// so default volume mounts and envs are appended first, and then create options are used
|
||||
// to append more values
|
||||
//
|
||||
// Rest of the fields are just applied
|
||||
// For example, container.createOptions.container.image is going to overwrite container.image field
|
||||
export function mergeContainerWithOptions(
|
||||
base: k8s.V1Container,
|
||||
from: k8s.V1Container
|
||||
): void {
|
||||
for (const [key, value] of Object.entries(from)) {
|
||||
if (key === 'name') {
|
||||
if (value !== base.name && value !== JOB_CONTAINER_EXTENSION_NAME) {
|
||||
core.warning("Skipping name override: name can't be overwritten")
|
||||
}
|
||||
continue
|
||||
} else if (key === 'image') {
|
||||
core.warning("Skipping image override: image can't be overwritten")
|
||||
continue
|
||||
} else if (key === 'env') {
|
||||
const envs = value as k8s.V1EnvVar[]
|
||||
base.env = mergeLists(base.env, envs)
|
||||
} else if (key === 'volumeMounts' && value) {
|
||||
const volumeMounts = value as k8s.V1VolumeMount[]
|
||||
base.volumeMounts = mergeLists(base.volumeMounts, volumeMounts)
|
||||
} else if (key === 'ports' && value) {
|
||||
const ports = value as k8s.V1ContainerPort[]
|
||||
base.ports = mergeLists(base.ports, ports)
|
||||
} else {
|
||||
base[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function mergePodSpecWithOptions(
|
||||
base: k8s.V1PodSpec,
|
||||
from: k8s.V1PodSpec
|
||||
): void {
|
||||
for (const [key, value] of Object.entries(from)) {
|
||||
if (key === 'containers') {
|
||||
base.containers.push(
|
||||
...from.containers.filter(e => !e.name?.startsWith('$'))
|
||||
)
|
||||
} else if (key === 'volumes' && value) {
|
||||
const volumes = value as k8s.V1Volume[]
|
||||
base.volumes = mergeLists(base.volumes, volumes)
|
||||
} else {
|
||||
base[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function mergeObjectMeta(
|
||||
base: { metadata?: k8s.V1ObjectMeta },
|
||||
from: k8s.V1ObjectMeta
|
||||
): void {
|
||||
if (!base.metadata?.labels || !base.metadata?.annotations) {
|
||||
throw new Error(
|
||||
"Can't merge metadata: base.metadata or base.annotations field is undefined"
|
||||
)
|
||||
}
|
||||
if (from?.labels) {
|
||||
for (const [key, value] of Object.entries(from.labels)) {
|
||||
if (base.metadata?.labels?.[key]) {
|
||||
core.warning(`Label ${key} is already defined and will be overwritten`)
|
||||
}
|
||||
base.metadata.labels[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
if (from?.annotations) {
|
||||
for (const [key, value] of Object.entries(from.annotations)) {
|
||||
if (base.metadata?.annotations?.[key]) {
|
||||
core.warning(
|
||||
`Annotation ${key} is already defined and will be overwritten`
|
||||
)
|
||||
}
|
||||
base.metadata.annotations[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function readExtensionFromFile(): k8s.V1PodTemplateSpec | undefined {
|
||||
const filePath = process.env[ENV_HOOK_TEMPLATE_PATH]
|
||||
if (!filePath) {
|
||||
return undefined
|
||||
}
|
||||
const doc = yaml.load(fs.readFileSync(filePath, 'utf8'))
|
||||
if (!doc || typeof doc !== 'object') {
|
||||
throw new Error(`Failed to parse ${filePath}`)
|
||||
}
|
||||
return doc as k8s.V1PodTemplateSpec
|
||||
}
|
||||
|
||||
export function useKubeScheduler(): boolean {
|
||||
return process.env[ENV_USE_KUBE_SCHEDULER] === 'true'
|
||||
}
|
||||
|
||||
export enum PodPhase {
|
||||
PENDING = 'Pending',
|
||||
RUNNING = 'Running',
|
||||
@@ -145,3 +274,16 @@ export enum PodPhase {
|
||||
UNKNOWN = 'Unknown',
|
||||
COMPLETED = 'Completed'
|
||||
}
|
||||
|
||||
function mergeLists<T>(base?: T[], from?: T[]): T[] {
|
||||
const b: T[] = base || []
|
||||
if (!from?.length) {
|
||||
return b
|
||||
}
|
||||
b.push(...from)
|
||||
return b
|
||||
}
|
||||
|
||||
export function fixArgs(args: string[]): string[] {
|
||||
return shlex.split(args.join(' '))
|
||||
}
|
||||
|
||||
@@ -1,6 +1,15 @@
import * as fs from 'fs'
import * as fs from 'fs'
import { containerPorts, POD_VOLUME_NAME } from '../src/k8s'
import { containerVolumes, writeEntryPointScript } from '../src/k8s/utils'
import {
  containerVolumes,
  generateContainerName,
  writeEntryPointScript,
  mergePodSpecWithOptions,
  mergeContainerWithOptions,
  readExtensionFromFile,
  ENV_HOOK_TEMPLATE_PATH
} from '../src/k8s/utils'
import * as k8s from '@kubernetes/client-node'
import { TestHelper } from './test-setup'

let testHelper: TestHelper
@@ -45,6 +54,81 @@ describe('k8s utils', () => {
    ).toThrow()
  })

  it('should throw if environment variable name contains double quote', () => {
    expect(() =>
      writeEntryPointScript(
        '/test',
        'sh',
        ['-e', 'script.sh'],
        ['/prepend/path'],
        {
          'SOME"_ENV': 'SOME_VALUE'
        }
      )
    ).toThrow()
  })

  it('should throw if environment variable name contains =', () => {
    expect(() =>
      writeEntryPointScript(
        '/test',
        'sh',
        ['-e', 'script.sh'],
        ['/prepend/path'],
        {
          'SOME=ENV': 'SOME_VALUE'
        }
      )
    ).toThrow()
  })

  it('should throw if environment variable name contains single quote', () => {
    expect(() =>
      writeEntryPointScript(
        '/test',
        'sh',
        ['-e', 'script.sh'],
        ['/prepend/path'],
        {
          "SOME'_ENV": 'SOME_VALUE'
        }
      )
    ).toThrow()
  })

  it('should throw if environment variable name contains dollar', () => {
    expect(() =>
      writeEntryPointScript(
        '/test',
        'sh',
        ['-e', 'script.sh'],
        ['/prepend/path'],
        {
          SOME_$_ENV: 'SOME_VALUE'
        }
      )
    ).toThrow()
  })

  it('should escape double quote, dollar and backslash in environment variable values', () => {
    const { runnerPath } = writeEntryPointScript(
      '/test',
      'sh',
      ['-e', 'script.sh'],
      ['/prepend/path'],
      {
        DQUOTE: '"',
        BACK_SLASH: '\\',
        DOLLAR: '$'
      }
    )
    expect(fs.existsSync(runnerPath)).toBe(true)
    const script = fs.readFileSync(runnerPath, 'utf8')
    expect(script).toContain('"DQUOTE=\\"')
    expect(script).toContain('"BACK_SLASH=\\\\"')
    expect(script).toContain('"DOLLAR=\\$"')
  })

  it('should return object with containerPath and runnerPath', () => {
    const { containerPath, runnerPath } = writeEntryPointScript(
      '/test',
@@ -221,4 +305,211 @@ describe('k8s utils', () => {
      expect(() => containerPorts({ portMappings: ['1/tcp/udp'] })).toThrow()
    })
  })

  describe('generate container name', () => {
    it('should return the container name from image string', () => {
      expect(
        generateContainerName('public.ecr.aws/localstack/localstack')
      ).toEqual('localstack')
      expect(
        generateContainerName(
          'public.ecr.aws/url/with/multiple/slashes/postgres:latest'
        )
      ).toEqual('postgres')
      expect(generateContainerName('postgres')).toEqual('postgres')
      expect(generateContainerName('postgres:latest')).toEqual('postgres')
      expect(generateContainerName('localstack/localstack')).toEqual(
        'localstack'
      )
      expect(generateContainerName('localstack/localstack:latest')).toEqual(
        'localstack'
      )
    })

    it('should throw on invalid image string', () => {
      expect(() =>
        generateContainerName('localstack/localstack/:latest')
      ).toThrow()
      expect(() => generateContainerName(':latest')).toThrow()
    })
  })

  describe('read extension', () => {
    beforeEach(async () => {
      testHelper = new TestHelper()
      await testHelper.initialize()
    })

    afterEach(async () => {
      await testHelper.cleanup()
    })

    it('should throw if env variable is set but file does not exist', () => {
      process.env[ENV_HOOK_TEMPLATE_PATH] =
        '/path/that/does/not/exist/data.yaml'
      expect(() => readExtensionFromFile()).toThrow()
    })

    it('should return undefined if env variable is not set', () => {
      delete process.env[ENV_HOOK_TEMPLATE_PATH]
      expect(readExtensionFromFile()).toBeUndefined()
    })

    it('should throw if file is empty', () => {
      let filePath = testHelper.createFile('data.yaml')
      process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
      expect(() => readExtensionFromFile()).toThrow()
    })

    it('should throw if file is not valid yaml', () => {
      let filePath = testHelper.createFile('data.yaml')
      fs.writeFileSync(filePath, 'invalid yaml')
      process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
      expect(() => readExtensionFromFile()).toThrow()
    })

    it('should return object if file is valid', () => {
      let filePath = testHelper.createFile('data.yaml')
      fs.writeFileSync(
        filePath,
        `
metadata:
  labels:
    label-name: label-value
  annotations:
    annotation-name: annotation-value
spec:
  containers:
    - name: test
      image: node:14.16
    - name: job
      image: ubuntu:latest`
      )

      process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
      const extension = readExtensionFromFile()
      expect(extension).toBeDefined()
    })
  })

  it('should merge container spec', () => {
    const base = {
      image: 'node:14.16',
      name: 'test',
      env: [
        {
          name: 'TEST',
          value: 'TEST'
        }
      ],
      ports: [
        {
          containerPort: 8080,
          hostPort: 8080,
          protocol: 'TCP'
        }
      ]
    } as k8s.V1Container

    const from = {
      ports: [
        {
          containerPort: 9090,
          hostPort: 9090,
          protocol: 'TCP'
        }
      ],
      env: [
        {
          name: 'TEST_TWO',
          value: 'TEST_TWO'
        }
      ],
      image: 'ubuntu:latest',
      name: 'overwrite'
    } as k8s.V1Container

    const expectContainer = {
      name: base.name,
      image: base.image,
      ports: [
        ...(base.ports as k8s.V1ContainerPort[]),
        ...(from.ports as k8s.V1ContainerPort[])
      ],
      env: [...(base.env as k8s.V1EnvVar[]), ...(from.env as k8s.V1EnvVar[])]
    }

    const expectJobContainer = JSON.parse(JSON.stringify(expectContainer))
    expectJobContainer.name = base.name
    mergeContainerWithOptions(base, from)
    expect(base).toStrictEqual(expectContainer)
  })

  it('should merge pod spec', () => {
    const base = {
      containers: [
        {
          image: 'node:14.16',
          name: 'test',
          env: [
            {
              name: 'TEST',
              value: 'TEST'
            }
          ],
          ports: [
            {
              containerPort: 8080,
              hostPort: 8080,
              protocol: 'TCP'
            }
          ]
        }
      ],
      restartPolicy: 'Never'
    } as k8s.V1PodSpec

    const from = {
      securityContext: {
        runAsUser: 1000,
        fsGroup: 2000
      },
      restartPolicy: 'Always',
      volumes: [
        {
          name: 'work',
          emptyDir: {}
        }
      ],
      containers: [
        {
          image: 'ubuntu:latest',
          name: 'side-car',
          env: [
            {
              name: 'TEST',
              value: 'TEST'
            }
          ],
          ports: [
            {
              containerPort: 8080,
              hostPort: 8080,
              protocol: 'TCP'
            }
          ]
        }
      ]
    } as k8s.V1PodSpec

    const expected = JSON.parse(JSON.stringify(base))
    expected.securityContext = from.securityContext
    expected.restartPolicy = from.restartPolicy
    expected.volumes = from.volumes
    expected.containers.push(from.containers[0])

    mergePodSpecWithOptions(base, from)

    expect(base).toStrictEqual(expected)
  })
})

@@ -1,8 +1,18 @@
import * as fs from 'fs'
import * as path from 'path'
import { cleanupJob } from '../src/hooks'
import { prepareJob } from '../src/hooks/prepare-job'
import { createContainerSpec, prepareJob } from '../src/hooks/prepare-job'
import { TestHelper } from './test-setup'
import {
  ENV_HOOK_TEMPLATE_PATH,
  ENV_USE_KUBE_SCHEDULER,
  generateContainerName,
  readExtensionFromFile
} from '../src/k8s/utils'
import { getPodByName } from '../src/k8s'
import { V1Container } from '@kubernetes/client-node'
import * as yaml from 'js-yaml'
import { JOB_CONTAINER_NAME } from '../src/hooks/constants'

jest.useRealTimers()

@@ -71,4 +81,78 @@ describe('Prepare job', () => {
      prepareJob(prepareJobData.args, prepareJobOutputFilePath)
    ).rejects.toThrow()
  })

  it('should not set command + args for service container if not passed in args', async () => {
    const services = prepareJobData.args.services.map(service => {
      return createContainerSpec(service, generateContainerName(service.image))
    }) as [V1Container]

    expect(services[0].command).toBe(undefined)
    expect(services[0].args).toBe(undefined)
  })

  it('should run pod with extensions applied', async () => {
    process.env[ENV_HOOK_TEMPLATE_PATH] = path.join(
      __dirname,
      '../../../examples/extension.yaml'
    )

    await expect(
      prepareJob(prepareJobData.args, prepareJobOutputFilePath)
    ).resolves.not.toThrow()

    delete process.env[ENV_HOOK_TEMPLATE_PATH]

    const content = JSON.parse(
      fs.readFileSync(prepareJobOutputFilePath).toString()
    )

    const got = await getPodByName(content.state.jobPod)

    expect(got.metadata?.annotations?.['annotated-by']).toBe('extension')
    expect(got.metadata?.labels?.['labeled-by']).toBe('extension')
    expect(got.spec?.securityContext?.runAsUser).toBe(1000)
    expect(got.spec?.securityContext?.runAsGroup).toBe(3000)

    // job container
    expect(got.spec?.containers[0].name).toBe(JOB_CONTAINER_NAME)
    expect(got.spec?.containers[0].image).toBe('node:14.16')
    expect(got.spec?.containers[0].command).toEqual(['sh'])
    expect(got.spec?.containers[0].args).toEqual(['-c', 'sleep 50'])

    // service container
    expect(got.spec?.containers[1].image).toBe('redis')
    expect(got.spec?.containers[1].command).toBeFalsy()
    expect(got.spec?.containers[1].args).toBeFalsy()
    // side-car
    expect(got.spec?.containers[2].name).toBe('side-car')
    expect(got.spec?.containers[2].image).toBe('ubuntu:latest')
    expect(got.spec?.containers[2].command).toEqual(['sh'])
    expect(got.spec?.containers[2].args).toEqual(['-c', 'sleep 60'])
  })

  it('should not throw exception using kube scheduler', async () => {
    // only for ReadWriteMany volumes or single node cluster
    process.env[ENV_USE_KUBE_SCHEDULER] = 'true'

    await expect(
      prepareJob(prepareJobData.args, prepareJobOutputFilePath)
    ).resolves.not.toThrow()

    delete process.env[ENV_USE_KUBE_SCHEDULER]
  })

  test.each([undefined, null, []])(
    'should not throw exception when portMapping=%p',
    async pm => {
      prepareJobData.args.services.forEach(s => {
        s.portMappings = pm
      })
      await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
      const content = JSON.parse(
        fs.readFileSync(prepareJobOutputFilePath).toString()
      )
      expect(() => content.context.services[0].image).not.toThrow()
    }
  )
})

@@ -1,5 +1,9 @@
import { runContainerStep } from '../src/hooks'
import { TestHelper } from './test-setup'
import { ENV_HOOK_TEMPLATE_PATH } from '../src/k8s/utils'
import * as fs from 'fs'
import * as yaml from 'js-yaml'
import { JOB_CONTAINER_EXTENSION_NAME } from '../src/hooks/constants'

jest.useRealTimers()

@@ -23,9 +27,45 @@ describe('Run container step', () => {
    expect(exitCode).toBe(0)
  })

  it('should fail if the working directory does not exist', async () => {
    runContainerStepData.args.workingDirectory = '/foo/bar'
    await expect(runContainerStep(runContainerStepData.args)).rejects.toThrow()
  it('should run pod with extensions applied', async () => {
    const extension = {
      metadata: {
        annotations: {
          foo: 'bar'
        },
        labels: {
          bar: 'baz'
        }
      },
      spec: {
        containers: [
          {
            name: JOB_CONTAINER_EXTENSION_NAME,
            command: ['sh'],
            args: ['-c', 'echo test']
          },
          {
            name: 'side-container',
            image: 'ubuntu:latest',
            command: ['sh'],
            args: ['-c', 'echo test']
          }
        ],
        restartPolicy: 'Never',
        securityContext: {
          runAsUser: 1000,
          runAsGroup: 3000
        }
      }
    }

    let filePath = testHelper.createFile()
    fs.writeFileSync(filePath, yaml.dump(extension))
    process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
    await expect(
      runContainerStep(runContainerStepData.args)
    ).resolves.not.toThrow()
    delete process.env[ENV_HOOK_TEMPLATE_PATH]
  })

  it('should have env variables available', async () => {

@@ -89,6 +89,28 @@ describe('Run script step', () => {
    ).resolves.not.toThrow()
  })

  it('Dollar symbols in environment variables should not be expanded', async () => {
    runScriptStepDefinition.args.environmentVariables = {
      VARIABLE1: '$VAR',
      VARIABLE2: '${VAR}',
      VARIABLE3: '$(VAR)'
    }
    runScriptStepDefinition.args.entryPointArgs = [
      '-c',
      '\'if [[ -z "$VARIABLE1" ]]; then exit 1; fi\'',
      '\'if [[ -z "$VARIABLE2" ]]; then exit 2; fi\'',
      '\'if [[ -z "$VARIABLE3" ]]; then exit 3; fi\''
    ]

    await expect(
      runScriptStep(
        runScriptStepDefinition.args,
        prepareJobOutputData.state,
        null
      )
    ).resolves.not.toThrow()
  })

  it('Should have path variable changed in container with prepend path string array', async () => {
    runScriptStepDefinition.args.prependPath = ['/some/other/path']
    runScriptStepDefinition.args.entryPoint = '/bin/bash'

@@ -1,9 +1,14 @@
## Features
- Always use the Docker related ENVs from the host machine instead of ENVs from the runner job [#40]
- Use user defined entrypoints for service containers (instead of `tail -f /dev/null`)

<!-- ## Features -->
## Bugs
- Fixed substring issue with /github/workspace and /github/file_commands [#35]
- Fixed issue related to setting hostPort and containerPort when formatting is not recognized by k8s default [#38]

<!-- ## Misc
- Add option to use the kubernetes scheduler for workflow pods [#111]
- Docker and K8s: Fix shell arguments when split by the runner [#115]

<!-- ## Misc -->

## SHA-256 Checksums

The SHA-256 checksums for the packages included in this build are shown below:

- actions-runner-hooks-docker-<HOOK_VERSION>.zip <DOCKER_SHA>
- actions-runner-hooks-k8s-<HOOK_VERSION>.zip <K8S_SHA>

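As an aside, one way to check a downloaded archive against the digests listed above is Node's built-in crypto module (a sketch; the file name keeps the placeholder form used in these notes):

import { createHash } from 'crypto'
import { readFileSync } from 'fs'

const digest = createHash('sha256')
  .update(readFileSync('actions-runner-hooks-k8s-<HOOK_VERSION>.zip'))
  .digest('hex')
console.log(digest) // compare with the published <K8S_SHA> value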