Compare commits


26 Commits

Author SHA1 Message Date
Ferenc Hammerl
56f935a374 Merge branch 'main' of https://github.com/actions/runner-container-hooks into fhammerl+nikola-jokic/kaniko 2023-01-04 11:27:54 +01:00
Nikola Jokic
7271e71008 managed to execute a Docker Hub push and run that image 2022-10-24 13:13:16 +02:00
Nikola Jokic
e33f331739 included secretName 2022-10-21 16:56:30 +02:00
Nikola Jokic
11de25a121 refactored the api to accept remote registry, not complete yet 2022-10-21 16:03:14 +02:00
Nikola Jokic
4e674e284a moved from random string generation to uuidv4() 2022-10-18 12:14:28 +02:00
Nikola Jokic
f841b42f55 run script step path repaired 2022-10-04 16:01:55 +02:00
Nikola Jokic
66566368e0 added backoff if NotFound on getPodPhase 2022-10-04 15:14:48 +02:00
Nikola Jokic
79262ba5fb format applied 2022-09-30 12:10:12 +02:00
Nikola Jokic
0cb9e396ea fixed env variable name in test 2022-09-30 12:06:40 +02:00
Nikola Jokic
b696059824 checked out k8s and docker package-lock from main 2022-09-30 11:05:16 +02:00
Nikola Jokic
365a99a4de Removed exposing git token for kaniko, removed testing comments, added
wait for kaniko
2022-09-29 16:03:15 +02:00
Nikola Jokic
02f00d0fd5 removed unnecessary permissions 2022-09-29 13:27:23 +02:00
Nikola Jokic
5e916d49cc upgraded package lock on hooklib 2022-09-29 11:03:48 +02:00
Nikola Jokic
a29f87c874 repaired lock files on docker and k8s 2022-09-29 10:52:41 +02:00
Nikola Jokic
6de86a9ef4 repaired lock to version 2 2022-09-29 10:48:20 +02:00
Nikola Jokic
31a2cda987 added ACTIONS_ prefix and added cleanup kaniko pod 2022-09-29 10:46:03 +02:00
Nikola Jokic
67d3f481f5 extracted creating a registry to test, written basic test expecting not to throw an exception 2022-09-28 16:01:07 +02:00
Nikola Jokic
5b7b738864 formatted kaniko.ts 2022-09-27 12:40:45 +02:00
Ferenc Hammerl
a99346d1ab Actually run built image 2022-09-26 13:57:51 +00:00
Ferenc Hammerl
3d102fd372 Mount volume with hardcoded path 2022-09-26 11:50:02 +00:00
Nikola Jokic
4de51ee6a5 random handle and random image name 2022-09-21 17:23:16 +02:00
Nikola Jokic
c8e272367f Merge branch 'main' into nikola-jokic/kaniko 2022-09-21 15:32:21 +02:00
Nikola Jokic
c4aa97c974 included generation of random handle/image 2022-09-21 15:29:39 +02:00
Nikola Jokic
f400db92cc Fixed invocation of registry. Basic run works hardcoded
Console logs are left in place and should be deleted
2022-09-21 13:54:25 +02:00
Nikola Jokic
5f0dc3f3b6 created base resource definitions for registry and kaniko 2022-09-21 10:39:04 +02:00
Nikola Jokic
6ef042836f fixing defaulting to docker hub on private registry, and b64 encoding 2022-07-29 13:27:17 +02:00
40 changed files with 1972 additions and 2526 deletions

.gitattributes

@@ -1 +0,0 @@
*.png filter=lfs diff=lfs merge=lfs -text


@@ -13,47 +13,28 @@ jobs:
- run: npm run build-all
name: Build packages
- uses: actions/github-script@v6
id: releaseVersion
id: releaseNotes
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
const hookVersion = require('./package.json').version
var releaseNotes = fs.readFileSync('${{ github.workspace }}/releaseNotes.md', 'utf8').replace(/<HOOK_VERSION>/g, hookVersion)
console.log(releaseNotes)
core.setOutput('version', hookVersion);
core.setOutput('note', releaseNotes);
- name: Zip up releases
run: |
zip -r -j actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.version }}.zip packages/docker/dist
zip -r -j actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.version }}.zip packages/k8s/dist
- name: Calculate SHA
id: sha
shell: bash
run: |
sha_docker=$(sha256sum actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.version }}.zip | awk '{print $1}')
echo "Docker SHA: $sha_docker"
echo "docker-sha=$sha_docker" >> $GITHUB_OUTPUT
sha_k8s=$(sha256sum actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.version }}.zip | awk '{print $1}')
echo "K8s SHA: $sha_k8s"
echo "k8s-sha=$sha_k8s" >> $GITHUB_OUTPUT
- name: replace SHA
id: releaseNotes
uses: actions/github-script@v6
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
var releaseNotes = fs.readFileSync('${{ github.workspace }}/releaseNotes.md', 'utf8').replace(/<HOOK_VERSION>/g, '${{ steps.releaseVersion.outputs.version }}')
releaseNotes = releaseNotes.replace(/<DOCKER_SHA>/g, '${{ steps.sha.outputs.docker-sha }}')
releaseNotes = releaseNotes.replace(/<K8S_SHA>/g, '${{ steps.sha.outputs.k8s-sha }}')
console.log(releaseNotes)
core.setOutput('note', releaseNotes);
zip -r -j actions-runner-hooks-docker-${{ steps.releaseNotes.outputs.version }}.zip packages/docker/dist
zip -r -j actions-runner-hooks-k8s-${{ steps.releaseNotes.outputs.version }}.zip packages/k8s/dist
- uses: actions/create-release@v1
id: createRelease
name: Create ${{ steps.releaseVersion.outputs.version }} Hook Release
name: Create ${{ steps.releaseNotes.outputs.version }} Hook Release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: "v${{ steps.releaseVersion.outputs.version }}"
release_name: "v${{ steps.releaseVersion.outputs.version }}"
tag_name: "v${{ steps.releaseNotes.outputs.version }}"
release_name: "v${{ steps.releaseNotes.outputs.version }}"
body: |
${{ steps.releaseNotes.outputs.note }}
- name: Upload K8s hooks
@@ -62,8 +43,8 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.version }}.zip
asset_name: actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.version }}.zip
asset_path: ${{ github.workspace }}/actions-runner-hooks-k8s-${{ steps.releaseNotes.outputs.version }}.zip
asset_name: actions-runner-hooks-k8s-${{ steps.releaseNotes.outputs.version }}.zip
asset_content_type: application/octet-stream
- name: Upload docker hooks
uses: actions/upload-release-asset@v1
@@ -71,6 +52,6 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.version }}.zip
asset_name: actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.version }}.zip
asset_path: ${{ github.workspace }}/actions-runner-hooks-docker-${{ steps.releaseNotes.outputs.version }}.zip
asset_name: actions-runner-hooks-docker-${{ steps.releaseNotes.outputs.version }}.zip
asset_content_type: application/octet-stream


@@ -1 +1 @@
* @actions/actions-launch @actions/runner-akvelon
* @actions/actions-runtime @actions/runner-akvelon


@@ -1,184 +0,0 @@
# ADR 0072: Using Ephemeral Containers
**Date:** 27 March 2023
**Status**: Rejected <!--Accepted|Rejected|Superseded|Deprecated-->
## Context
We are evaluating using Kubernetes [ephemeral containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/) as a drop-in replacement for creating pods for [jobs that run in containers](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) and [service containers](https://docs.github.com/en/actions/using-containerized-services/about-service-containers).
The main motivator behind using ephemeral containers is to eliminate the need for [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). Persistent Volume implementations vary by provider, and we want to avoid depending on them so that we can offer our end-users a consistent experience.
With ephemeral containers we could leverage [emptyDir volumes](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), which fit our use case better and behave consistently across providers.
However, it's important to acknowledge that ephemeral containers were not designed to handle workloads but rather to provide a mechanism to inspect running containers for debugging and troubleshooting purposes.
## Evaluation
The criteria that we are using to evaluate whether ephemeral containers are fit for purpose are:
- Networking
- Storage
- Security
- Resource limits
- Logs
- Customizability
### Networking
Ephemeral containers share the networking namespace of the pod they are attached to. This means that ephemeral containers can access the same network interfaces as the pod and can communicate with other containers in the same pod. However, ephemeral containers cannot have ports configured, so the `ports`, `livenessProbe`, and `readinessProbe` fields are not available. [^1][^2]
In this scenario we have three containers in a pod:
- `runner`: the main container that runs the GitHub Actions job
- `debugger`: the first ephemeral container
- `debugger2`: the second ephemeral container
By sequentially opening ports on each of these containers and connecting to them, we can demonstrate that the communication flow between the runner and the debuggers is feasible (a minimal reproduction sketch follows the screenshots below).
<details>
<summary>1. Runner -> Debugger communication</summary>
![runner->debugger](./images/runner-debugger.png)
</details>
<details>
<summary>2. Debugger -> Runner communication</summary>
![debugger->runner](./images/debugger-runner.png)
</details>
<details>
<summary>3. Debugger2 -> Debugger communication</summary>
![debugger2->debugger](./images/debugger2-debugger.png)
</details>
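A minimal sketch of the port checks pictured above, assuming Node.js is available inside the containers; the port number and script name are illustrative and not part of any hook implementation:
```typescript
// connectivity-check.ts: run with `node connectivity-check.js listen` in one
// container and `node connectivity-check.js connect` in another.
import * as net from 'net'

const PORT = 8080 // illustrative; ephemeral containers cannot declare `ports`

if (process.argv[2] === 'listen') {
  // A process inside an ephemeral container can still bind a port in the
  // shared pod network namespace.
  net
    .createServer(socket => socket.end('hello from the listener\n'))
    .listen(PORT, () => console.log(`listening on ${PORT}`))
} else {
  // All containers in the pod share localhost, so 127.0.0.1 reaches
  // whichever container is currently listening.
  const socket = net.connect(PORT, '127.0.0.1', () => console.log('connected'))
  socket.on('data', data => {
    process.stdout.write(data.toString())
    socket.end()
  })
}
```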
### Storage
An emptyDir volume can be successfully mounted (read/write) by the runner as well as the ephemeral containers. This means that ephemeral containers can share data with the runner and other ephemeral containers.
<details>
<summary>Configuration</summary>
```yaml
# Extracted from the values.yaml for the gha-runner-scale-set helm chart
spec:
containers:
- name: runner
image: ghcr.io/actions/actions-runner:latest
command: ["/home/runner/run.sh"]
volumeMounts:
- mountPath: /workspace
name: work-volume
volumes:
- name: work-volume
emptyDir:
sizeLimit: 1Gi
```
```bash
# The API call to the Kubernetes API used to create the ephemeral containers
POD_NAME="arc-runner-set-6sfwd-runner-k7qq6"
NAMESPACE="arc-runners"
curl -v "https://<IP>:<PORT>/api/v1/namespaces/$NAMESPACE/pods/$POD_NAME/ephemeralcontainers" \
-X PATCH \
-H 'Content-Type: application/strategic-merge-patch+json' \
--cacert <PATH_TO_CACERT> \
--cert <PATH_TO_CERT> \
--key <PATH_TO_CLIENT_KEY> \
-d '
{
"spec":
{
"ephemeralContainers":
[
{
"name": "debugger",
"command": ["sh"],
"image": "ghcr.io/actions/actions-runner:latest",
"targetContainerName": "runner",
"stdin": true,
"tty": true,
"volumeMounts": [{
"mountPath": "/workspace",
"name": "work-volume",
"readOnly": false
}]
},
{
"name": "debugger2",
"command": ["sh"],
"image": "ghcr.io/actions/actions-runner:latest",
"targetContainerName": "runner",
"stdin": true,
"tty": true,
"volumeMounts": [{
"mountPath": "/workspace",
"name": "work-volume",
"readOnly": false
}]
}
]
}
}'
```
</details>
<details>
<summary>emptyDir volume mount</summary>
![emptyDir volume mount](./images/emptyDir_volume.png)
</details>
### Security
According to the [ephemeral containers API specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#ephemeralcontainer-v1-core) the configuration of the `securityContext` field is possible.
As noted in the Networking section, ephemeral containers share the pod's network namespace, so they can access the same network interfaces and communicate with the other containers in the pod.
It is also possible for ephemeral containers to [share the process namespace](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) with the other containers in the pod. This is disabled by default.
Both forms of sharing could have unpredictable security implications.
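As an illustration of the `securityContext` support noted above, a hypothetical locked-down debugger spec; the field names follow the EphemeralContainer API, while the names, image, and values are illustrative:
```typescript
import * as k8s from '@kubernetes/client-node'

// A hypothetical ephemeral debugger with a restrictive security context.
// Everything here is an example; nothing is taken from the hook sources.
const debuggerContainer = new k8s.V1EphemeralContainer()
debuggerContainer.name = 'debugger'
debuggerContainer.image = 'ghcr.io/actions/actions-runner:latest'
debuggerContainer.targetContainerName = 'runner'
debuggerContainer.securityContext = {
  runAsNonRoot: true,
  runAsUser: 1000,
  allowPrivilegeEscalation: false,
  capabilities: { drop: ['ALL'] }
}
```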
### Resource limits
Resource requests and limits are not allowed for ephemeral containers; they use spare resources already allocated to the pod. [^1] This is a major drawback, as it means that ephemeral containers cannot be configured with resource limits.
There are no guaranteed resources for ad-hoc troubleshooting, and if troubleshooting causes a pod to exceed its resource limit, it may be evicted. [^3]
### Logs
Since ephemeral containers can share volumes with the runner container, it's possible to write logs to the same volume and have them available to the runner container.
### Customizability
Ephemeral containers can run any image and tag provided to them, so they can be customized to run arbitrary jobs. However, it's important to note that the following are not feasible:
- Lifecycle is not allowed for ephemeral containers
- Ephemeral containers will stop when their command exits, such as exiting a shell, and they will not be restarted. Unlike `kubectl exec`, processes in ephemeral containers will not receive an `EOF` if their connections are interrupted, so shells won't automatically exit on disconnect. There is no API support for killing or restarting an ephemeral container; the only way to stop one is to send its process an OS signal. [^4]
- Probes are not allowed for ephemeral containers.
- Ports are not allowed for ephemeral containers.
## Decision
While the evaluation shows that ephemeral containers can be used to run jobs in containers, it's important to acknowledge that they were not designed to handle workloads but rather to provide a mechanism to inspect running containers for debugging and troubleshooting purposes.
Given the limitations of ephemeral containers, we decided not to use them outside of their intended purpose.
## Consequences
Proposal rejected; no further action is required. This document will be used as a reference for future discussions.
[^1]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#ephemeralcontainer-v1-core
[^2]: https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/
[^3]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/277-ephemeral-containers/README.md#notesconstraintscaveats
[^4]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/277-ephemeral-containers/README.md#ephemeral-container-lifecycle


@@ -1,34 +0,0 @@
# ADR 0096: Hook extensions
**Date:** 3 August 2023
**Status**: Superseded [^1]
## Context
The current implementation of container hooks does not allow users to customize the pods created by the hook. While the implementation is designed to be used as is or as a starting point, building and maintaining a custom hook implementation just to specify additional fields is not a good user experience.
## Decision
We have decided to add hook extensions to the container hook implementation. This will allow users to customize the pods created by the hook by specifying additional fields. The hook extensions will be implemented in a way that is backwards-compatible with the existing hook implementation.
To allow customization, the runner executing the hook should have the `ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE` environment variable pointing to a YAML file on the runner system. The extension specified in that file will be applied to both job pods and container steps.
If the environment variable is set but the file can't be read, the hook will fail, signaling incorrect configuration.
If the environment variable is not set, the hook will apply the default spec.
When the hook is able to read the extended spec, it will first create a default configuration and then merge the modified fields in the following way (a sketch of the template loading follows the list):
1. The `.metadata` fields `labels` and `annotations` are appended, provided they are not reserved.
2. The pod spec fields other than `containers` and `volumes` are applied from the template, possibly overwriting the defaults.
3. Volumes are applied by appending the additional volumes to the default volumes.
4. The containers are merged based on the name assigned to them:
   1. If the name of the container *is not* "$job", the entire container spec is added to the pod definition.
   2. If the name of the container *is* "$job", the `name` and `image` fields are ignored; `env`, `volumeMounts`, and `ports` are appended to the default container spec created by the hook, while the rest of the fields are applied to that container spec.
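A minimal sketch of the template loading described above; it mirrors the `readExtensionFromFile` helper referenced elsewhere in this diff, assuming `js-yaml` for parsing, with illustrative error messages:
```typescript
import * as fs from 'fs'
import * as yaml from 'js-yaml'
import * as k8s from '@kubernetes/client-node'

export const ENV_HOOK_TEMPLATE_PATH = 'ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE'

// Returns undefined when the variable is unset (the hook then applies the
// default spec); throws when the file cannot be read or parsed, signaling
// incorrect configuration.
export function readExtensionFromFile(): k8s.V1PodTemplateSpec | undefined {
  const filePath = process.env[ENV_HOOK_TEMPLATE_PATH]
  if (!filePath) {
    return undefined
  }
  let contents: string
  try {
    contents = fs.readFileSync(filePath, 'utf8')
  } catch (err) {
    throw new Error(`Failed to read hook template at ${filePath}: ${err}`)
  }
  const doc = yaml.load(contents)
  if (!doc || typeof doc !== 'object') {
    throw new Error(`Hook template at ${filePath} is not a valid YAML object`)
  }
  return doc as k8s.V1PodTemplateSpec
}
```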
## Consequences
The addition of hook extensions will provide a better user experience for users who need to customize the pods created by the container hook. However, it will require additional effort to provide the template to the runner pod and configure it properly.
[^1]: Superseded by [ADR 0134](0134-hook-extensions.md)


@@ -1,41 +0,0 @@
# ADR 0134: Hook extensions
**Date:** 20 February 2024
**Status**: Accepted [^1]
## Context
The current implementation of container hooks does not allow users to customize the pods created by the hook.
While the implementation is designed to be used as is or as a starting point, building and maintaining a custom hook implementation just to specify additional fields is not a good user experience.
## Decision
We have decided to add hook extensions to the container hook implementation.
This will allow users to customize the pods created by the hook by specifying additional fields.
The hook extensions will be implemented in a way that is backwards-compatible with the existing hook implementation.
To allow customization, the runner executing the hook should have the `ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE` environment variable pointing to a YAML file on the runner system.
The extension specified in that file will be applied to both job pods and container steps.
If the environment variable is set but the file can't be read, the hook will fail, signaling incorrect configuration.
If the environment variable is not set, the hook will apply the default spec.
When the hook is able to read the extended spec, it will first create a default configuration and then merge the modified fields in the following way (see the sketch after this list):
1. The `.metadata` fields `labels` and `annotations` are appended, provided they are not reserved.
2. The pod spec fields other than `containers` and `volumes` are applied from the template, possibly overwriting the defaults.
3. Volumes are applied by appending the additional volumes to the default volumes.
4. The containers are merged based on the name assigned to them:
   1. If the name of the container *is* "$job", the `name` and `image` fields are ignored; `env`, `volumeMounts`, and `ports` are appended to the default container spec created by the hook, while the rest of the fields are applied to that container spec.
   2. If the name of the container *starts with* "$" and matches the name of a [service container](https://docs.github.com/en/actions/using-containerized-services/about-service-containers), the `name` and `image` fields are ignored and the spec is applied to that service container: `env`, `volumeMounts`, and `ports` are appended to the default container spec the hook created for the service, while the rest of the fields are applied to that container spec. If no service container with that name is defined in the workflow, the spec extension is ignored.
   3. If the name of the container *does not start with* "$", the entire container spec is added to the pod definition.
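A minimal sketch of the name-based merge described above; `mergeContainers` and its field handling are an illustrative simplification, not the hook's actual implementation:
```typescript
import * as k8s from '@kubernetes/client-node'

const CONTAINER_EXTENSION_PREFIX = '$'

// base: the containers generated by the hook ("job" plus one container per
// declared service); extension: the containers listed in the template.
export function mergeContainers(
  base: k8s.V1Container[],
  extension: k8s.V1Container[]
): k8s.V1Container[] {
  const merged = [...base]
  for (const from of extension) {
    if (!from.name.startsWith(CONTAINER_EXTENSION_PREFIX)) {
      // Rule 3: plain names (e.g. a side-car) are appended as new containers.
      merged.push(from)
      continue
    }
    // Rules 1 and 2: "$job" targets the job container, "$redis" targets the
    // service container generated for the "redis" service, and so on.
    const target = merged.find(
      c => CONTAINER_EXTENSION_PREFIX + c.name === from.name
    )
    if (!target) {
      continue // no matching service container: the extension is ignored
    }
    const { name, image, env, volumeMounts, ports, ...rest } = from
    // `name` and `image` are ignored; the list fields are appended ...
    target.env = [...(target.env ?? []), ...(env ?? [])]
    target.volumeMounts = [...(target.volumeMounts ?? []), ...(volumeMounts ?? [])]
    target.ports = [...(target.ports ?? []), ...(ports ?? [])]
    // ... and the remaining fields overwrite the hook's defaults.
    Object.assign(target, rest)
  }
  return merged
}
```
Applied to the sample hook template that appears later in this diff, `$job` and `$redis` extend the generated containers, while `side-car` is appended as a new container.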
## Consequences
The addition of hook extensions will provide a better user experience for users who need to customize the pods created by the container hook.
However, it will require additional effort to provide the template to the runner pod and configure it properly.
[^1]: Supersedes [ADR 0096](0096-hook-extensions.md)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.


@@ -1,41 +0,0 @@
metadata:
annotations:
annotated-by: "extension"
labels:
labeled-by: "extension"
spec:
securityContext:
runAsUser: 1000
runAsGroup: 3000
restartPolicy: Never
containers:
- name: $job # overwrites job container
env:
- name: ENV1
value: "value1"
imagePullPolicy: Always
image: "busybox:1.28" # Ignored
command:
- sh
args:
- -c
- sleep 50
- name: $redis # overwrites redis service
env:
- name: ENV2
value: "value2"
image: "busybox:1.28" # Ignored
resources:
requests:
memory: "1Mi"
cpu: "1"
limits:
memory: "1Gi"
cpu: "2"
- name: side-car
image: "ubuntu:latest" # required
command:
- sh
args:
- -c
- sleep 60


@@ -73,8 +73,6 @@
"contextName": "redis",
"image": "redis",
"createOptions": "--cpus 1",
"entrypoint": null,
"entryPointArgs": [],
"environmentVariables": {},
"userMountVolumes": [
{

package-lock.json

@@ -1,12 +1,12 @@
{
"name": "hooks",
"version": "0.6.1",
"version": "0.1.3",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "hooks",
"version": "0.6.1",
"version": "0.1.3",
"license": "MIT",
"devDependencies": {
"@types/jest": "^27.5.1",
@@ -1800,9 +1800,9 @@
"dev": true
},
"node_modules/json5": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
"dev": true,
"dependencies": {
"minimist": "^1.2.0"
@@ -2322,9 +2322,9 @@
}
},
"node_modules/semver": {
"version": "7.6.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
"integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"version": "7.3.7",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz",
"integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
@@ -2625,9 +2625,9 @@
}
},
"node_modules/word-wrap": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz",
"integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==",
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
"integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
"dev": true,
"engines": {
"node": ">=0.10.0"
@@ -3926,9 +3926,9 @@
"dev": true
},
"json5": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
"dev": true,
"requires": {
"minimist": "^1.2.0"
@@ -4292,9 +4292,9 @@
}
},
"semver": {
"version": "7.6.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
"integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"version": "7.3.7",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz",
"integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
@@ -4509,9 +4509,9 @@
}
},
"word-wrap": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz",
"integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==",
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
"integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
"dev": true
},
"wrappy": {


@@ -1,6 +1,6 @@
{
"name": "hooks",
"version": "0.6.1",
"version": "0.2.0",
"description": "Three projects are included - k8s: a kubernetes hook implementation that spins up pods dynamically to run a job - docker: A hook implementation of the runner's docker implementation - A hook lib, which contains shared typescript definitions and utilities that the other packages consume",
"main": "",
"directories": {

File diff suppressed because it is too large.


@@ -13,7 +13,6 @@
"@actions/core": "^1.9.1",
"@actions/exec": "^1.1.1",
"hooklib": "file:../hooklib",
"shlex": "^2.1.2",
"uuid": "^8.3.2"
},
"devDependencies": {


@@ -91,12 +91,11 @@ export async function containerPull(
image: string,
configLocation: string
): Promise<void> {
const dockerArgs: string[] = []
const dockerArgs: string[] = ['pull']
if (configLocation) {
dockerArgs.push('--config')
dockerArgs.push(configLocation)
}
dockerArgs.push('pull')
dockerArgs.push(image)
for (let i = 0; i < 3; i++) {
try {
@@ -444,7 +443,7 @@ export async function isContainerAlpine(containerId: string): Promise<boolean> {
containerId,
'sh',
'-c',
`'[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1'`
"[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1"
]
try {
await runDockerCommand(dockerArgs)


@@ -40,7 +40,7 @@ export async function prepareJob(
if (!container?.image) {
core.info('No job container provided, skipping')
} else {
setupContainer(container, true)
setupContainer(container)
const configLocation = await registryLogin(container.registry)
try {
@@ -174,11 +174,9 @@ function generateResponseFile(
writeToResponseFile(responseFile, JSON.stringify(response))
}
function setupContainer(container, jobContainer = false): void {
if (!container.entryPoint && jobContainer) {
container.entryPointArgs = [`-f`, `/dev/null`]
container.entryPoint = 'tail'
}
function setupContainer(container): void {
container.entryPointArgs = [`-f`, `/dev/null`]
container.entryPoint = 'tail'
}
function generateNetworkName(): string {


@@ -16,14 +16,15 @@ import {
import { checkEnvironment } from './utils'
async function run(): Promise<void> {
const input = await getInputFromStdin()
const args = input['args']
const command = input['command']
const responseFile = input['responseFile']
const state = input['state']
try {
checkEnvironment()
const input = await getInputFromStdin()
const args = input['args']
const command = input['command']
const responseFile = input['responseFile']
const state = input['state']
switch (command) {
case Command.PrepareJob:
await prepareJob(args as PrepareJobArgs, responseFile)


@@ -5,7 +5,6 @@ import * as core from '@actions/core'
import { env } from 'process'
// Import this way otherwise typescript has errors
const exec = require('@actions/exec')
const shlex = require('shlex')
export interface RunDockerCommandOptions {
workingDir?: string
@@ -18,7 +17,6 @@ export async function runDockerCommand(
options?: RunDockerCommandOptions
): Promise<string> {
options = optionsWithDockerEnvs(options)
args = fixArgs(args)
const pipes = await exec.getExecOutput('docker', args, options)
if (pipes.exitCode !== 0) {
core.error(`Docker failed with exit code ${pipes.exitCode}`)
@@ -86,10 +84,6 @@ export function sanitize(val: string): string {
return newNameBuilder.join('')
}
export function fixArgs(args: string[]): string[] {
return shlex.split(args.join(' '))
}
export function checkEnvironment(): void {
if (!env.GITHUB_WORKSPACE) {
throw new Error('GITHUB_WORKSPACE is not set')


@@ -40,36 +40,21 @@ describe('run script step', () => {
definitions.runScriptStep.args.entryPoint = '/bin/bash'
definitions.runScriptStep.args.entryPointArgs = [
'-c',
`'if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath}:"* ]]; then exit 1; fi'`
`if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath}:"* ]]; then exit 1; fi`
]
await expect(
runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)
).resolves.not.toThrow()
})
it("Should fix expansion and print correctly in container's stdout", async () => {
const spy = jest.spyOn(process.stdout, 'write').mockImplementation()
definitions.runScriptStep.args.entryPoint = 'echo'
definitions.runScriptStep.args.entryPointArgs = ['"Mona', 'the', `Octocat"`]
await expect(
runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)
).resolves.not.toThrow()
expect(spy).toHaveBeenCalledWith(
expect.stringContaining('Mona the Octocat')
)
spy.mockRestore()
})
it('Should have path variable changed in container with prepend path string array', async () => {
definitions.runScriptStep.args.prependPath = ['/some/other/path']
definitions.runScriptStep.args.entryPoint = '/bin/bash'
definitions.runScriptStep.args.entryPointArgs = [
'-c',
`'if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath.join(
`if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath.join(
':'
)}:"* ]]; then exit 1; fi'`
)}:"* ]]; then exit 1; fi`
]
await expect(
runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)


@@ -1,4 +1,4 @@
import { optionsWithDockerEnvs, sanitize, fixArgs } from '../src/utils'
import { optionsWithDockerEnvs, sanitize } from '../src/utils'
describe('Utilities', () => {
it('should return sanitized image name', () => {
@@ -10,37 +10,6 @@ describe('Utilities', () => {
expect(sanitize(validStr)).toBe(validStr)
})
test.each([
[['"Hello', 'World"'], ['Hello World']],
[
[
'sh',
'-c',
`'[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1'`
],
[
'sh',
'-c',
`[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1`
]
],
[
[
'sh',
'-c',
`'[ $(cat /etc/*release* | grep -i -e '\\''^ID=*alpine*'\\'' -c) != 0 ] || exit 1'`
],
[
'sh',
'-c',
`[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1`
]
]
])('should fix split arguments(%p, %p)', (args, expected) => {
const got = fixArgs(args)
expect(got).toStrictEqual(expected)
})
describe('with docker options', () => {
it('should augment options with docker environment variables', () => {
process.env.DOCKER_HOST = 'unix:///run/user/1001/docker.sock'


@@ -1742,9 +1742,9 @@
"dev": true
},
"node_modules/json5": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
"dev": true,
"dependencies": {
"minimist": "^1.2.0"
@@ -2215,9 +2215,9 @@
}
},
"node_modules/semver": {
"version": "7.5.4",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
"integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
"version": "7.3.7",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz",
"integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
@@ -2532,9 +2532,9 @@
}
},
"node_modules/word-wrap": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz",
"integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==",
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
"integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
"dev": true,
"engines": {
"node": ">=0.10.0"
@@ -3789,9 +3789,9 @@
"dev": true
},
"json5": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
"dev": true,
"requires": {
"minimist": "^1.2.0"
@@ -4119,9 +4119,9 @@
}
},
"semver": {
"version": "7.5.4",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
"integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
"version": "7.3.7",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz",
"integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
@@ -4344,9 +4344,9 @@
}
},
"word-wrap": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz",
"integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==",
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
"integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
"dev": true
},
"wrappy": {

File diff suppressed because it is too large.


@@ -16,10 +16,8 @@
"@actions/core": "^1.9.1",
"@actions/exec": "^1.1.1",
"@actions/io": "^1.1.2",
"@kubernetes/client-node": "^0.18.1",
"hooklib": "file:../hooklib",
"js-yaml": "^4.1.0",
"shlex": "^2.1.2"
"@kubernetes/client-node": "^0.16.3",
"hooklib": "file:../hooklib"
},
"devDependencies": {
"@types/jest": "^27.4.1",


@@ -41,9 +41,7 @@ export function getSecretName(): string {
export const MAX_POD_NAME_LENGTH = 63
export const STEP_POD_NAME_SUFFIX_LENGTH = 8
export const CONTAINER_EXTENSION_PREFIX = '$'
export const JOB_CONTAINER_NAME = 'job'
export const JOB_CONTAINER_EXTENSION_NAME = '$job'
export class RunnerInstanceLabel {
private podName: string


@@ -1,35 +1,25 @@
import * as core from '@actions/core'
import * as io from '@actions/io'
import * as k8s from '@kubernetes/client-node'
import {
JobContainerInfo,
ContextPorts,
PrepareJobArgs,
writeToResponseFile
} from 'hooklib'
import { ContextPorts, prepareJobArgs, writeToResponseFile } from 'hooklib'
import path from 'path'
import {
containerPorts,
createPod,
isPodContainerAlpine,
prunePods,
waitForPodPhases,
getPrepareJobTimeoutSeconds
waitForPodPhases
} from '../k8s'
import {
containerVolumes,
DEFAULT_CONTAINER_ENTRY_POINT,
DEFAULT_CONTAINER_ENTRY_POINT_ARGS,
generateContainerName,
mergeContainerWithOptions,
readExtensionFromFile,
PodPhase,
fixArgs
PodPhase
} from '../k8s/utils'
import { CONTAINER_EXTENSION_PREFIX, JOB_CONTAINER_NAME } from './constants'
import { JOB_CONTAINER_NAME } from './constants'
export async function prepareJob(
args: PrepareJobArgs,
args: prepareJobArgs,
responseFile
): Promise<void> {
if (!args.container) {
@@ -37,51 +27,29 @@ export async function prepareJob(
}
await prunePods()
const extension = readExtensionFromFile()
await copyExternalsToRoot()
let container: k8s.V1Container | undefined = undefined
if (args.container?.image) {
core.debug(`Using image '${args.container.image}' for job image`)
container = createContainerSpec(
args.container,
JOB_CONTAINER_NAME,
true,
extension
)
container = createPodSpec(args.container, JOB_CONTAINER_NAME, true)
}
let services: k8s.V1Container[] = []
if (args.services?.length) {
services = args.services.map(service => {
core.debug(`Adding service '${service.image}' to pod definition`)
return createContainerSpec(
service,
generateContainerName(service.image),
false,
extension
)
return createPodSpec(service, service.image.split(':')[0])
})
}
if (!container && !services?.length) {
throw new Error('No containers exist, skipping hook invocation')
}
let createdPod: k8s.V1Pod | undefined = undefined
try {
createdPod = await createPod(
container,
services,
args.container.registry,
extension
)
createdPod = await createPod(container, services, args.container.registry)
} catch (err) {
await prunePods()
core.debug(`createPod failed: ${JSON.stringify(err)}`)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to create job pod: ${message}`)
throw new Error(`failed to create job pod: ${err}`)
}
if (!createdPod?.metadata?.name) {
@@ -95,12 +63,11 @@ export async function prepareJob(
await waitForPodPhases(
createdPod.metadata.name,
new Set([PodPhase.RUNNING]),
new Set([PodPhase.PENDING]),
getPrepareJobTimeoutSeconds()
new Set([PodPhase.PENDING])
)
} catch (err) {
await prunePods()
throw new Error(`pod failed to come online with error: ${err}`)
throw new Error(`Pod failed to come online with error: ${err}`)
}
core.debug('Job pod is ready for traffic')
@@ -112,19 +79,14 @@ export async function prepareJob(
JOB_CONTAINER_NAME
)
} catch (err) {
core.debug(
`Failed to determine if the pod is alpine: ${JSON.stringify(err)}`
)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to determine if the pod is alpine: ${message}`)
throw new Error(`Failed to determine if the pod is alpine: ${err}`)
}
core.debug(`Setting isAlpine to ${isAlpine}`)
generateResponseFile(responseFile, args, createdPod, isAlpine)
generateResponseFile(responseFile, createdPod, isAlpine)
}
function generateResponseFile(
responseFile: string,
args: PrepareJobArgs,
appPod: k8s.V1Pod,
isAlpine
): void {
@@ -157,27 +119,26 @@ function generateResponseFile(
}
}
if (args.services?.length) {
const serviceContainerNames =
args.services?.map(s => generateContainerName(s.image)) || []
const serviceContainers = appPod.spec?.containers.filter(
c => c.name !== JOB_CONTAINER_NAME
)
if (serviceContainers?.length) {
response.context['services'] = serviceContainers.map(c => {
if (!c.ports) {
return
}
response.context['services'] = appPod?.spec?.containers
?.filter(c => serviceContainerNames.includes(c.name))
.map(c => {
const ctxPorts: ContextPorts = {}
if (c.ports?.length) {
for (const port of c.ports) {
ctxPorts[port.containerPort] = port.hostPort
}
}
const ctxPorts: ContextPorts = {}
for (const port of c.ports) {
ctxPorts[port.containerPort] = port.hostPort
}
return {
image: c.image,
ports: ctxPorts
}
})
return {
image: c.image,
ports: ctxPorts
}
})
}
writeToResponseFile(responseFile, JSON.stringify(response))
}
@@ -192,11 +153,10 @@ async function copyExternalsToRoot(): Promise<void> {
}
}
export function createContainerSpec(
container: JobContainerInfo,
function createPodSpec(
container,
name: string,
jobContainer = false,
extension?: k8s.V1PodTemplateSpec
jobContainer = false
): k8s.V1Container {
if (!container.entryPoint && jobContainer) {
container.entryPoint = DEFAULT_CONTAINER_ENTRY_POINT
@@ -206,20 +166,14 @@ export function createContainerSpec(
const podContainer = {
name,
image: container.image,
command: [container.entryPoint],
args: container.entryPointArgs,
ports: containerPorts(container)
} as k8s.V1Container
if (container.workingDirectory) {
podContainer.workingDir = container.workingDirectory
}
if (container.entryPoint) {
podContainer.command = [container.entryPoint]
}
if (container.entryPointArgs?.length > 0) {
podContainer.args = fixArgs(container.entryPointArgs)
}
podContainer.env = []
for (const [key, value] of Object.entries(
container['environmentVariables']
@@ -234,17 +188,5 @@ export function createContainerSpec(
jobContainer
)
if (!extension) {
return podContainer
}
const from = extension.spec?.containers?.find(
c => c.name === CONTAINER_EXTENSION_PREFIX + name
)
if (from) {
mergeContainerWithOptions(podContainer, from)
}
return podContainer
}


@@ -8,22 +8,24 @@ import {
getPodLogs,
getPodStatus,
waitForJobToComplete,
waitForPodPhases
waitForPodPhases,
containerBuild
} from '../k8s'
import {
containerVolumes,
fixArgs,
mergeContainerWithOptions,
DEFAULT_CONTAINER_ENTRY_POINT,
DEFAULT_CONTAINER_ENTRY_POINT_ARGS,
PodPhase,
readExtensionFromFile
writeEntryPointScript
} from '../k8s/utils'
import { JOB_CONTAINER_EXTENSION_NAME, JOB_CONTAINER_NAME } from './constants'
import { JOB_CONTAINER_NAME } from './constants'
export async function runContainerStep(
stepContainer: RunContainerStepArgs
): Promise<number> {
if (stepContainer.dockerfile) {
throw new Error('Building container actions is not currently supported')
const imageUrl = await containerBuild(stepContainer)
stepContainer.image = imageUrl
}
let secretName: string | undefined = undefined
@@ -31,20 +33,10 @@ export async function runContainerStep(
secretName = await createSecretForEnvs(stepContainer.environmentVariables)
}
const extension = readExtensionFromFile()
core.debug(`Created secret ${secretName} for container job envs`)
const container = createContainerSpec(stepContainer, secretName, extension)
let job: k8s.V1Job
try {
job = await createJob(container, extension)
} catch (err) {
core.debug(`createJob failed: ${JSON.stringify(err)}`)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to run script step: ${message}`)
}
const container = createPodSpec(stepContainer, secretName)
const job = await createJob(container)
if (!job.metadata?.name) {
throw new Error(
`Expected job ${JSON.stringify(
@@ -54,23 +46,10 @@ export async function runContainerStep(
}
core.debug(`Job created, waiting for pod to start: ${job.metadata?.name}`)
let podName: string
try {
podName = await getContainerJobPodName(job.metadata.name)
} catch (err) {
core.debug(`getContainerJobPodName failed: ${JSON.stringify(err)}`)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to get container job pod name: ${message}`)
}
const podName = await getContainerJobPodName(job.metadata.name)
await waitForPodPhases(
podName,
new Set([
PodPhase.COMPLETED,
PodPhase.RUNNING,
PodPhase.SUCCEEDED,
PodPhase.FAILED
]),
new Set([PodPhase.COMPLETED, PodPhase.RUNNING, PodPhase.SUCCEEDED]),
new Set([PodPhase.PENDING, PodPhase.UNKNOWN])
)
core.debug('Container step is running or complete, pulling logs')
@@ -79,7 +58,6 @@ export async function runContainerStep(
core.debug('Waiting for container job to complete')
await waitForJobToComplete(job.metadata.name)
// pod has failed so pull the status code from the container
const status = await getPodStatus(podName)
if (status?.phase === 'Succeeded') {
@@ -99,21 +77,24 @@ export async function runContainerStep(
return Number(exitCode) || 1
}
function createContainerSpec(
function createPodSpec(
container: RunContainerStepArgs,
secretName?: string,
extension?: k8s.V1PodTemplateSpec
secretName?: string
): k8s.V1Container {
const podContainer = new k8s.V1Container()
podContainer.name = JOB_CONTAINER_NAME
podContainer.image = container.image
podContainer.workingDir = container.workingDirectory
podContainer.command = container.entryPoint
? [container.entryPoint]
: undefined
podContainer.args = container.entryPointArgs?.length
? fixArgs(container.entryPointArgs)
: undefined
const { entryPoint, entryPointArgs } = container
container.entryPoint = 'sh'
const { containerPath } = writeEntryPointScript(
container.workingDirectory,
entryPoint || DEFAULT_CONTAINER_ENTRY_POINT,
entryPoint ? entryPointArgs || [] : DEFAULT_CONTAINER_ENTRY_POINT_ARGS
)
container.entryPointArgs = ['-e', containerPath]
podContainer.command = [container.entryPoint, ...container.entryPointArgs]
if (secretName) {
podContainer.envFrom = [
@@ -127,16 +108,5 @@ function createContainerSpec(
}
podContainer.volumeMounts = containerVolumes(undefined, false, true)
if (!extension) {
return podContainer
}
const from = extension.spec?.containers?.find(
c => c.name === JOB_CONTAINER_EXTENSION_NAME
)
if (from) {
mergeContainerWithOptions(podContainer, from)
}
return podContainer
}


@@ -1,6 +1,5 @@
/* eslint-disable @typescript-eslint/no-unused-vars */
import * as fs from 'fs'
import * as core from '@actions/core'
import { RunScriptStepArgs } from 'hooklib'
import { execPodStep } from '../k8s'
import { writeEntryPointScript } from '../k8s/utils'
@@ -29,9 +28,7 @@ export async function runScriptStep(
JOB_CONTAINER_NAME
)
} catch (err) {
core.debug(`execPodStep failed: ${JSON.stringify(err)}`)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to run script step: ${message}`)
throw new Error(`failed to run script step: ${err}`)
} finally {
fs.rmSync(runnerPath)
}


@@ -9,13 +9,15 @@ import {
import { isAuthPermissionsOK, namespace, requiredPermissions } from './k8s'
async function run(): Promise<void> {
try {
const input = await getInputFromStdin()
const input = await getInputFromStdin()
const args = input['args']
const command = input['command']
const responseFile = input['responseFile']
const state = input['state']
const args = input['args']
const command = input['command']
const responseFile = input['responseFile']
const state = input['state']
let exitCode = 0
try {
if (!(await isAuthPermissionsOK())) {
throw new Error(
`The Service account needs the following permissions ${JSON.stringify(
@@ -23,28 +25,28 @@ async function run(): Promise<void> {
)} on the pod resource in the '${namespace()}' namespace. Please contact your self hosted runner administrator.`
)
}
let exitCode = 0
switch (command) {
case Command.PrepareJob:
await prepareJob(args as prepareJobArgs, responseFile)
return process.exit(0)
break
case Command.CleanupJob:
await cleanupJob()
return process.exit(0)
break
case Command.RunScriptStep:
await runScriptStep(args, state, null)
return process.exit(0)
break
case Command.RunContainerStep:
exitCode = await runContainerStep(args)
return process.exit(exitCode)
break
case Command.runContainerStep:
default:
throw new Error(`Command not recognized: ${command}`)
}
} catch (error) {
core.error(error as Error)
process.exit(1)
exitCode = 1
}
process.exitCode = exitCode
}
void run()


@@ -1,6 +1,6 @@
import * as core from '@actions/core'
import * as k8s from '@kubernetes/client-node'
import { ContainerInfo, Registry } from 'hooklib'
import { RunContainerStepArgs, ContainerInfo, Registry } from 'hooklib'
import * as stream from 'stream'
import {
getJobPodName,
@@ -10,23 +10,25 @@ import {
getVolumeClaimName,
RunnerInstanceLabel
} from '../hooks/constants'
import { kanikoPod } from './kaniko'
import { v4 as uuidv4 } from 'uuid'
import { PodPhase } from './utils'
import {
PodPhase,
mergePodSpecWithOptions,
mergeObjectMeta,
useKubeScheduler,
fixArgs
} from './utils'
namespace,
kc,
k8sApi,
k8sBatchV1Api,
k8sAuthorizationV1Api,
localRegistryNodePort,
localRegistryHost,
localRegistryPort,
remoteRegistryHost,
remoteRegistryHandle,
remoteRegistrySecretName,
isLocalRegistrySet
} from './settings'
const kc = new k8s.KubeConfig()
kc.loadFromDefault()
const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
const k8sBatchV1Api = kc.makeApiClient(k8s.BatchV1Api)
const k8sAuthorizationV1Api = kc.makeApiClient(k8s.AuthorizationV1Api)
const DEFAULT_WAIT_FOR_POD_TIME_SECONDS = 10 * 60 // 10 min
export * from './settings'
export const POD_VOLUME_NAME = 'work'
@@ -54,20 +56,13 @@ export const requiredPermissions = [
verbs: ['get', 'list', 'create', 'delete'],
resource: 'jobs',
subresource: ''
},
{
group: '',
verbs: ['create', 'delete', 'get', 'list'],
resource: 'secrets',
subresource: ''
}
]
export async function createPod(
jobContainer?: k8s.V1Container,
services?: k8s.V1Container[],
registry?: Registry,
extension?: k8s.V1PodTemplateSpec
registry?: Registry
): Promise<k8s.V1Pod> {
const containers: k8s.V1Container[] = []
if (jobContainer) {
@@ -89,16 +84,11 @@ export async function createPod(
appPod.metadata.labels = {
[instanceLabel.key]: instanceLabel.value
}
appPod.metadata.annotations = {}
appPod.spec = new k8s.V1PodSpec()
appPod.spec.containers = containers
appPod.spec.restartPolicy = 'Never'
if (!useKubeScheduler()) {
appPod.spec.nodeName = await getCurrentNodeName()
}
appPod.spec.nodeName = await getCurrentNodeName()
const claimName = getVolumeClaimName()
appPod.spec.volumes = [
{
@@ -117,21 +107,12 @@ export async function createPod(
appPod.spec.imagePullSecrets = [secretReference]
}
if (extension?.metadata) {
mergeObjectMeta(appPod, extension.metadata)
}
if (extension?.spec) {
mergePodSpecWithOptions(appPod.spec, extension.spec)
}
const { body } = await k8sApi.createNamespacedPod(namespace(), appPod)
return body
}
export async function createJob(
container: k8s.V1Container,
extension?: k8s.V1PodTemplateSpec
container: k8s.V1Container
): Promise<k8s.V1Job> {
const runnerInstanceLabel = new RunnerInstanceLabel()
@@ -141,7 +122,6 @@ export async function createJob(
job.metadata = new k8s.V1ObjectMeta()
job.metadata.name = getStepPodName()
job.metadata.labels = { [runnerInstanceLabel.key]: runnerInstanceLabel.value }
job.metadata.annotations = {}
job.spec = new k8s.V1JobSpec()
job.spec.ttlSecondsAfterFinished = 300
@@ -149,15 +129,9 @@ export async function createJob(
job.spec.template = new k8s.V1PodTemplateSpec()
job.spec.template.spec = new k8s.V1PodSpec()
job.spec.template.metadata = new k8s.V1ObjectMeta()
job.spec.template.metadata.labels = {}
job.spec.template.metadata.annotations = {}
job.spec.template.spec.containers = [container]
job.spec.template.spec.restartPolicy = 'Never'
if (!useKubeScheduler()) {
job.spec.template.spec.nodeName = await getCurrentNodeName()
}
job.spec.template.spec.nodeName = await getCurrentNodeName()
const claimName = getVolumeClaimName()
job.spec.template.spec.volumes = [
@@ -167,17 +141,6 @@ export async function createJob(
}
]
if (extension) {
if (extension.metadata) {
// apply metadata both to the job and the pod created by the job
mergeObjectMeta(job, extension.metadata)
mergeObjectMeta(job.spec.template, extension.metadata)
}
if (extension.spec) {
mergePodSpecWithOptions(job.spec.template.spec, extension.spec)
}
}
const { body } = await k8sBatchV1Api.createNamespacedJob(namespace(), job)
return body
}
@@ -227,37 +190,31 @@ export async function execPodStep(
stdin?: stream.Readable
): Promise<void> {
const exec = new k8s.Exec(kc)
command = fixArgs(command)
// Exec returns a websocket. If websocket fails, we should reject the promise. Otherwise, websocket will call a callback. Since at that point, websocket is not failing, we can safely resolve or reject the promise.
await new Promise(function (resolve, reject) {
exec
.exec(
namespace(),
podName,
containerName,
command,
process.stdout,
process.stderr,
stdin ?? null,
false /* tty */,
resp => {
// kube.exec returns an error if exit code is not 0, but we can't actually get the exit code
if (resp.status === 'Success') {
resolve(resp.code)
} else {
core.debug(
JSON.stringify({
message: resp?.message,
details: resp?.details
})
)
reject(resp?.message)
}
await new Promise(async function (resolve, reject) {
await exec.exec(
namespace(),
podName,
containerName,
command,
process.stdout,
process.stderr,
stdin ?? null,
false /* tty */,
resp => {
// kube.exec returns an error if exit code is not 0, but we can't actually get the exit code
if (resp.status === 'Success') {
resolve(resp.code)
} else {
core.debug(
JSON.stringify({
message: resp?.message,
details: resp?.details
})
)
reject(resp?.message)
}
)
// If exec.exec fails, explicitly reject the outer promise
// eslint-disable-next-line github/no-then
.catch(e => reject(e))
}
)
})
}
@@ -367,13 +324,25 @@ export async function waitForPodPhases(
podName: string,
awaitingPhases: Set<PodPhase>,
backOffPhases: Set<PodPhase>,
maxTimeSeconds = DEFAULT_WAIT_FOR_POD_TIME_SECONDS
maxTimeSeconds = 10 * 60 // 10 min
): Promise<void> {
const backOffManager = new BackOffManager(maxTimeSeconds)
let phase: PodPhase = PodPhase.UNKNOWN
try {
while (true) {
phase = await getPodPhase(podName)
let retryCount = 0
while (retryCount < 3) {
try {
phase = await getPodPhase(podName)
} catch (err) {
const e = err as k8s.HttpError
if (e?.body?.reason === 'NotFound') {
retryCount++
await backOffManager.backOff()
continue
} else {
throw err
}
}
if (awaitingPhases.has(phase)) {
return
}
@@ -385,30 +354,12 @@ export async function waitForPodPhases(
}
await backOffManager.backOff()
}
throw new Error(`Failed to get pod phase after ${retryCount} attempts`)
} catch (error) {
throw new Error(`Pod ${podName} is unhealthy with phase status ${phase}`)
}
}
export function getPrepareJobTimeoutSeconds(): number {
const envTimeoutSeconds =
process.env['ACTIONS_RUNNER_PREPARE_JOB_TIMEOUT_SECONDS']
if (!envTimeoutSeconds) {
return DEFAULT_WAIT_FOR_POD_TIME_SECONDS
}
const timeoutSeconds = parseInt(envTimeoutSeconds, 10)
if (!timeoutSeconds || timeoutSeconds <= 0) {
core.warning(
`Prepare job timeout is invalid ("${timeoutSeconds}"): use an int > 0`
)
return DEFAULT_WAIT_FOR_POD_TIME_SECONDS
}
return timeoutSeconds
}
async function getPodPhase(podName: string): Promise<PodPhase> {
const podPhaseLookup = new Set<string>([
PodPhase.PENDING,
@@ -518,7 +469,7 @@ export async function isPodContainerAlpine(
[
'sh',
'-c',
`'[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1'`
"[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1"
],
podName,
containerName
@@ -530,6 +481,42 @@ export async function isPodContainerAlpine(
return isAlpine
}
export async function containerBuild(
args: RunContainerStepArgs
): Promise<string> {
let kanikoRegistry = ''
let pullRegistry = ''
let secretName: string | undefined = undefined
if (isLocalRegistrySet()) {
const host = `${localRegistryHost()}.${namespace()}.svc.cluster.local`
const port = localRegistryPort()
const uri = `${generateBuildHandle()}/${generateBuildImage()}`
kanikoRegistry = `${host}:${port}/${uri}`
pullRegistry = `localhost:${localRegistryNodePort()}/${uri}`
} else {
const uri = `${remoteRegistryHandle()}/${generateBuildImage()}`
if (remoteRegistryHost()) {
kanikoRegistry = `${remoteRegistryHost()}/${uri}`
} else {
kanikoRegistry = uri
}
pullRegistry = kanikoRegistry
secretName = remoteRegistrySecretName()
}
const pod = kanikoPod(args.dockerfile, kanikoRegistry, secretName)
if (!pod.metadata?.name) {
throw new Error('kaniko pod name is not set')
}
await k8sApi.createNamespacedPod(namespace(), pod)
await waitForPodPhases(
pod.metadata.name,
new Set([PodPhase.SUCCEEDED]),
new Set([PodPhase.PENDING, PodPhase.UNKNOWN, PodPhase.RUNNING])
)
return pullRegistry
}
async function getCurrentNodeName(): Promise<string> {
const resp = await k8sApi.readNamespacedPod(getRunnerPodName(), namespace())
@@ -540,20 +527,6 @@ async function getCurrentNodeName(): Promise<string> {
return nodeName
}
export function namespace(): string {
if (process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']) {
return process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']
}
const context = kc.getContexts().find(ctx => ctx.namespace)
if (!context?.namespace) {
throw new Error(
'Failed to determine namespace, falling back to `default`. Namespace should be set in context, or in env variable "ACTIONS_RUNNER_KUBERNETES_NAMESPACE"'
)
}
return context.namespace
}
class BackOffManager {
private backOffSeconds = 1
totalTime = 0
@@ -584,9 +557,6 @@ export function containerPorts(
container: ContainerInfo
): k8s.V1ContainerPort[] {
const ports: k8s.V1ContainerPort[] = []
if (!container.portMappings?.length) {
return ports
}
for (const portDefinition of container.portMappings) {
const portProtoSplit = portDefinition.split('/')
if (portProtoSplit.length > 2) {
@@ -622,7 +592,10 @@ export function containerPorts(
return ports
}
export async function getPodByName(name): Promise<k8s.V1Pod> {
const { body } = await k8sApi.readNamespacedPod(name, namespace())
return body
function generateBuildImage(): string {
return `${uuidv4()}:${uuidv4()}`
}
function generateBuildHandle(): string {
return uuidv4()
}


@@ -0,0 +1,95 @@
import * as k8s from '@kubernetes/client-node'
import * as path from 'path'
import {
getRunnerPodName,
getVolumeClaimName,
MAX_POD_NAME_LENGTH,
RunnerInstanceLabel
} from '../hooks/constants'
import { POD_VOLUME_NAME } from '.'
export const KANIKO_MOUNT_PATH = '/mnt/kaniko'
function getKanikoName(): string {
return `${getRunnerPodName().substring(
0,
MAX_POD_NAME_LENGTH - '-kaniko'.length
)}-kaniko`
}
export function kanikoPod(
dockerfile: string,
destination: string,
secretName?: string
): k8s.V1Pod {
const pod = new k8s.V1Pod()
pod.apiVersion = 'v1'
pod.kind = 'Pod'
pod.metadata = new k8s.V1ObjectMeta()
pod.metadata.name = getKanikoName()
const instanceLabel = new RunnerInstanceLabel()
pod.metadata.labels = {
[instanceLabel.key]: instanceLabel.value
}
const spec = new k8s.V1PodSpec()
const c = new k8s.V1Container()
c.image = 'gcr.io/kaniko-project/executor:latest'
c.name = 'kaniko'
c.imagePullPolicy = 'Always'
const prefix = (process.env.RUNNER_WORKSPACE as string).split('_work')[0]
const subPath = path
.dirname(dockerfile)
.substring(prefix.length + '_work/'.length)
c.volumeMounts = [
{
name: POD_VOLUME_NAME,
mountPath: KANIKO_MOUNT_PATH,
subPath,
readOnly: true
}
]
c.args = [
`--dockerfile=${path.basename(dockerfile)}`,
`--context=dir://${KANIKO_MOUNT_PATH}`,
`--destination=${destination}`
]
spec.containers = [c]
spec.dnsPolicy = 'ClusterFirst'
spec.restartPolicy = 'Never'
pod.spec = spec
const claimName: string = getVolumeClaimName()
pod.spec.volumes = [
{
name: POD_VOLUME_NAME,
persistentVolumeClaim: { claimName }
}
]
if (secretName) {
const volumeName = 'docker-registry'
pod.spec.volumes.push({
name: volumeName,
projected: {
sources: [
{
secret: {
name: secretName,
items: [
{
key: '.dockerconfigjson',
path: 'config.json'
}
]
}
}
]
}
})
c.volumeMounts.push({
name: volumeName,
mountPath: '/kaniko/.docker/'
})
}
return pod
}
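To make the subPath computation above concrete, a hedged walk-through with illustrative paths (neither path is mandated by the hooks):

import * as path from 'path'

// Suppose the runner workspace and a checked-out docker action live at:
const RUNNER_WORKSPACE = '/home/runner/_work/my-repo'
const dockerfile = '/home/runner/_work/_actions/owner/repo/ref/Dockerfile'

const prefix = RUNNER_WORKSPACE.split('_work')[0] // '/home/runner/'
const subPath = path
  .dirname(dockerfile)
  .substring(prefix.length + '_work/'.length)
// subPath === '_actions/owner/repo/ref', so the kaniko pod mounts only the
// action's directory from the shared volume, read-only, at KANIKO_MOUNT_PATH.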


@@ -0,0 +1,73 @@
import * as k8s from '@kubernetes/client-node'
export const kc = new k8s.KubeConfig()
kc.loadFromDefault()
export const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
export const k8sBatchV1Api = kc.makeApiClient(k8s.BatchV1Api)
export const k8sAuthorizationV1Api = kc.makeApiClient(k8s.AuthorizationV1Api)
export const POD_VOLUME_NAME = 'work'
export function namespace(): string {
if (process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']) {
return process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']
}
const context = kc.getContexts().find(ctx => ctx.namespace)
if (!context?.namespace) {
throw new Error(
'Failed to determine the namespace. Set it in the kube config context or in the env variable "ACTIONS_RUNNER_KUBERNETES_NAMESPACE"'
)
}
return context.namespace
}
export function isLocalRegistrySet(): boolean {
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_HOST'
return !!process.env[name]
}
export function localRegistryHost(): string {
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_HOST'
if (process.env[name]) {
return process.env[name]
}
throw new Error(`environment variable ${name} is not set`)
}
export function localRegistryPort(): number {
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_PORT'
if (process.env[name]) {
return parseInt(process.env[name], 10)
}
throw new Error(`environment variable ${name} is not set`)
}
export function localRegistryNodePort(): number {
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_NODE_PORT'
if (process.env[name]) {
return parseInt(process.env[name], 10)
}
throw new Error(`environment variable ${name} is not set`)
}
export function remoteRegistryHost(): string {
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_REMOTE_REGISTRY_HOST'
return process.env[name] || ''
}
export function remoteRegistryHandle(): string {
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_REMOTE_REGISTRY_HANDLE'
if (process.env[name]) {
return process.env[name]
}
throw new Error(`environment variable ${name} is not set`)
}
export function remoteRegistrySecretName(): string {
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_REMOTE_REGISTRY_SECRET_NAME'
if (process.env[name]) {
return process.env[name]
}
throw new Error(`environment variable ${name} is not set`)
}
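Taken together, the helpers above describe two mutually exclusive configurations. A sketch with example values (none of these are defaults shipped by the hooks):

// Local in-cluster registry: host, port, and node port are all required.
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_HOST = 'docker-registry'
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_PORT = '5000'
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_NODE_PORT = '31500'

// Remote registry: the handle is required, the host is optional (empty falls back
// to Docker Hub), and the secret name refers to a dockerconfigjson secret that the
// kaniko pod mounts at /kaniko/.docker/ for push credentials.
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_REMOTE_REGISTRY_HANDLE = 'my-handle'
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_REMOTE_REGISTRY_HOST = 'ghcr.io'
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_REMOTE_REGISTRY_SECRET_NAME = 'registry-credentials'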


@@ -1,20 +1,13 @@
import * as k8s from '@kubernetes/client-node'
import * as fs from 'fs'
import * as yaml from 'js-yaml'
import * as core from '@actions/core'
import { Mount } from 'hooklib'
import * as path from 'path'
import { v1 as uuidv4 } from 'uuid'
import { POD_VOLUME_NAME } from './index'
import { CONTAINER_EXTENSION_PREFIX } from '../hooks/constants'
import * as shlex from 'shlex'
export const DEFAULT_CONTAINER_ENTRY_POINT_ARGS = [`-f`, `/dev/null`]
export const DEFAULT_CONTAINER_ENTRY_POINT = 'tail'
export const ENV_HOOK_TEMPLATE_PATH = 'ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE'
export const ENV_USE_KUBE_SCHEDULER = 'ACTIONS_RUNNER_USE_KUBE_SCHEDULER'
export function containerVolumes(
userMountVolumes: Mount[] = [],
jobContainer = true,
@@ -41,11 +34,6 @@ export function containerVolumes(
name: POD_VOLUME_NAME,
mountPath: '/github/file_commands',
subPath: '_temp/_runner_file_commands'
},
{
name: POD_VOLUME_NAME,
mountPath: '/github/workflow',
subPath: '_temp/_github_workflow'
}
)
return mounts
@@ -123,22 +111,11 @@ export function writeEntryPointScript(
if (environmentVariables && Object.entries(environmentVariables).length) {
const envBuffer: string[] = []
for (const [key, value] of Object.entries(environmentVariables)) {
if (
key.includes(`=`) ||
key.includes(`'`) ||
key.includes(`"`) ||
key.includes(`$`)
) {
throw new Error(
`environment key ${key} is invalid - the key must not contain =, $, ', or "`
)
}
envBuffer.push(
`"${key}=${value
.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/\$/g, '\\$')
.replace(/`/g, '\\`')}"`
.replace(/=/g, '\\=')}"`
)
}
environmentPrefix = `env ${envBuffer.join(' ')} `
@@ -160,119 +137,6 @@ exec ${environmentPrefix} ${entryPoint} ${
}
}
export function generateContainerName(image: string): string {
const nameWithTag = image.split('/').pop()
const name = nameWithTag?.split(':').at(0)
if (!name) {
throw new Error(`Image definition '${image}' is invalid`)
}
return name
}
// Overwrite or append fields based on the container's create options
//
// Keep in mind that envs and volumes can be passed as fields in the container definition,
// so the default volume mounts and envs are appended first, and the create options are
// then used to append additional values
//
// The remaining fields are applied directly, with two exceptions: name and image
// cannot be overwritten, so overrides of those fields are skipped with a warning
export function mergeContainerWithOptions(
base: k8s.V1Container,
from: k8s.V1Container
): void {
for (const [key, value] of Object.entries(from)) {
if (key === 'name') {
if (value !== CONTAINER_EXTENSION_PREFIX + base.name) {
core.warning("Skipping name override: name can't be overwritten")
}
continue
} else if (key === 'image') {
core.warning("Skipping image override: image can't be overwritten")
continue
} else if (key === 'env') {
const envs = value as k8s.V1EnvVar[]
base.env = mergeLists(base.env, envs)
} else if (key === 'volumeMounts' && value) {
const volumeMounts = value as k8s.V1VolumeMount[]
base.volumeMounts = mergeLists(base.volumeMounts, volumeMounts)
} else if (key === 'ports' && value) {
const ports = value as k8s.V1ContainerPort[]
base.ports = mergeLists(base.ports, ports)
} else {
base[key] = value
}
}
}
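A short usage sketch of these merge semantics (the field values are illustrative):

const base = {
  name: 'job',
  image: 'node:16',
  env: [{ name: 'A', value: '1' }]
} as k8s.V1Container
const from = {
  name: 'other', // skipped with a warning unless it equals CONTAINER_EXTENSION_PREFIX + base.name
  image: 'ubuntu:latest', // skipped with a warning: image can't be overwritten
  env: [{ name: 'B', value: '2' }], // appended to base.env
  workingDir: '/work' // any other field is applied directly
} as k8s.V1Container
mergeContainerWithOptions(base, from)
// base.name === 'job', base.image === 'node:16',
// base.env contains A and B, base.workingDir === '/work'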
export function mergePodSpecWithOptions(
base: k8s.V1PodSpec,
from: k8s.V1PodSpec
): void {
for (const [key, value] of Object.entries(from)) {
if (key === 'containers') {
base.containers.push(
...from.containers.filter(
e => !e.name?.startsWith(CONTAINER_EXTENSION_PREFIX)
)
)
} else if (key === 'volumes' && value) {
const volumes = value as k8s.V1Volume[]
base.volumes = mergeLists(base.volumes, volumes)
} else {
base[key] = value
}
}
}
export function mergeObjectMeta(
base: { metadata?: k8s.V1ObjectMeta },
from: k8s.V1ObjectMeta
): void {
if (!base.metadata?.labels || !base.metadata?.annotations) {
throw new Error(
"Can't merge metadata: base.metadata.labels or base.metadata.annotations is undefined"
)
}
if (from?.labels) {
for (const [key, value] of Object.entries(from.labels)) {
if (base.metadata?.labels?.[key]) {
core.warning(`Label ${key} is already defined and will be overwritten`)
}
base.metadata.labels[key] = value
}
}
if (from?.annotations) {
for (const [key, value] of Object.entries(from.annotations)) {
if (base.metadata?.annotations?.[key]) {
core.warning(
`Annotation ${key} is already defined and will be overwritten`
)
}
base.metadata.annotations[key] = value
}
}
}
export function readExtensionFromFile(): k8s.V1PodTemplateSpec | undefined {
const filePath = process.env[ENV_HOOK_TEMPLATE_PATH]
if (!filePath) {
return undefined
}
const doc = yaml.load(fs.readFileSync(filePath, 'utf8'))
if (!doc || typeof doc !== 'object') {
throw new Error(`Failed to parse ${filePath}`)
}
return doc as k8s.V1PodTemplateSpec
}
export function useKubeScheduler(): boolean {
return process.env[ENV_USE_KUBE_SCHEDULER] === 'true'
}
export enum PodPhase {
PENDING = 'Pending',
RUNNING = 'Running',
@@ -281,16 +145,3 @@ export enum PodPhase {
UNKNOWN = 'Unknown',
COMPLETED = 'Completed'
}
function mergeLists<T>(base?: T[], from?: T[]): T[] {
const b: T[] = base || []
if (!from?.length) {
return b
}
b.push(...from)
return b
}
export function fixArgs(args: string[]): string[] {
return shlex.split(args.join(' '))
}
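fixArgs joins the arguments into a single string and re-tokenizes it with shell quoting rules via shlex, so quoted segments survive as single tokens. For example:

// Sketch: a naively split argument list is re-tokenized correctly.
fixArgs(['-c', '"echo hello world"'])
// => ['-c', 'echo hello world']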


@@ -1,15 +1,6 @@
import * as fs from 'fs'
import { containerPorts, POD_VOLUME_NAME } from '../src/k8s'
import {
containerVolumes,
generateContainerName,
writeEntryPointScript,
mergePodSpecWithOptions,
mergeContainerWithOptions,
readExtensionFromFile,
ENV_HOOK_TEMPLATE_PATH
} from '../src/k8s/utils'
import * as k8s from '@kubernetes/client-node'
import { containerVolumes, writeEntryPointScript } from '../src/k8s/utils'
import { TestHelper } from './test-setup'
let testHelper: TestHelper
@@ -54,81 +45,6 @@ describe('k8s utils', () => {
).toThrow()
})
it('should throw if environment variable name contains double quote', () => {
expect(() =>
writeEntryPointScript(
'/test',
'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
'SOME"_ENV': 'SOME_VALUE'
}
)
).toThrow()
})
it('should throw if environment variable name contains =', () => {
expect(() =>
writeEntryPointScript(
'/test',
'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
'SOME=ENV': 'SOME_VALUE'
}
)
).toThrow()
})
it('should throw if environment variable name contains single quote', () => {
expect(() =>
writeEntryPointScript(
'/test',
'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
"SOME'_ENV": 'SOME_VALUE'
}
)
).toThrow()
})
it('should throw if environment variable name contains dollar', () => {
expect(() =>
writeEntryPointScript(
'/test',
'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
SOME_$_ENV: 'SOME_VALUE'
}
)
).toThrow()
})
it('should escape double quote, dollar and backslash in environment variable values', () => {
const { runnerPath } = writeEntryPointScript(
'/test',
'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
DQUOTE: '"',
BACK_SLASH: '\\',
DOLLAR: '$'
}
)
expect(fs.existsSync(runnerPath)).toBe(true)
const script = fs.readFileSync(runnerPath, 'utf8')
expect(script).toContain('"DQUOTE=\\"')
expect(script).toContain('"BACK_SLASH=\\\\"')
expect(script).toContain('"DOLLAR=\\$"')
})
it('should return object with containerPath and runnerPath', () => {
const { containerPath, runnerPath } = writeEntryPointScript(
'/test',
@@ -185,20 +101,6 @@ describe('k8s utils', () => {
expect(volumes.find(e => e.mountPath === '/__w')).toBeTruthy()
})
it('should always have /github/workflow mount if working on container job or container action', () => {
let volumes = containerVolumes([], true, true)
expect(volumes.find(e => e.mountPath === '/github/workflow')).toBeTruthy()
volumes = containerVolumes([], true, false)
expect(volumes.find(e => e.mountPath === '/github/workflow')).toBeTruthy()
volumes = containerVolumes([], false, true)
expect(volumes.find(e => e.mountPath === '/github/workflow')).toBeTruthy()
volumes = containerVolumes([], false, false)
expect(
volumes.find(e => e.mountPath === '/github/workflow')
).toBeUndefined()
})
it('should have container action volumes', () => {
let volumes = containerVolumes([], true, true)
let workspace = volumes.find(e => e.mountPath === '/github/workspace')
@@ -219,10 +121,11 @@ describe('k8s utils', () => {
expect(fileCommands?.subPath).toBe('_temp/_runner_file_commands')
})
it('should have externals, github home mounts if job container', () => {
it('should have externals, github home and github workflow mounts if job container', () => {
const volumes = containerVolumes()
expect(volumes.find(e => e.mountPath === '/__e')).toBeTruthy()
expect(volumes.find(e => e.mountPath === '/github/home')).toBeTruthy()
expect(volumes.find(e => e.mountPath === '/github/workflow')).toBeTruthy()
})
it('should throw if user volume source volume path is not in workspace', () => {
@@ -318,211 +221,4 @@ describe('k8s utils', () => {
expect(() => containerPorts({ portMappings: ['1/tcp/udp'] })).toThrow()
})
})
describe('generate container name', () => {
it('should return the container name from image string', () => {
expect(
generateContainerName('public.ecr.aws/localstack/localstack')
).toEqual('localstack')
expect(
generateContainerName(
'public.ecr.aws/url/with/multiple/slashes/postgres:latest'
)
).toEqual('postgres')
expect(generateContainerName('postgres')).toEqual('postgres')
expect(generateContainerName('postgres:latest')).toEqual('postgres')
expect(generateContainerName('localstack/localstack')).toEqual(
'localstack'
)
expect(generateContainerName('localstack/localstack:latest')).toEqual(
'localstack'
)
})
it('should throw on invalid image string', () => {
expect(() =>
generateContainerName('localstack/localstack/:latest')
).toThrow()
expect(() => generateContainerName(':latest')).toThrow()
})
})
describe('read extension', () => {
beforeEach(async () => {
testHelper = new TestHelper()
await testHelper.initialize()
})
afterEach(async () => {
await testHelper.cleanup()
})
it('should throw if env variable is set but file does not exist', () => {
process.env[ENV_HOOK_TEMPLATE_PATH] =
'/path/that/does/not/exist/data.yaml'
expect(() => readExtensionFromFile()).toThrow()
})
it('should return undefined if env variable is not set', () => {
delete process.env[ENV_HOOK_TEMPLATE_PATH]
expect(readExtensionFromFile()).toBeUndefined()
})
it('should throw if file is empty', () => {
let filePath = testHelper.createFile('data.yaml')
process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
expect(() => readExtensionFromFile()).toThrow()
})
it('should throw if file is not valid yaml', () => {
let filePath = testHelper.createFile('data.yaml')
fs.writeFileSync(filePath, 'invalid yaml')
process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
expect(() => readExtensionFromFile()).toThrow()
})
it('should return object if file is valid', () => {
let filePath = testHelper.createFile('data.yaml')
fs.writeFileSync(
filePath,
`
metadata:
  labels:
    label-name: label-value
  annotations:
    annotation-name: annotation-value
spec:
  containers:
    - name: test
      image: node:14.16
    - name: job
      image: ubuntu:latest`
)
process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
const extension = readExtensionFromFile()
expect(extension).toBeDefined()
})
})
it('should merge container spec', () => {
const base = {
image: 'node:14.16',
name: 'test',
env: [
{
name: 'TEST',
value: 'TEST'
}
],
ports: [
{
containerPort: 8080,
hostPort: 8080,
protocol: 'TCP'
}
]
} as k8s.V1Container
const from = {
ports: [
{
containerPort: 9090,
hostPort: 9090,
protocol: 'TCP'
}
],
env: [
{
name: 'TEST_TWO',
value: 'TEST_TWO'
}
],
image: 'ubuntu:latest',
name: 'overwrite'
} as k8s.V1Container
const expectContainer = {
name: base.name,
image: base.image,
ports: [
...(base.ports as k8s.V1ContainerPort[]),
...(from.ports as k8s.V1ContainerPort[])
],
env: [...(base.env as k8s.V1EnvVar[]), ...(from.env as k8s.V1EnvVar[])]
}
const expectJobContainer = JSON.parse(JSON.stringify(expectContainer))
expectJobContainer.name = base.name
mergeContainerWithOptions(base, from)
expect(base).toStrictEqual(expectContainer)
})
it('should merge pod spec', () => {
const base = {
containers: [
{
image: 'node:14.16',
name: 'test',
env: [
{
name: 'TEST',
value: 'TEST'
}
],
ports: [
{
containerPort: 8080,
hostPort: 8080,
protocol: 'TCP'
}
]
}
],
restartPolicy: 'Never'
} as k8s.V1PodSpec
const from = {
securityContext: {
runAsUser: 1000,
fsGroup: 2000
},
restartPolicy: 'Always',
volumes: [
{
name: 'work',
emptyDir: {}
}
],
containers: [
{
image: 'ubuntu:latest',
name: 'side-car',
env: [
{
name: 'TEST',
value: 'TEST'
}
],
ports: [
{
containerPort: 8080,
hostPort: 8080,
protocol: 'TCP'
}
]
}
]
} as k8s.V1PodSpec
const expected = JSON.parse(JSON.stringify(base))
expected.securityContext = from.securityContext
expected.restartPolicy = from.restartPolicy
expected.volumes = from.volumes
expected.containers.push(from.containers[0])
mergePodSpecWithOptions(base, from)
expect(base).toStrictEqual(expected)
})
})


@@ -1,18 +1,8 @@
import * as fs from 'fs'
import * as path from 'path'
import { cleanupJob } from '../src/hooks'
import { createContainerSpec, prepareJob } from '../src/hooks/prepare-job'
import { prepareJob } from '../src/hooks/prepare-job'
import { TestHelper } from './test-setup'
import {
ENV_HOOK_TEMPLATE_PATH,
ENV_USE_KUBE_SCHEDULER,
generateContainerName,
readExtensionFromFile
} from '../src/k8s/utils'
import { getPodByName } from '../src/k8s'
import { V1Container } from '@kubernetes/client-node'
import * as yaml from 'js-yaml'
import { JOB_CONTAINER_NAME } from '../src/hooks/constants'
jest.useRealTimers()
@@ -81,114 +71,4 @@ describe('Prepare job', () => {
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).rejects.toThrow()
})
it('should not set command + args for service container if not passed in args', async () => {
const services = prepareJobData.args.services.map(service => {
return createContainerSpec(service, generateContainerName(service.image))
}) as [V1Container]
expect(services[0].command).toBe(undefined)
expect(services[0].args).toBe(undefined)
})
it('should determine alpine correctly', async () => {
prepareJobData.args.container.image = 'alpine:latest'
await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
expect(content.isAlpine).toBe(true)
})
it('should run pod with extensions applied', async () => {
process.env[ENV_HOOK_TEMPLATE_PATH] = path.join(
__dirname,
'../../../examples/extension.yaml'
)
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
delete process.env[ENV_HOOK_TEMPLATE_PATH]
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
const got = await getPodByName(content.state.jobPod)
expect(got.metadata?.annotations?.['annotated-by']).toBe('extension')
expect(got.metadata?.labels?.['labeled-by']).toBe('extension')
expect(got.spec?.securityContext?.runAsUser).toBe(1000)
expect(got.spec?.securityContext?.runAsGroup).toBe(3000)
// job container
expect(got.spec?.containers[0].name).toBe(JOB_CONTAINER_NAME)
expect(got.spec?.containers[0].image).toBe('node:14.16')
expect(got.spec?.containers[0].command).toEqual(['sh'])
expect(got.spec?.containers[0].args).toEqual(['-c', 'sleep 50'])
// service container
expect(got.spec?.containers[1].image).toBe('redis')
expect(got.spec?.containers[1].command).toBeFalsy()
expect(got.spec?.containers[1].args).toBeFalsy()
expect(got.spec?.containers[1].env).toEqual([
{ name: 'ENV2', value: 'value2' }
])
expect(got.spec?.containers[1].resources).toEqual({
requests: { memory: '1Mi', cpu: '1' },
limits: { memory: '1Gi', cpu: '2' }
})
// side-car
expect(got.spec?.containers[2].name).toBe('side-car')
expect(got.spec?.containers[2].image).toBe('ubuntu:latest')
expect(got.spec?.containers[2].command).toEqual(['sh'])
expect(got.spec?.containers[2].args).toEqual(['-c', 'sleep 60'])
})
it('should put only job and services in output context file', async () => {
process.env[ENV_HOOK_TEMPLATE_PATH] = path.join(
__dirname,
'../../../examples/extension.yaml'
)
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
expect(content.state.jobPod).toBeTruthy()
expect(content.context.container).toBeTruthy()
expect(content.context.services).toBeTruthy()
expect(content.context.services.length).toBe(1)
})
it('should not throw exception using kube scheduler', async () => {
// only for ReadWriteMany volumes or single node cluster
process.env[ENV_USE_KUBE_SCHEDULER] = 'true'
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
delete process.env[ENV_USE_KUBE_SCHEDULER]
})
test.each([undefined, null, []])(
'should not throw exception when portMapping=%p',
async pm => {
prepareJobData.args.services.forEach(s => {
s.portMappings = pm
})
await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
expect(() => content.context.services[0].image).not.toThrow()
}
)
})


@@ -1,17 +1,12 @@
import { runContainerStep } from '../src/hooks'
import { TestHelper } from './test-setup'
import { ENV_HOOK_TEMPLATE_PATH } from '../src/k8s/utils'
import * as fs from 'fs'
import * as yaml from 'js-yaml'
import { JOB_CONTAINER_EXTENSION_NAME } from '../src/hooks/constants'
jest.useRealTimers()
let testHelper: TestHelper
describe('Run container step with image', () => {
let testHelper: TestHelper
let runContainerStepData: any
describe('Run container step', () => {
beforeEach(async () => {
testHelper = new TestHelper()
await testHelper.initialize()
@@ -27,45 +22,9 @@ describe('Run container step', () => {
expect(exitCode).toBe(0)
})
it('should run pod with extensions applied', async () => {
const extension = {
metadata: {
annotations: {
foo: 'bar'
},
labels: {
bar: 'baz'
}
},
spec: {
containers: [
{
name: JOB_CONTAINER_EXTENSION_NAME,
command: ['sh'],
args: ['-c', 'echo test']
},
{
name: 'side-container',
image: 'ubuntu:latest',
command: ['sh'],
args: ['-c', 'echo test']
}
],
restartPolicy: 'Never',
securityContext: {
runAsUser: 1000,
runAsGroup: 3000
}
}
}
let filePath = testHelper.createFile()
fs.writeFileSync(filePath, yaml.dump(extension))
process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
await expect(
runContainerStep(runContainerStepData.args)
).resolves.not.toThrow()
delete process.env[ENV_HOOK_TEMPLATE_PATH]
it('should fail if the working directory does not exist', async () => {
runContainerStepData.args.workingDirectory = '/foo/bar'
await expect(runContainerStep(runContainerStepData.args)).rejects.toThrow()
})
it('should have env variables available', async () => {
@@ -79,3 +38,33 @@ describe('Run container step', () => {
).resolves.not.toThrow()
})
})
describe('run container step with docker build', () => {
let testHelper: TestHelper
let runContainerStepData: any
beforeEach(async () => {
testHelper = new TestHelper()
await testHelper.initialize()
runContainerStepData = testHelper.getRunContainerStepDefinition()
})
afterEach(async () => {
await testHelper.cleanup()
})
it('should build container and execute docker action', async () => {
const { registryName, localRegistryPort, nodePort } =
await testHelper.createContainerRegistry()
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_HOST =
registryName
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_PORT =
localRegistryPort.toString()
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_NODE_PORT =
nodePort.toString()
const actionPath = testHelper.initializeDockerAction()
const data = JSON.parse(JSON.stringify(runContainerStepData))
data.args.dockerfile = `${actionPath}/Dockerfile`
await expect(runContainerStep(data.args)).resolves.not.toThrow()
})
})


@@ -59,6 +59,7 @@ describe('Run script step', () => {
it('should have env variables available', async () => {
runScriptStepDefinition.args.entryPoint = 'bash'
runScriptStepDefinition.args.workingDirectory = '/' // set to '/' so that cd does not throw
runScriptStepDefinition.args.entryPointArgs = [
'-c',
"'if [[ -z $NODE_ENV ]]; then exit 1; fi'"
@@ -89,28 +90,6 @@ describe('Run script step', () => {
).resolves.not.toThrow()
})
it('Dollar symbols in environment variables should not be expanded', async () => {
runScriptStepDefinition.args.environmentVariables = {
VARIABLE1: '$VAR',
VARIABLE2: '${VAR}',
VARIABLE3: '$(VAR)'
}
runScriptStepDefinition.args.entryPointArgs = [
'-c',
'\'if [[ -z "$VARIABLE1" ]]; then exit 1; fi\'',
'\'if [[ -z "$VARIABLE2" ]]; then exit 2; fi\'',
'\'if [[ -z "$VARIABLE3" ]]; then exit 3; fi\''
]
await expect(
runScriptStep(
runScriptStepDefinition.args,
prepareJobOutputData.state,
null
)
).resolves.not.toThrow()
})
it('Should have path variable changed in container with prepend path string array', async () => {
runScriptStepDefinition.args.prependPath = ['/some/other/path']
runScriptStepDefinition.args.entryPoint = '/bin/bash'


@@ -2,7 +2,10 @@ import * as k8s from '@kubernetes/client-node'
import * as fs from 'fs'
import { HookData } from 'hooklib/lib'
import * as path from 'path'
import internal from 'stream'
import { v4 as uuidv4 } from 'uuid'
import { waitForPodPhases } from '../src/k8s'
import { PodPhase } from '../src/k8s/utils'
const kc = new k8s.KubeConfig()
@@ -10,6 +13,7 @@ kc.loadFromDefault()
const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
const k8sStorageApi = kc.makeApiClient(k8s.StorageV1Api)
const k8sAppsV1 = kc.makeApiClient(k8s.AppsV1Api)
export class TestHelper {
private tempDirPath: string
@@ -74,10 +78,19 @@ export class TestHelper {
0
)
.catch(e => {})
await k8sApi
.deleteNamespacedPod(
`${this.podName}-kaniko`,
'default',
undefined,
undefined,
0
)
.catch(e => {})
}
public createFile(fileName?: string): string {
public createFile(fileName?: string, content = ''): string {
const filePath = `${this.tempDirPath}/${fileName || uuidv4()}`
fs.writeFileSync(filePath, '')
fs.writeFileSync(filePath, content)
return filePath
}
@@ -193,4 +206,237 @@ export class TestHelper {
runContainerStep.args.registry = null
return runContainerStep
}
public async createContainerRegistry(): Promise<{
registryName: string
localRegistryPort: number
nodePort: number
}> {
const registryName = 'docker-registry'
const localRegistryPort = 5000
const nodePort = 31500
const cm = registryConfigMap(registryName, localRegistryPort)
const secret = registrySecret(registryName)
const ss = registryStatefulSet(registryName, localRegistryPort)
const svc = registryService(registryName, localRegistryPort, nodePort)
const namespace =
process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE'] || 'default'
await Promise.all([
k8sApi.createNamespacedConfigMap(namespace, cm),
k8sApi.createNamespacedSecret(namespace, secret)
])
await k8sAppsV1.createNamespacedStatefulSet(namespace, ss)
await waitForPodPhases(
`${registryName}-0`,
new Set([PodPhase.RUNNING]),
new Set([PodPhase.PENDING])
)
await k8sApi.createNamespacedService(namespace, svc)
return {
registryName,
localRegistryPort,
nodePort
}
}
public initializeDockerAction(): string {
const actionPath = `${this.tempDirPath}/_work/_actions/example-handle/example-repo/example-branch/mock-directory`
fs.mkdirSync(actionPath, { recursive: true })
this.writeDockerfile(actionPath)
this.writeEntrypoint(actionPath)
return actionPath
}
private writeDockerfile(actionPath: string) {
const content = `FROM ubuntu:latest
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]`
fs.writeFileSync(`${actionPath}/Dockerfile`, content)
}
private writeEntrypoint(actionPath: string) {
const content = `#!/bin/sh -l
echo "Hello $1"
time=$(date)
echo "::set-output name=time::$time"`
const entryPointPath = `${actionPath}/entrypoint.sh`
fs.writeFileSync(entryPointPath, content)
fs.chmodSync(entryPointPath, 0o755)
}
}
function registryConfigMap(name: string, port: number): k8s.V1ConfigMap {
const REGISTRY_CONFIG_MAP_YAML = `
storage:
  filesystem:
    rootdirectory: /var/lib/registry
  maxthreads: 100
  cache:
    blobdescriptor: inmemory
health:
  storagedriver:
    enabled: true
    interval: 10s
    threshold: 3
http:
  addr: :${port}
  headers:
    X-Content-Type-Options:
      - nosniff
log:
  fields:
    service: registry
version: 0.1
`.trim()
const cm = new k8s.V1ConfigMap()
cm.apiVersion = 'v1'
cm.data = {
'config.yaml': REGISTRY_CONFIG_MAP_YAML
}
cm.kind = 'ConfigMap'
cm.metadata = new k8s.V1ObjectMeta()
cm.metadata.labels = { app: name }
cm.metadata.name = `${name}-config`
return cm
}
function registryStatefulSet(name: string, port: number): k8s.V1StatefulSet {
const ss = new k8s.V1StatefulSet()
ss.apiVersion = 'apps/v1'
ss.metadata = new k8s.V1ObjectMeta()
ss.metadata.name = name
const spec = new k8s.V1StatefulSetSpec()
spec.selector = new k8s.V1LabelSelector()
spec.selector.matchLabels = { app: name } // must match the pod template labels below
spec.serviceName = 'registry'
spec.replicas = 1
const tmpl = new k8s.V1PodTemplateSpec()
tmpl.metadata = new k8s.V1ObjectMeta()
tmpl.metadata.labels = { app: name }
tmpl.spec = new k8s.V1PodSpec()
tmpl.spec.terminationGracePeriodSeconds = 5 // TODO: figure out for how long
const c = new k8s.V1Container()
c.command = ['/bin/registry', 'serve', '/etc/docker/registry/config.yaml']
c.env = [
{
name: 'REGISTRY_HTTP_SECRET',
valueFrom: {
secretKeyRef: {
key: 'haSharedSecret',
name: `${name}-secret`
}
}
},
{
name: 'REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY',
value: '/var/lib/registry'
}
]
c.image = 'registry:2.6.2'
c.name = name
c.imagePullPolicy = 'IfNotPresent'
c.ports = [
{
containerPort: port,
protocol: 'TCP'
}
]
c.volumeMounts = [
{
mountPath: '/etc/docker/registry',
name: `${name}-config`
}
]
c.livenessProbe = new k8s.V1Probe()
c.livenessProbe.failureThreshold = 3
c.livenessProbe.periodSeconds = 10
c.livenessProbe.successThreshold = 1
c.livenessProbe.timeoutSeconds = 1
c.livenessProbe.httpGet = new k8s.V1HTTPGetAction()
c.livenessProbe.httpGet.path = '/'
c.livenessProbe.httpGet.port = port
c.livenessProbe.httpGet.scheme = 'HTTP'
c.readinessProbe = new k8s.V1Probe()
c.readinessProbe.failureThreshold = 3
c.readinessProbe.periodSeconds = 10
c.readinessProbe.successThreshold = 1
c.readinessProbe.timeoutSeconds = 1
c.readinessProbe.httpGet = new k8s.V1HTTPGetAction()
c.readinessProbe.httpGet.path = '/'
c.readinessProbe.httpGet.port = port
c.readinessProbe.httpGet.scheme = 'HTTP'
tmpl.spec.containers = [c]
tmpl.spec.volumes = [
{
name: `${name}-config`,
configMap: {
name: `${name}-config`
}
}
]
spec.template = tmpl
ss.spec = spec
return ss
}
function registryService(
name: string,
port: number,
nodePort: number
): k8s.V1Service {
const svc = new k8s.V1Service()
svc.apiVersion = 'v1'
svc.kind = 'Service'
svc.metadata = new k8s.V1ObjectMeta()
svc.metadata.name = name
svc.metadata.labels = {
app: name
}
const spec = new k8s.V1ServiceSpec()
spec.externalTrafficPolicy = 'Cluster'
spec.ports = [
{
name: 'registry',
nodePort: nodePort,
port: port,
protocol: 'TCP',
targetPort: port
}
]
spec.selector = {
app: name
}
spec.sessionAffinity = 'None'
spec.type = 'NodePort'
svc.spec = spec
return svc
}
function registrySecret(name: string): k8s.V1Secret {
const secret = new k8s.V1Secret()
secret.apiVersion = 'v1'
secret.data = { haSharedSecret: 'U29tZVZlcnlTdHJpbmdTZWNyZXQK' } // base64 of 'SomeVeryStringSecret', test-only value
secret.kind = 'Secret'
secret.metadata = new k8s.V1ObjectMeta()
secret.metadata.labels = {
app: name,
chart: `${name}-1.4.3`
}
secret.metadata.name = `${name}-secret`
secret.type = 'Opaque'
return secret
}


@@ -1,19 +1,9 @@
<!-- ## Features -->
## Features
- Always use the Docker-related ENVs from the host machine instead of ENVs from the runner job [#40]
- Use user-defined entrypoints for service containers (instead of `tail -f /dev/null`)
## Bugs
- Fixed substring issue with /github/workspace and /github/file_commands [#35]
- Fixed issue related to setting hostPort and containerPort when formatting is not recognized by k8s default [#38]
- Skip writing extension containers in output context file [#154]
## Misc
- Bump ws from 7.5.7 to 7.5.10 in /packages/docker [#170]
- Bump braces from 3.0.2 to 3.0.3 in /packages/docker [#171]
- Bump tar from 6.1.11 to 6.2.1 in /packages/k8s [#156]
## SHA-256 Checksums
The SHA-256 checksums for the packages included in this build are shown below:
- actions-runner-hooks-docker-<HOOK_VERSION>.zip <DOCKER_SHA>
- actions-runner-hooks-k8s-<HOOK_VERSION>.zip <K8S_SHA>
<!-- ## Misc