Compare commits


10 Commits

Author SHA1 Message Date
Ferenc Hammerl  b3df7ec55b  Update 0034-build-docker-with-kaniko.md  2023-01-26 17:58:10 +01:00
Ferenc Hammerl  16276a2a22  Update 0034-build-docker-with-kaniko.md  2023-01-09 17:44:09 +01:00
Ferenc Hammerl  abeebd2a37  Update 0034-build-docker-with-kaniko.md  2023-01-09 16:48:42 +01:00
Ferenc Hammerl  2eecf2d378  Update 0034-build-docker-with-kaniko.md  2023-01-04 12:20:08 +01:00
Ferenc Hammerl  0c38d44dbd  Update 0034-build-docker-with-kaniko.md  2023-01-04 12:18:51 +01:00
Ferenc Hammerl  a62b81fc95  Update 0034-build-docker-with-kaniko.md  2022-12-15 14:59:53 +01:00
Ferenc Hammerl  ae0066ae41  Update 0034-build-docker-with-kaniko.md  2022-12-15 14:05:32 +01:00
Ferenc Hammerl  6c9241fb0e  Update 0034-build-docker-with-kaniko.md  2022-10-04 16:33:41 +02:00
Ferenc Hammerl  efe66bb99b  Update id of md file  2022-10-04 13:24:13 +02:00
Ferenc Hammerl  9a50e3a796  Add Kaniko ADR  2022-10-04 13:02:19 +02:00
60 changed files with 19601 additions and 17593 deletions

.eslintignore (new file, 4 lines)

@@ -0,0 +1,4 @@
dist/
lib/
node_modules/
**/tests/**

.eslintrc.json (new file, 56 lines)

@@ -0,0 +1,56 @@
{
"plugins": ["@typescript-eslint"],
"extends": ["plugin:github/recommended"],
"parser": "@typescript-eslint/parser",
"parserOptions": {
"ecmaVersion": 9,
"sourceType": "module",
"project": "./tsconfig.json"
},
"rules": {
"eslint-comments/no-use": "off",
"import/no-namespace": "off",
"no-constant-condition": "off",
"no-unused-vars": "off",
"i18n-text/no-en": "off",
"@typescript-eslint/no-unused-vars": "error",
"@typescript-eslint/explicit-member-accessibility": ["error", {"accessibility": "no-public"}],
"@typescript-eslint/no-require-imports": "error",
"@typescript-eslint/array-type": "error",
"@typescript-eslint/await-thenable": "error",
"camelcase": "off",
"@typescript-eslint/explicit-function-return-type": ["error", {"allowExpressions": true}],
"@typescript-eslint/func-call-spacing": ["error", "never"],
"@typescript-eslint/no-array-constructor": "error",
"@typescript-eslint/no-empty-interface": "error",
"@typescript-eslint/no-explicit-any": "warn",
"@typescript-eslint/no-extraneous-class": "error",
"@typescript-eslint/no-floating-promises": "error",
"@typescript-eslint/no-for-in-array": "error",
"@typescript-eslint/no-inferrable-types": "error",
"@typescript-eslint/no-misused-new": "error",
"@typescript-eslint/no-namespace": "error",
"@typescript-eslint/no-non-null-assertion": "warn",
"@typescript-eslint/no-unnecessary-qualifier": "error",
"@typescript-eslint/no-unnecessary-type-assertion": "error",
"@typescript-eslint/no-useless-constructor": "error",
"@typescript-eslint/no-var-requires": "error",
"@typescript-eslint/prefer-for-of": "warn",
"@typescript-eslint/prefer-function-type": "warn",
"@typescript-eslint/prefer-includes": "error",
"@typescript-eslint/prefer-string-starts-ends-with": "error",
"@typescript-eslint/promise-function-async": "error",
"@typescript-eslint/require-array-sort-compare": "error",
"@typescript-eslint/restrict-plus-operands": "error",
"semi": "off",
"@typescript-eslint/semi": ["error", "never"],
"@typescript-eslint/type-annotation-spacing": "error",
"@typescript-eslint/unbound-method": "error",
"no-shadow": "off",
"@typescript-eslint/no-shadow": ["error"]
},
"env": {
"node": true,
"es6": true
}
}

.gitattributes (vendored, 1 line)

@@ -1 +0,0 @@
*.png filter=lfs diff=lfs merge=lfs -text


@@ -1,28 +0,0 @@
version: 2
updates:
# Group updates into a single PR per workspace package
- package-ecosystem: npm
directory: "/packages/docker"
schedule:
interval: weekly
groups:
all-dependencies:
patterns:
- "*"
- package-ecosystem: npm
directory: "/packages/hooklib"
schedule:
interval: weekly
groups:
all-dependencies:
patterns:
- "*"
- package-ecosystem: npm
directory: "/packages/k8s"
schedule:
interval: weekly
groups:
all-dependencies:
patterns:
- "*"


@@ -6,50 +6,14 @@ on:
     paths-ignore:
       - '**.md'
   workflow_dispatch:
 jobs:
-  format-and-lint:
-    name: Format & Lint Checks
+  build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
-      - run: npm install
-        name: Install dependencies
-      - run: npm run bootstrap
-        name: Bootstrap the packages
-      - run: npm run build-all
-        name: Build packages
-      - run: npm run format-check
-        name: Check formatting
-      - name: Check linter
-        run: |
-          npm run lint
-          git diff --exit-code -- . ':!packages/k8s/tests/test-kind.yaml'
-  docker-tests:
-    name: Docker Hook Tests
-    runs-on: ubuntu-latest
-    needs: format-and-lint
-    steps:
-      - uses: actions/checkout@v5
-      - run: npm install
-        name: Install dependencies
-      - run: npm run bootstrap
-        name: Bootstrap the packages
-      - run: npm run build-all
-        name: Build packages
-      - name: Run Docker tests
-        run: npm run test --prefix packages/docker
-  k8s-tests:
-    name: Kubernetes Hook Tests
-    runs-on: ubuntu-latest
-    needs: format-and-lint
-    steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v3
       - run: sed -i "s|{{PATHTOREPO}}|$(pwd)|" packages/k8s/tests/test-kind.yaml
         name: Setup kind cluster yaml config
-      - uses: helm/kind-action@v1.12.0
+      - uses: helm/kind-action@v1.2.0
         with:
           config: packages/k8s/tests/test-kind.yaml
       - run: npm install
@@ -58,5 +22,10 @@ jobs:
         name: Bootstrap the packages
       - run: npm run build-all
         name: Build packages
-      - name: Run Kubernetes tests
-        run: npm run test --prefix packages/k8s
+      - run: npm run format-check
+      - name: Check linter
+        run: |
+          npm run lint
+          git diff --exit-code -- ':!packages/k8s/tests/test-kind.yaml'
+      - name: Run tests
+        run: npm run test


@@ -38,11 +38,11 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v3
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v3
+        uses: github/codeql-action/init@v2
         with:
           languages: ${{ matrix.language }}
           # If you wish to specify custom queries, you can do so here or in a config file.
@@ -56,7 +56,7 @@ jobs:
       # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
       # If this step fails, then you should remove it and run the build manually (see below)
       - name: Autobuild
-        uses: github/codeql-action/autobuild@v3
+        uses: github/codeql-action/autobuild@v2
       # Command-line programs to run using the OS shell.
       # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -69,4 +69,4 @@ jobs:
       #   ./location_of_script_within_repo/buildscript.sh
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v3
+        uses: github/codeql-action/analyze@v2


@@ -1,70 +1,57 @@
 name: CD - Release new version
 on:
   workflow_dispatch:
-permissions:
-  contents: write
 jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
-      - name: Install dependencies
-        run: npm install
-      - name: Bootstrap the packages
-        run: npm run bootstrap
-      - name: Build packages
-        run: npm run build-all
-      - uses: actions/github-script@v8
-        id: releaseVersion
-        with:
-          result-encoding: string
-          script: |
-            const fs = require('fs');
-            return require('./package.json').version
-      - name: Zip up releases
-        run: |
-          zip -r -j actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.result }}.zip packages/docker/dist
-          zip -r -j actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.result }}.zip packages/k8s/dist
-      - name: Calculate SHA
-        id: sha
-        shell: bash
-        run: |
-          sha_docker=$(sha256sum actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.result }}.zip | awk '{print $1}')
-          echo "Docker SHA: $sha_docker"
-          echo "docker-sha=$sha_docker" >> $GITHUB_OUTPUT
-          sha_k8s=$(sha256sum actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.result }}.zip | awk '{print $1}')
-          echo "K8s SHA: $sha_k8s"
-          echo "k8s-sha=$sha_k8s" >> $GITHUB_OUTPUT
-      - name: Create release notes
-        id: releaseNotes
-        uses: actions/github-script@v8
-        with:
-          script: |
-            const fs = require('fs');
-            var releaseNotes = fs.readFileSync('${{ github.workspace }}/releaseNotes.md', 'utf8').replace(/<HOOK_VERSION>/g, '${{ steps.releaseVersion.outputs.result }}')
-            releaseNotes = releaseNotes.replace(/<DOCKER_SHA>/g, '${{ steps.sha.outputs.docker-sha }}')
-            releaseNotes = releaseNotes.replace(/<K8S_SHA>/g, '${{ steps.sha.outputs.k8s-sha }}')
-            console.log(releaseNotes)
-            fs.writeFileSync('${{ github.workspace }}/finalReleaseNotes.md', releaseNotes);
-      - name: Create ${{ steps.releaseVersion.outputs.result }} Hook Release
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          gh release create v${{ steps.releaseVersion.outputs.result }} \
-            --title "v${{ steps.releaseVersion.outputs.result }}" \
-            --repo ${{ github.repository }} \
-            --notes-file ${{ github.workspace }}/finalReleaseNotes.md \
-            --latest \
-            ${{ github.workspace }}/actions-runner-hooks-k8s-${{ steps.releaseVersion.outputs.result }}.zip \
-            ${{ github.workspace }}/actions-runner-hooks-docker-${{ steps.releaseVersion.outputs.result }}.zip
+      - uses: actions/checkout@v3
+      - run: npm install
+        name: Install dependencies
+      - run: npm run bootstrap
+        name: Bootstrap the packages
+      - run: npm run build-all
+        name: Build packages
+      - uses: actions/github-script@v6
+        id: releaseNotes
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const fs = require('fs');
+            const hookVersion = require('./package.json').version
+            var releaseNotes = fs.readFileSync('${{ github.workspace }}/releaseNotes.md', 'utf8').replace(/<HOOK_VERSION>/g, hookVersion)
+            console.log(releaseNotes)
+            core.setOutput('version', hookVersion);
+            core.setOutput('note', releaseNotes);
+      - name: Zip up releases
+        run: |
+          zip -r -j actions-runner-hooks-docker-${{ steps.releaseNotes.outputs.version }}.zip packages/docker/dist
+          zip -r -j actions-runner-hooks-k8s-${{ steps.releaseNotes.outputs.version }}.zip packages/k8s/dist
+      - uses: actions/create-release@v1
+        id: createRelease
+        name: Create ${{ steps.releaseNotes.outputs.version }} Hook Release
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: "v${{ steps.releaseNotes.outputs.version }}"
+          release_name: "v${{ steps.releaseNotes.outputs.version }}"
+          body: |
+            ${{ steps.releaseNotes.outputs.note }}
+      - name: Upload K8s hooks
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.createRelease.outputs.upload_url }}
+          asset_path: ${{ github.workspace }}/actions-runner-hooks-k8s-${{ steps.releaseNotes.outputs.version }}.zip
+          asset_name: actions-runner-hooks-k8s-${{ steps.releaseNotes.outputs.version }}.zip
+          asset_content_type: application/octet-stream
+      - name: Upload docker hooks
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.createRelease.outputs.upload_url }}
+          asset_path: ${{ github.workspace }}/actions-runner-hooks-docker-${{ steps.releaseNotes.outputs.version }}.zip
+          asset_name: actions-runner-hooks-docker-${{ steps.releaseNotes.outputs.version }}.zip
+          asset_content_type: application/octet-stream


@@ -1 +1 @@
-* @actions/actions-compute @nikola-jokic
+* @actions/actions-runtime @actions/runner-akvelon


@@ -13,7 +13,7 @@ You'll need a runner compatible with hooks, a repository with container workflow
 - You'll need a runner compatible with hooks, a repository with container workflows to which you can register the runner and the hooks from this repository.
 - See [the runner contributing.md](../../github/CONTRIBUTING.MD) for how to get started with runner development.
 - Build your hook using `npm run build`
-- Enable the hooks by setting `ACTIONS_RUNNER_CONTAINER_HOOKS=./packages/{libraryname}/dist/index.js` file generated by [ncc](https://github.com/vercel/ncc)
+- Enable the hooks by setting `ACTIONS_RUNNER_CONTAINER_HOOK=./packages/{libraryname}/dist/index.js` file generated by [ncc](https://github.com/vercel/ncc)
 - Configure your self hosted runner against the a repository you have admin access
 - Run a workflow with a container job, for example
 ```


@@ -3,24 +3,6 @@ The Runner Container Hooks repo provides a set of packages that implement the co
 More information on how to implement your own hooks can be found in the [adr](https://github.com/actions/runner/pull/1891). The `examples` folder provides example inputs for each hook.
-### Note
-Thank you for your interest in this GitHub action, however, right now we are not taking contributions.
-We continue to focus our resources on strategic areas that help our customers be successful while making developers' lives easier. While GitHub Actions remains a key part of this vision, we are allocating resources towards other areas of Actions and are not taking contributions to this repository at this time. The GitHub public roadmap is the best place to follow along for any updates on features we're working on and what stage they're in.
-We are taking the following steps to better direct requests related to GitHub Actions, including:
-1. We will be directing questions and support requests to our [Community Discussions area](https://github.com/orgs/community/discussions/categories/actions)
-2. High Priority bugs can be reported through Community Discussions or you can report these to our support team https://support.github.com/contact/bug-report.
-3. Security Issues should be handled as per our [security.md](security.md)
-We will still provide security updates for this project and fix major breaking changes during this time.
-You are welcome to still raise bugs in this repo.
 ## Background
 Three projects are included in the `packages` folder
@@ -28,6 +10,10 @@ Three projects are included in the `packages` folder
 - docker: A hook implementation of the runner's docker implementation. More details can be found in the [readme](./packages/docker/README.md)
 - hooklib: a shared library which contains typescript definitions and utilities that the other projects consume
+### Requirements
+We welcome contributions. See [how to contribute to get started](./CONTRIBUTING.md).
 ## License
 This project is licensed under the terms of the MIT open source license. Please refer to [MIT](./LICENSE.md) for the full terms.
@@ -42,4 +28,4 @@ Find a bug? Please file an issue in this repository using the issue templates.
 ## Code of Conduct
 See our [Code of Conduct](./CODE_OF_CONDUCT.MD)


@@ -0,0 +1,64 @@
# ADR 0034: Build container-action Dockerfiles with Kaniko
**Date**: 2023-01-26
**Status**: In Progress
# Background
[Building Dockerfiles in k8s using Kaniko](https://github.com/actions/runner-container-hooks/issues/23) has been on the radar since the beginning of container hooks.
Currently, this is possible in ARC using a [dind/docker-in-docker](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/runner/actions-runner-dind.dockerfile) sidecar container.
This container needs to be launched using `--privileged`, which presents a security concern.
As an alternative tool, a container running [Kaniko](https://github.com/GoogleContainerTools/kaniko) can be used to build these files instead.
Kaniko doesn't need to be `--privileged`.
Whether using a dind/docker-in-docker sidecar or Kaniko, in this ADR I will refer to these containers as '**builder containers**'.
# Guiding Principles
- **Security:** running a Kaniko builder container should be possible without the `--privileged` flag
- **Feature parity with Docker:** Any 'Dockerfile' that can be built with vanilla Docker should also be possible to build using a Kaniko build container
- **Ease of Use:** The customer should be able to build and push Docker images with minimal configuration
## Limitations
### User provided registry
The user needs to provide a remote registry (like ghcr.io or Docker Hub) and credentials for the Kaniko builder container to push to and for k8s to pull from later. This is the user's responsibility, so that our solution remains lightweight and generic; an example secret is sketched below.
- Alternatively, a user-managed local Docker Registry within the k8s cluster can of course be used instead
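For illustration only, the registry credentials referenced above would typically live in a standard `kubernetes.io/dockerconfigjson` secret; the name `docker-secret` and the namespace below are placeholder assumptions, not something this ADR mandates:
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: docker-secret      # placeholder; must match the secret name given to the hook
  namespace: arc-runners   # assumption: whatever namespace the runner pods use
type: kubernetes.io/dockerconfigjson
data:
  # base64-encoded Docker config.json containing credentials for the registry host
  .dockerconfigjson: <base64-encoded credentials>
```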
### Kaniko feature limit
Anything Kaniko can't do, we will by definition be unable to support. Any incompatibilities or inconsistencies between Docker and Kaniko will naturally be inherited by our solution.
## Interface
The user will set `containerMode: kubernetes`, because this is a change to the behaviour of our k8s hooks.
The user will set two ENVs:
- `ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST`: e.g. `ghcr.io/OWNER` or `dockerhandle`.
- `ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_SECRET_NAME`: e.g. `docker-secret`: the name of the `k8s` secret resource that allows you to authenticate against the registry with the given handle above
The workspace is used as the image name.
The image tag is a randomly generated string.
To execute a container-action, we then run a k8s job that loads the image from the specified registry.
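A minimal sketch of this interface, assuming the ENVs are set on the runner that executes the k8s hook (the registry handle and secret name are placeholders):
```yaml
# Illustrative only; how these ENVs reach the runner depends on the user's deployment.
env:
  - name: ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST
    value: ghcr.io/my-org          # placeholder registry host / handle
  - name: ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_SECRET_NAME
    value: docker-secret           # name of the k8s secret holding the registry credentials
```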
## Additional configuration
Users may want to use different URLs for the registry when pushing and pulling an image as they will be invoked by different machines on different networks.
- The **Kaniko build container pushes the image** after building; it runs in a pod that belongs to the runner pod.
- The **kubelet pulls the image** before starting a pod.
These two might not resolve all host names identically, so it makes sense to allow different push and pull URLs.
ENVs `ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PUSH` and `ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PULL` will be preferred if set.
### Example
As an example, a cluster local docker registry could be a long running pod exposed as a service _and_ as a NodePort.
The Kaniko builder pod would push to `my-local-registry.default.svc.cluster.local:12345/foohandle`. (`ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PUSH`)
This URL cannot be resolved by the kubelet to pull the image, so we need a secondary URL to pull it; in this case, using the NodePort, that URL is `localhost:NODEPORT/foohandle` (`ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PULL`).
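Sketching that example with both ENVs set (the service URL, NodePort, and handle are the placeholders used above):
```yaml
env:
  - name: ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PUSH
    value: my-local-registry.default.svc.cluster.local:12345/foohandle  # reachable from the Kaniko builder pod
  - name: ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PULL
    value: localhost:<NODEPORT>/foohandle  # reachable from the kubelet pulling the image
```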
## Consequences
- Users can build container-actions from a local Dockerfile in their k8s cluster without a privileged Docker builder container.


@@ -1,184 +0,0 @@
# ADR 0072: Using Ephemeral Containers
**Date:** 27 March 2023
**Status**: Rejected <!--Accepted|Rejected|Superseded|Deprecated-->
## Context
We are evaluating using Kubernetes [ephemeral containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/) as a drop-in replacement for creating pods for [jobs that run in containers](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) and [service containers](https://docs.github.com/en/actions/using-containerized-services/about-service-containers).
The main motivator behind using ephemeral containers is to eliminate the need for [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). Persistent Volume implementations vary depending on the provider and we want to avoid building a dependency on it in order to provide our end-users a consistent experience.
With ephemeral containers we could leverage [emptyDir volumes](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) which fits our use case better and its behaviour is consistent across providers.
However, it's important to acknowledge that ephemeral containers were not designed to handle workloads but rather provide a mechanism to inspect running containers for debugging and troubleshooting purposes.
## Evaluation
The criteria that we are using to evaluate whether ephemeral containers are fit for purpose are:
- Networking
- Storage
- Security
- Resource limits
- Logs
- Customizability
### Networking
Ephemeral containers share the networking namespace of the pod they are attached to. This means that ephemeral containers can access the same network interfaces as the pod and can communicate with other containers in the same pod. However, ephemeral containers cannot have ports configured and as such the fields ports, livenessProbe, and readinessProbe are not available [^1][^2]
In this scenario we have 3 containers in a pod:
- `runner`: the main container that runs the GitHub Actions job
- `debugger`: the first ephemeral container
- `debugger2`: the second ephemeral container
By sequentially opening ports on each of these containers and connecting to them we can demonstrate that the communication flow between the runner and the debuggers is feasible.
<details>
<summary>1. Runner -> Debugger communication</summary>
![runner->debugger](./images/runner-debugger.png)
</details>
<details>
<summary>2. Debugger -> Runner communication</summary>
![debugger->runner](./images/debugger-runner.png)
</details>
<details>
<summary>3. Debugger2 -> Debugger communication</summary>
![debugger2->debugger](./images/debugger2-debugger.png)
</details>
### Storage
An emptyDir volume can be successfully mounted (read/write) by the runner as well as the ephemeral containers. This means that ephemeral containers can share data with the runner and other ephemeral containers.
<details>
<summary>Configuration</summary>
```yaml
# Extracted from the values.yaml for the gha-runner-scale-set helm chart
spec:
containers:
- name: runner
image: ghcr.io/actions/actions-runner:latest
command: ["/home/runner/run.sh"]
volumeMounts:
- mountPath: /workspace
name: work-volume
volumes:
- name: work-volume
emptyDir:
sizeLimit: 1Gi
```
```bash
# The API call to the Kubernetes API used to create the ephemeral containers
POD_NAME="arc-runner-set-6sfwd-runner-k7qq6"
NAMESPACE="arc-runners"
curl -v "https://<IP>:<PORT>/api/v1/namespaces/$NAMESPACE/pods/$POD_NAME/ephemeralcontainers" \
-X PATCH \
-H 'Content-Type: application/strategic-merge-patch+json' \
--cacert <PATH_TO_CACERT> \
--cert <PATH_TO_CERT> \
--key <PATH_TO_CLIENT_KEY> \
-d '
{
"spec":
{
"ephemeralContainers":
[
{
"name": "debugger",
"command": ["sh"],
"image": "ghcr.io/actions/actions-runner:latest",
"targetContainerName": "runner",
"stdin": true,
"tty": true,
"volumeMounts": [{
"mountPath": "/workspace",
"name": "work-volume",
"readOnly": false
}]
},
{
"name": "debugger2",
"command": ["sh"],
"image": "ghcr.io/actions/actions-runner:latest",
"targetContainerName": "runner",
"stdin": true,
"tty": true,
"volumeMounts": [{
"mountPath": "/workspace",
"name": "work-volume",
"readOnly": false
}]
}
]
}
}'
```
</details>
<details>
<summary>emptyDir volume mount</summary>
![emptyDir volume mount](./images/emptyDir_volume.png)
</details>
### Security
According to the [ephemeral containers API specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#ephemeralcontainer-v1-core) the configuration of the `securityContext` field is possible.
Ephemeral containers share the same network namespace as the pod they are attached to. This means that ephemeral containers can access the same network interfaces as the pod and can communicate with other containers in the same pod.
It is also possible for ephemeral containers to [share the process namespace](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) with the other containers in the pod. This is disabled by default.
The above could have unpredictable security implications.
### Resource limits
Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. [^1] This is a major drawback as it means that ephemeral containers cannot be configured to have resource limits.
There are no guaranteed resources for ad-hoc troubleshooting. If troubleshooting causes a pod to exceed its resource limit it may be evicted. [^3]
### Logs
Since ephemeral containers can share volumes with the runner container, it's possible to write logs to the same volume and have them available to the runner container.
### Customizability
Ephemeral containers can run any image and tag provided, they can be customized to run any arbitrary job. However, it's important to note that the following are not feasible:
- Lifecycle is not allowed for ephemeral containers
- Ephemeral containers will stop when their command exits, such as exiting a shell, and they will not be restarted. Unlike `kubectl exec`, processes in Ephemeral Containers will not receive an `EOF` if their connections are interrupted, so shells won't automatically exit on disconnect. There is no API support for killing or restarting an ephemeral container. The only way to exit the container is to send it an OS signal. [^4]
- Probes are not allowed for ephemeral containers.
- Ports are not allowed for ephemeral containers.
## Decision
While the evaluation shows that ephemeral containers can be used to run jobs in containers, it's important to acknowledge that ephemeral containers were not designed to handle workloads but rather provide a mechanism to inspect running containers for debugging and troubleshooting purposes.
Given the limitations of ephemeral containers, we decided not to use them outside of their intended purpose.
## Consequences
Proposal rejected, no further action required. This document will be used as a reference for future discussions.
[^1]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#ephemeralcontainer-v1-core
[^2]: https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/
[^3]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/277-ephemeral-containers/README.md#notesconstraintscaveats
[^4]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/277-ephemeral-containers/README.md#ephemeral-container-lifecycle


@@ -1,34 +0,0 @@
# ADR 0096: Hook extensions
**Date:** 3 August 2023
**Status**: Superseded [^1]
## Context
The current implementation of container hooks does not allow users to customize the pods created by the hook. While the implementation is designed to be used as is or as a starting point, building and maintaining a custom hook implementation just to specify additional fields is not a good user experience.
## Decision
We have decided to add hook extensions to the container hook implementation. This will allow users to customize the pods created by the hook by specifying additional fields. The hook extensions will be implemented in a way that is backwards-compatible with the existing hook implementation.
To allow customization, the runner executing the hook should have the `ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE` environment variable pointing to a yaml file on the runner system. The extension specified in that file will be applied both to job pods and to container steps.
If the environment variable is set but the file can't be read, the hook will fail, signaling incorrect configuration.
If the environment variable does not exist, the hook will apply the default spec.
If the hook is able to read the extended spec, it will first create the default configuration and then merge the modified fields in the following way:
1. The `.metadata` fields that will be appended if they are not reserved are `labels` and `annotations`.
2. The pod spec fields except for `containers` and `volumes` are applied from the template, possibly overwriting the field.
3. The volumes are applied in form of appending additional volumes to the default volumes.
4. The containers are merged based on the name assigned to them:
1. If the name of the container *is not* "$job", the entire spec of the container will be added to the pod definition.
2. If the name of the container *is* "$job", the `name` and the `image` fields are going to be ignored and the spec will be applied so that `env`, `volumeMounts`, `ports` are appended to the default container spec created by the hook, while the rest of the fields are going to be applied to the newly created container spec.
## Consequences
The addition of hook extensions will provide a better user experience for users who need to customize the pods created by the container hook. However, it will require additional effort to provide the template to the runner pod, and configure it properly.
[^1]: Superseded by [ADR 0134](0134-hook-extensions.md)


@@ -1,41 +0,0 @@
# ADR 0134: Hook extensions
**Date:** 20 February 2024
**Status**: Accepted [^1]
## Context
The current implementation of container hooks does not allow users to customize the pods created by the hook.
While the implementation is designed to be used as is or as a starting point, building and maintaining a custom hook implementation just to specify additional fields is not a good user experience.
## Decision
We have decided to add hook extensions to the container hook implementation.
This will allow users to customize the pods created by the hook by specifying additional fields.
The hook extensions will be implemented in a way that is backwards-compatible with the existing hook implementation.
To allow customization, the runner executing the hook should have the `ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE` environment variable pointing to a yaml file on the runner system.
The extension specified in that file will be applied both to job pods and to container steps.
If the environment variable is set but the file can't be read, the hook will fail, signaling incorrect configuration.
If the environment variable does not exist, the hook will apply the default spec.
If the hook is able to read the extended spec, it will first create the default configuration and then merge the modified fields in the following way (see the sketch after this list):
1. The `.metadata` fields that will be appended if they are not reserved are `labels` and `annotations`.
2. The pod spec fields except for `containers` and `volumes` are applied from the template, possibly overwriting the field.
3. The volumes are applied in form of appending additional volumes to the default volumes.
4. The containers are merged based on the name assigned to them:
1. If the name of the container *is* "$job", the `name` and the `image` fields are going to be ignored and the spec will be applied so that `env`, `volumeMounts`, `ports` are appended to the default container spec created by the hook, while the rest of the fields are going to be applied to the newly created container spec.
2. If the name of the container *starts with* "$", and matches the name of the [container service](https://docs.github.com/en/actions/using-containerized-services/about-service-containers), the `name` and the `image` fields are going to be ignored and the spec will be applied to that service container, so that `env`, `volumeMounts`, `ports` are appended to the default container spec for service created by the hook, while the rest of the fields are going to be applied to the created container spec.
If there is no container service with such name defined in the workflow, such spec extension will be ignored.
3. If the name of the container *does not start with* "$", the entire spec of the container will be added to the pod definition.
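A minimal template sketch of these merge rules (all values are illustrative: `$job` extends the job container, `$redis` only applies if the workflow declares a `redis` service container, and any other name adds a new container to the pod):
```yaml
metadata:
  labels:
    labeled-by: "extension"    # appended to the default labels
spec:
  restartPolicy: Never         # plain pod spec field: overwrites the default
  containers:
    - name: $job               # name/image ignored; env appended to the hook's job container
      env:
        - name: EXTRA_ENV
          value: "example"
    - name: $redis             # ignored unless the workflow defines a 'redis' service container
      env:
        - name: REDIS_EXTRA
          value: "example"
    - name: side-car           # no '$' prefix: added to the pod as an additional container
      image: busybox:1.28
      command: ["sh", "-c", "sleep 60"]
```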
## Consequences
The addition of hook extensions will provide a better user experience for users who need to customize the pods created by the container hook.
However, it will require additional effort to provide the template to the runner pod, and configure it properly.
[^1]: Supersedes [ADR 0096](0096-hook-extensions.md)

4 binary files not shown.


@@ -1,122 +0,0 @@
const eslint = require('@eslint/js');
const tseslint = require('@typescript-eslint/eslint-plugin');
const tsparser = require('@typescript-eslint/parser');
const globals = require('globals');
const pluginJest = require('eslint-plugin-jest');
module.exports = [
eslint.configs.recommended,
{
files: ['**/*.ts'],
languageOptions: {
parser: tsparser,
parserOptions: {
ecmaVersion: 2018,
sourceType: 'module',
project: ['./tsconfig.json', './packages/*/tsconfig.json']
},
globals: {
...globals.node,
...globals.es6
}
},
plugins: {
'@typescript-eslint': tseslint,
},
rules: {
// Disabled rules from original config
'eslint-comments/no-use': 'off',
'import/no-namespace': 'off',
'no-constant-condition': 'off',
'no-unused-vars': 'off',
'i18n-text/no-en': 'off',
'camelcase': 'off',
'semi': 'off',
'no-shadow': 'off',
// TypeScript ESLint rules
'@typescript-eslint/no-unused-vars': 'error',
'@typescript-eslint/explicit-member-accessibility': ['error', { accessibility: 'no-public' }],
'@typescript-eslint/no-require-imports': 'error',
'@typescript-eslint/array-type': 'error',
'@typescript-eslint/await-thenable': 'error',
'@typescript-eslint/explicit-function-return-type': ['error', { allowExpressions: true }],
'@typescript-eslint/no-array-constructor': 'error',
'@typescript-eslint/no-empty-interface': 'error',
'@typescript-eslint/no-explicit-any': 'off', // Fixed: removed duplicate and kept only this one
'@typescript-eslint/no-extraneous-class': 'error',
'@typescript-eslint/no-floating-promises': 'error',
'@typescript-eslint/no-for-in-array': 'error',
'@typescript-eslint/no-inferrable-types': 'error',
'@typescript-eslint/no-misused-new': 'error',
'@typescript-eslint/no-namespace': 'error',
'@typescript-eslint/no-non-null-assertion': 'warn',
'@typescript-eslint/no-unnecessary-qualifier': 'error',
'@typescript-eslint/no-unnecessary-type-assertion': 'error',
'@typescript-eslint/no-useless-constructor': 'error',
'@typescript-eslint/no-var-requires': 'error',
'@typescript-eslint/prefer-for-of': 'warn',
'@typescript-eslint/prefer-function-type': 'warn',
'@typescript-eslint/prefer-includes': 'error',
'@typescript-eslint/prefer-string-starts-ends-with': 'error',
'@typescript-eslint/promise-function-async': 'error',
'@typescript-eslint/require-array-sort-compare': 'error',
'@typescript-eslint/restrict-plus-operands': 'error',
'@typescript-eslint/unbound-method': 'error',
'@typescript-eslint/no-shadow': ['error']
}
},
{
// Test files configuration - Fixed file pattern to match .ts files
files: ['**/*test*.ts', '**/*spec*.ts', '**/tests/**/*.ts'],
languageOptions: {
parser: tsparser,
parserOptions: {
ecmaVersion: 2018,
sourceType: 'module',
project: ['./tsconfig.json', './packages/*/tsconfig.json']
},
globals: {
...globals.node,
...globals.es6,
// Fixed Jest globals
describe: 'readonly',
it: 'readonly',
test: 'readonly',
expect: 'readonly',
beforeEach: 'readonly',
afterEach: 'readonly',
beforeAll: 'readonly',
afterAll: 'readonly',
jest: 'readonly'
}
},
plugins: {
'@typescript-eslint': tseslint,
jest: pluginJest
},
rules: {
// Disable no-undef for test files since Jest globals are handled above
'no-undef': 'off',
// Relax some rules for test files
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-non-null-assertion': 'off',
'@typescript-eslint/explicit-function-return-type': 'off'
}
},
{
files: ['**/jest.config.js', '**/jest.setup.js'],
languageOptions: {
globals: {
...globals.node,
jest: 'readonly',
module: 'writable'
}
},
rules: {
'@typescript-eslint/no-require-imports': 'off',
'@typescript-eslint/no-var-requires': 'off',
'import/no-commonjs': 'off'
}
}
];


@@ -1,38 +0,0 @@
metadata:
annotations:
annotated-by: "extension"
labels:
labeled-by: "extension"
spec:
restartPolicy: Never
containers:
- name: $job # overwrites job container
env:
- name: ENV1
value: "value1"
imagePullPolicy: Always
image: "busybox:1.28" # Ignored
command:
- sh
args:
- -c
- sleep 50
- name: $redis # overwrites redis service
env:
- name: ENV2
value: "value2"
image: "busybox:1.28" # Ignored
resources:
requests:
memory: "1Mi"
cpu: "1"
limits:
memory: "1Gi"
cpu: "2"
- name: side-car
image: "ubuntu:latest" # required
command:
- sh
args:
- -c
- sleep 60


@@ -4,7 +4,7 @@
"state": {}, "state": {},
"args": { "args": {
"container": { "container": {
"image": "node:22", "image": "node:14.16",
"workingDirectory": "/__w/repo/repo", "workingDirectory": "/__w/repo/repo",
"createOptions": "--cpus 1", "createOptions": "--cpus 1",
"environmentVariables": { "environmentVariables": {
@@ -73,8 +73,6 @@
"contextName": "redis", "contextName": "redis",
"image": "redis", "image": "redis",
"createOptions": "--cpus 1", "createOptions": "--cpus 1",
"entrypoint": null,
"entryPointArgs": [],
"environmentVariables": {}, "environmentVariables": {},
"userMountVolumes": [ "userMountVolumes": [
{ {


@@ -9,7 +9,7 @@
     }
   },
   "args": {
-    "image": "node:22",
+    "image": "node:14.16",
     "dockerfile": null,
     "entryPointArgs": [
       "-e",

package-lock.json (generated, 5913 lines): diff suppressed because it is too large


@@ -1,6 +1,6 @@
 {
   "name": "hooks",
-  "version": "0.8.0",
+  "version": "0.1.3",
   "description": "Three projects are included - k8s: a kubernetes hook implementation that spins up pods dynamically to run a job - docker: A hook implementation of the runner's docker implementation - A hook lib, which contains shared typescript definitions and utilities that the other packages consume",
   "main": "",
   "directories": {
@@ -12,7 +12,6 @@
     "format": "prettier --write '**/*.ts'",
     "format-check": "prettier --check '**/*.ts'",
     "lint": "eslint packages/**/*.ts",
-    "lint:fix": "eslint packages/**/*.ts --fix",
     "build-all": "npm run build --prefix packages/hooklib && npm run build --prefix packages/k8s && npm run build --prefix packages/docker"
   },
   "repository": {
@@ -26,18 +25,12 @@
   },
   "homepage": "https://github.com/actions/runner-container-hooks#readme",
   "devDependencies": {
-    "@eslint/js": "^9.31.0",
-    "@types/jest": "^30.0.0",
-    "@types/node": "^24.0.14",
-    "@typescript-eslint/eslint-plugin": "^8.37.0",
-    "@typescript-eslint/parser": "^8.37.0",
-    "eslint": "^9.31.0",
-    "eslint-plugin-github": "^6.0.0",
-    "globals": "^15.12.0",
-    "prettier": "^3.6.2",
-    "typescript": "^5.8.3"
-  },
-  "dependencies": {
-    "eslint-plugin-jest": "^29.0.1"
+    "@types/jest": "^27.5.1",
+    "@types/node": "^17.0.23",
+    "@typescript-eslint/parser": "^5.18.0",
+    "eslint": "^8.12.0",
+    "eslint-plugin-github": "^4.3.6",
+    "prettier": "^2.6.2",
+    "typescript": "^4.6.3"
   }
 }


@@ -1,26 +1,13 @@
-// eslint-disable-next-line import/no-commonjs
 module.exports = {
   clearMocks: true,
-  preset: 'ts-jest',
   moduleFileExtensions: ['js', 'ts'],
   testEnvironment: 'node',
   testMatch: ['**/*-test.ts'],
   testRunner: 'jest-circus/runner',
-  verbose: true,
   transform: {
-    '^.+\\.ts$': [
-      'ts-jest',
-      {
-        tsconfig: 'tsconfig.test.json'
-      }
-    ],
-    // Transform ESM modules to CommonJS
-    '^.+\\.(js|mjs)$': ['babel-jest', {
-      presets: [['@babel/preset-env', { targets: { node: 'current' } }]]
-    }]
+    '^.+\\.ts$': 'ts-jest'
   },
-  transformIgnorePatterns: [
-    // Transform these ESM packages
-    'node_modules/(?!(shlex|@kubernetes/client-node|openid-client|oauth4webapi|jose|uuid)/)'
-  ],
-  setupFilesAfterEnv: ['./jest.setup.js']
+  setupFilesAfterEnv: ['./jest.setup.js'],
+  verbose: true
 }

File diff suppressed because it is too large.


@@ -5,31 +5,25 @@
"main": "lib/index.js", "main": "lib/index.js",
"scripts": { "scripts": {
"test": "jest --runInBand", "test": "jest --runInBand",
"build": "npx tsc && npx ncc build", "build": "npx tsc && npx ncc build"
"format": "prettier --write '**/*.ts'",
"format-check": "prettier --check '**/*.ts'",
"lint": "eslint src/**/*.ts"
}, },
"author": "", "author": "",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@actions/core": "^1.11.1", "@actions/core": "^1.6.0",
"@actions/exec": "^2.0.0", "@actions/exec": "^1.1.1",
"hooklib": "file:../hooklib", "hooklib": "file:../hooklib",
"shlex": "^3.0.0", "uuid": "^8.3.2"
"uuid": "^13.0.0"
}, },
"devDependencies": { "devDependencies": {
"@babel/core": "^7.28.5", "@types/jest": "^27.4.1",
"@babel/preset-env": "^7.28.5", "@types/node": "^17.0.23",
"@types/jest": "^30.0.0", "@typescript-eslint/parser": "^5.18.0",
"@types/node": "^24.0.14", "@vercel/ncc": "^0.33.4",
"@typescript-eslint/parser": "^8.49.0", "jest": "^27.5.1",
"@vercel/ncc": "^0.38.3", "ts-jest": "^27.1.4",
"jest": "^30.0.4", "ts-node": "^10.7.0",
"ts-jest": "^29.4.6", "tsconfig-paths": "^3.14.1",
"ts-node": "^10.9.2", "typescript": "^4.6.3"
"tsconfig-paths": "^4.2.0",
"typescript": "^5.8.3"
} }
} }


@@ -43,25 +43,18 @@ export async function createContainer(
   if (args.environmentVariables) {
     for (const [key] of Object.entries(args.environmentVariables)) {
-      dockerArgs.push('-e', key)
+      dockerArgs.push('-e')
+      dockerArgs.push(key)
     }
   }
-  dockerArgs.push('-e', 'GITHUB_ACTIONS=true')
-  // Use same behavior as the runner https://github.com/actions/runner/blob/27d9c886ab9a45e0013cb462529ac85d581f8c41/src/Runner.Worker/Container/DockerCommandManager.cs#L150
-  if (!('CI' in (args.environmentVariables ?? {}))) {
-    dockerArgs.push('-e', 'CI=true')
-  }
   const mountVolumes = [
     ...(args.userMountVolumes || []),
     ...(args.systemMountVolumes || [])
   ]
   for (const mountVolume of mountVolumes) {
     dockerArgs.push(
-      `-v=${mountVolume.sourceVolumePath}:${mountVolume.targetVolumePath}${
-        mountVolume.readOnly ? ':ro' : ''
-      }`
+      `-v=${mountVolume.sourceVolumePath}:${mountVolume.targetVolumePath}`
     )
   }
   if (args.entryPoint) {
@@ -98,12 +91,11 @@ export async function containerPull(
   image: string,
   configLocation: string
 ): Promise<void> {
-  const dockerArgs: string[] = []
+  const dockerArgs: string[] = ['pull']
   if (configLocation) {
     dockerArgs.push('--config')
     dockerArgs.push(configLocation)
   }
-  dockerArgs.push('pull')
   dockerArgs.push(image)
   for (let i = 0; i < 3; i++) {
     try {
@@ -410,16 +402,11 @@ export async function containerRun(
   }
   if (args.environmentVariables) {
     for (const [key] of Object.entries(args.environmentVariables)) {
-      dockerArgs.push('-e', key)
+      dockerArgs.push('-e')
+      dockerArgs.push(key)
     }
   }
-  dockerArgs.push('-e', 'GITHUB_ACTIONS=true')
-  // Use same behavior as the runner https://github.com/actions/runner/blob/27d9c886ab9a45e0013cb462529ac85d581f8c41/src/Runner.Worker/Container/DockerCommandManager.cs#L150
-  if (!('CI' in (args.environmentVariables ?? {}))) {
-    dockerArgs.push('-e', 'CI=true')
-  }
   const mountVolumes = [
     ...(args.userMountVolumes || []),
     ...(args.systemMountVolumes || [])
@@ -440,9 +427,6 @@ export async function containerRun(
   dockerArgs.push(args.image)
   if (args.entryPointArgs) {
     for (const entryPointArg of args.entryPointArgs) {
-      if (!entryPointArg) {
-        continue
-      }
       dockerArgs.push(entryPointArg)
     }
   }
@@ -456,7 +440,7 @@ export async function isContainerAlpine(containerId: string): Promise<boolean> {
     containerId,
     'sh',
     '-c',
-    `'[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1'`
+    "[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1"
   ]
   try {
     await runDockerCommand(dockerArgs)


@@ -31,20 +31,16 @@ export async function prepareJob(
     core.info('No containers exist, skipping hook invocation')
     exit(0)
   }
-  let networkName = process.env.ACTIONS_RUNNER_NETWORK_DRIVER
-  if (!networkName) {
-    networkName = generateNetworkName()
-    // Create network
-    await networkCreate(networkName)
-  }
+  const networkName = generateNetworkName()
+  // Create network
+  await networkCreate(networkName)
   // Create Job Container
   let containerMetadata: ContainerMetadata | undefined = undefined
   if (!container?.image) {
     core.info('No job container provided, skipping')
   } else {
-    setupContainer(container, true)
+    setupContainer(container)
     const configLocation = await registryLogin(container.registry)
     try {
@@ -178,11 +174,9 @@ function generateResponseFile(
   writeToResponseFile(responseFile, JSON.stringify(response))
 }
-function setupContainer(container, jobContainer = false): void {
-  if (!container.entryPoint && jobContainer) {
-    container.entryPointArgs = [`-f`, `/dev/null`]
-    container.entryPoint = 'tail'
-  }
+function setupContainer(container): void {
+  container.entryPointArgs = [`-f`, `/dev/null`]
+  container.entryPoint = 'tail'
 }
 function generateNetworkName(): string {


@@ -16,14 +16,15 @@ import {
 import { checkEnvironment } from './utils'
 async function run(): Promise<void> {
-  const input = await getInputFromStdin()
-  const args = input['args']
-  const command = input['command']
-  const responseFile = input['responseFile']
-  const state = input['state']
   try {
     checkEnvironment()
+    const input = await getInputFromStdin()
+    const args = input['args']
+    const command = input['command']
+    const responseFile = input['responseFile']
+    const state = input['state']
     switch (command) {
       case Command.PrepareJob:
         await prepareJob(args as PrepareJobArgs, responseFile)


@@ -1,10 +1,10 @@
 /* eslint-disable @typescript-eslint/no-var-requires */
 /* eslint-disable @typescript-eslint/no-require-imports */
-/* eslint-disable import/no-commonjs */
 import * as core from '@actions/core'
 import { env } from 'process'
 // Import this way otherwise typescript has errors
 const exec = require('@actions/exec')
-const shlex = require('shlex')
 export interface RunDockerCommandOptions {
   workingDir?: string
@@ -16,8 +16,6 @@ export async function runDockerCommand(
   args: string[],
   options?: RunDockerCommandOptions
 ): Promise<string> {
-  options = optionsWithDockerEnvs(options)
-  args = fixArgs(args)
   const pipes = await exec.getExecOutput('docker', args, options)
   if (pipes.exitCode !== 0) {
     core.error(`Docker failed with exit code ${pipes.exitCode}`)
@@ -26,45 +24,6 @@ export async function runDockerCommand(
   return Promise.resolve(pipes.stdout)
 }
-export function optionsWithDockerEnvs(
-  options?: RunDockerCommandOptions
-): RunDockerCommandOptions | undefined {
-  // From https://docs.docker.com/engine/reference/commandline/cli/#environment-variables
-  const dockerCliEnvs = new Set([
-    'DOCKER_API_VERSION',
-    'DOCKER_CERT_PATH',
-    'DOCKER_CONFIG',
-    'DOCKER_CONTENT_TRUST_SERVER',
-    'DOCKER_CONTENT_TRUST',
-    'DOCKER_CONTEXT',
-    'DOCKER_DEFAULT_PLATFORM',
-    'DOCKER_HIDE_LEGACY_COMMANDS',
-    'DOCKER_HOST',
-    'DOCKER_STACK_ORCHESTRATOR',
-    'DOCKER_TLS_VERIFY',
-    'BUILDKIT_PROGRESS'
-  ])
-  const dockerEnvs = {}
-  for (const key in process.env) {
-    if (dockerCliEnvs.has(key)) {
-      dockerEnvs[key] = process.env[key]
-    }
-  }
-  const newOptions = {
-    workingDir: options?.workingDir,
-    input: options?.input,
-    env: options?.env || {}
-  }
-  // Set docker envs or overwrite provided ones
-  for (const [key, value] of Object.entries(dockerEnvs)) {
-    newOptions.env[key] = value as string
-  }
-  return newOptions
-}
 export function sanitize(val: string): string {
   if (!val || typeof val !== 'string') {
     return ''
@@ -85,10 +44,6 @@ export function sanitize(val: string): string {
   return newNameBuilder.join('')
 }
-export function fixArgs(args: string[]): string[] {
-  return shlex.split(args.join(' '))
-}
 export function checkEnvironment(): void {
   if (!env.GITHUB_WORKSPACE) {
     throw new Error('GITHUB_WORKSPACE is not set')


@@ -40,54 +40,21 @@ describe('run script step', () => {
     definitions.runScriptStep.args.entryPoint = '/bin/bash'
     definitions.runScriptStep.args.entryPointArgs = [
       '-c',
-      `'if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath}:"* ]]; then exit 1; fi'`
+      `if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath}:"* ]]; then exit 1; fi`
     ]
     await expect(
       runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)
     ).resolves.not.toThrow()
   })
-  it("Should fix expansion and print correctly in container's stdout", async () => {
-    const spy = jest.spyOn(process.stdout, 'write').mockImplementation()
-    definitions.runScriptStep.args.entryPoint = 'echo'
-    definitions.runScriptStep.args.entryPointArgs = ['"Mona', 'the', `Octocat"`]
-    await expect(
-      runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)
-    ).resolves.not.toThrow()
-    expect(spy).toHaveBeenCalledWith(
-      expect.stringContaining('Mona the Octocat')
-    )
-    spy.mockRestore()
-  })
   it('Should have path variable changed in container with prepend path string array', async () => {
     definitions.runScriptStep.args.prependPath = ['/some/other/path']
     definitions.runScriptStep.args.entryPoint = '/bin/bash'
     definitions.runScriptStep.args.entryPointArgs = [
       '-c',
-      `'if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath.join(
+      `if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath.join(
         ':'
-      )}:"* ]]; then exit 1; fi'`
-    ]
-    await expect(
-      runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)
-    ).resolves.not.toThrow()
-  })
-  it('Should confirm that CI and GITHUB_ACTIONS are set', async () => {
-    definitions.runScriptStep.args.entryPoint = '/bin/bash'
-    definitions.runScriptStep.args.entryPointArgs = [
-      '-c',
-      `'if [[ ! $(env | grep "^CI=") = "CI=true" ]]; then exit 1; fi'`
-    ]
-    await expect(
-      runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)
-    ).resolves.not.toThrow()
-    definitions.runScriptStep.args.entryPointArgs = [
-      '-c',
-      `'if [[ ! $(env | grep "^GITHUB_ACTIONS=") = "GITHUB_ACTIONS=true" ]]; then exit 1; fi'`
+      )}:"* ]]; then exit 1; fi`
     ]
     await expect(
       runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)


@@ -31,7 +31,7 @@ export default class TestSetup {
private get allTestDirectories() { private get allTestDirectories() {
const resp = [this.testdir, this.runnerMockDir, this.runnerOutputDir] const resp = [this.testdir, this.runnerMockDir, this.runnerOutputDir]
for (const [, value] of Object.entries(this.runnerMockSubdirs)) { for (const [key, value] of Object.entries(this.runnerMockSubdirs)) {
resp.push(`${this.runnerMockDir}/${value}`) resp.push(`${this.runnerMockDir}/${value}`)
} }
@@ -42,11 +42,12 @@ export default class TestSetup {
return resp return resp
} }
initialize(): void { public initialize(): void {
env['GITHUB_WORKSPACE'] = this.workingDirectory env['GITHUB_WORKSPACE'] = this.workingDirectory
env['RUNNER_NAME'] = 'test' env['RUNNER_NAME'] = 'test'
env['RUNNER_TEMP'] = env[
`${this.runnerMockDir}/${this.runnerMockSubdirs.workTemp}` 'RUNNER_TEMP'
] = `${this.runnerMockDir}/${this.runnerMockSubdirs.workTemp}`
for (const dir of this.allTestDirectories) { for (const dir of this.allTestDirectories) {
fs.mkdirSync(dir, { recursive: true }) fs.mkdirSync(dir, { recursive: true })
@@ -58,7 +59,7 @@ export default class TestSetup {
) )
} }
teardown(): void { public teardown(): void {
fs.rmdirSync(this.testdir, { recursive: true }) fs.rmdirSync(this.testdir, { recursive: true })
} }
@@ -107,21 +108,21 @@ export default class TestSetup {
] ]
} }
createOutputFile(name: string): string { public createOutputFile(name: string): string {
let filePath = path.join(this.runnerOutputDir, name || `${uuidv4()}.json`) let filePath = path.join(this.runnerOutputDir, name || `${uuidv4()}.json`)
fs.writeFileSync(filePath, '') fs.writeFileSync(filePath, '')
return filePath return filePath
} }
get workingDirectory(): string { public get workingDirectory(): string {
return `${this.runnerMockDir}/_work/${this.projectName}/${this.projectName}` return `${this.runnerMockDir}/_work/${this.projectName}/${this.projectName}`
} }
get containerWorkingDirectory(): string { public get containerWorkingDirectory(): string {
return `/__w/${this.projectName}/${this.projectName}` return `/__w/${this.projectName}/${this.projectName}`
} }
initializeDockerAction(): string { public initializeDockerAction(): string {
const actionPath = `${this.testdir}/_actions/example-handle/example-repo/example-branch/mock-directory` const actionPath = `${this.testdir}/_actions/example-handle/example-repo/example-branch/mock-directory`
fs.mkdirSync(actionPath, { recursive: true }) fs.mkdirSync(actionPath, { recursive: true })
this.writeDockerfile(actionPath) this.writeDockerfile(actionPath)
@@ -146,7 +147,7 @@ echo "::set-output name=time::$time"`
fs.chmodSync(entryPointPath, 0o755)
}
-getPrepareJobDefinition(): HookData {
+public getPrepareJobDefinition(): HookData {
const prepareJob = JSON.parse(
fs.readFileSync(
path.resolve(__dirname + '/../../../examples/prepare-job.json'),
@@ -165,7 +166,7 @@ echo "::set-output name=time::$time"`
return prepareJob
}
-getRunScriptStepDefinition(): HookData {
+public getRunScriptStepDefinition(): HookData {
const runScriptStep = JSON.parse(
fs.readFileSync(
path.resolve(__dirname + '/../../../examples/run-script-step.json'),
@@ -177,7 +178,7 @@ echo "::set-output name=time::$time"`
return runScriptStep
}
-getRunContainerStepDefinition(): HookData {
+public getRunContainerStepDefinition(): HookData {
const runContainerStep = JSON.parse(
fs.readFileSync(
path.resolve(__dirname + '/../../../examples/run-container-step.json'),


@@ -1,4 +1,4 @@
-import { optionsWithDockerEnvs, sanitize, fixArgs } from '../src/utils'
+import { sanitize } from '../src/utils'
describe('Utilities', () => {
it('should return sanitized image name', () => {
@@ -9,72 +9,4 @@ describe('Utilities', () => {
const validStr = 'teststr8_one'
expect(sanitize(validStr)).toBe(validStr)
})
test.each([
[['"Hello', 'World"'], ['Hello World']],
[
[
'sh',
'-c',
`'[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1'`
],
[
'sh',
'-c',
`[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1`
]
],
[
[
'sh',
'-c',
`'[ $(cat /etc/*release* | grep -i -e '\\''^ID=*alpine*'\\'' -c) != 0 ] || exit 1'`
],
[
'sh',
'-c',
`[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1`
]
]
])('should fix split arguments(%p, %p)', (args, expected) => {
const got = fixArgs(args)
expect(got).toStrictEqual(expected)
})
describe('with docker options', () => {
it('should augment options with docker environment variables', () => {
process.env.DOCKER_HOST = 'unix:///run/user/1001/docker.sock'
process.env.DOCKER_NOTEXIST = 'notexist'
const optionDefinitions: any = [
undefined,
{},
{ env: {} },
{ env: { DOCKER_HOST: 'unix://var/run/docker.sock' } }
]
for (const opt of optionDefinitions) {
let options = optionsWithDockerEnvs(opt)
expect(options).toBeDefined()
expect(options?.env).toBeDefined()
expect(options?.env?.DOCKER_HOST).toBe(process.env.DOCKER_HOST)
expect(options?.env?.DOCKER_NOTEXIST).toBeUndefined()
}
})
it('should not overwrite other options', () => {
process.env.DOCKER_HOST = 'unix:///run/user/1001/docker.sock'
const opt = {
workingDir: 'test',
input: Buffer.from('test')
}
const options = optionsWithDockerEnvs(opt)
expect(options).toBeDefined()
expect(options?.workingDir).toBe(opt.workingDir)
expect(options?.input).toBe(opt.input)
expect(options?.env).toStrictEqual({
DOCKER_HOST: process.env.DOCKER_HOST
})
})
})
})
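Note: the removed table-driven tests above exercise a `fixArgs` helper that re-joins argument vectors the runner has split on whitespace, so quoted fragments reach `exec` as single shell words again. A minimal sketch of that idea, assuming the npm `shlex` package (added to the k8s hook's dependencies elsewhere in this diff); the repository's actual implementation may differ:

```typescript
// Sketch only: re-join the argv and let a shell-style lexer re-split it, so that
// ['"Hello', 'World"'] collapses to ['Hello World'] and the outer single quotes
// around a composed '-c' command are stripped, as the expected values in the
// removed tests show. Assumes the npm 'shlex' package.
import { split } from 'shlex'

export function fixArgs(args: string[]): string[] {
  return split(args.join(' '))
}
```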


@@ -1,6 +0,0 @@
{
"compilerOptions": {
"allowJs": true
},
"extends": "./tsconfig.json"
}

File diff suppressed because it is too large.


@@ -3,7 +3,7 @@
"version": "0.1.0", "version": "0.1.0",
"description": "", "description": "",
"main": "lib/index.js", "main": "lib/index.js",
"types": "lib/index.d.ts", "types": "index.d.ts",
"scripts": { "scripts": {
"test": "echo \"Error: no test specified\" && exit 1", "test": "echo \"Error: no test specified\" && exit 1",
"build": "tsc", "build": "tsc",
@@ -14,14 +14,15 @@
"author": "", "author": "",
"license": "MIT", "license": "MIT",
"devDependencies": { "devDependencies": {
"@types/node": "^24.0.14", "@types/node": "^17.0.23",
"@typescript-eslint/parser": "^5.18.0",
"@zeit/ncc": "^0.22.3", "@zeit/ncc": "^0.22.3",
"eslint": "^9.31.0", "eslint": "^8.12.0",
"eslint-plugin-github": "^6.0.0", "eslint-plugin-github": "^4.3.6",
"prettier": "^3.6.2", "prettier": "^2.6.2",
"typescript": "^5.8.3" "typescript": "^4.6.3"
}, },
"dependencies": { "dependencies": {
"@actions/core": "^1.11.1" "@actions/core": "^1.6.0"
} }
} }


@@ -22,6 +22,9 @@ rules:
- apiGroups: [""]
  resources: ["pods/log"]
  verbs: ["get", "list", "watch",]
- apiGroups: ["batch"]
  resources: ["jobs"]
  verbs: ["get", "list", "create", "delete"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "create", "delete"]
@@ -40,5 +43,3 @@ rules:
- Building container actions from a dockerfile is not supported at this time
- Container actions will not have access to the services network or job container network
- Docker [create options](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idcontaineroptions) are not supported - Docker [create options](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idcontaineroptions) are not supported
- Container actions will have to specify the entrypoint, since the default entrypoint will be overridden to run the commands from the workflow.
- Container actions need to have the following binaries in their container image: `sh`, `env`, `tail`.


@@ -1,26 +1,13 @@
-// eslint-disable-next-line import/no-commonjs
module.exports = {
clearMocks: true,
+preset: 'ts-jest',
moduleFileExtensions: ['js', 'ts'],
testEnvironment: 'node',
testMatch: ['**/*-test.ts'],
testRunner: 'jest-circus/runner',
-verbose: true,
transform: {
-'^.+\\.ts$': [
-'ts-jest',
-{
-tsconfig: 'tsconfig.test.json'
-}
-],
-// Transform ESM modules to CommonJS
-'^.+\\.(js|mjs)$': ['babel-jest', {
-presets: [['@babel/preset-env', { targets: { node: 'current' } }]]
-}]
+'^.+\\.ts$': 'ts-jest'
},
-transformIgnorePatterns: [
-// Transform these ESM packages
-'node_modules/(?!(shlex|@kubernetes/client-node|openid-client|oauth4webapi|jose|uuid)/)'
-],
-setupFilesAfterEnv: ['./jest.setup.js']
+setupFilesAfterEnv: ['./jest.setup.js'],
+verbose: true
}


@@ -1,2 +1 @@
// eslint-disable-next-line filenames/match-regex, no-undef
jest.setTimeout(500000)

File diff suppressed because it is too large.


@@ -13,25 +13,18 @@
"author": "", "author": "",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@actions/core": "^1.11.1", "@actions/core": "^1.6.0",
"@actions/exec": "^1.1.1", "@actions/exec": "^1.1.1",
"@actions/io": "^1.1.3", "@actions/io": "^1.1.2",
"@kubernetes/client-node": "^1.3.0", "@kubernetes/client-node": "^0.16.3",
"hooklib": "file:../hooklib", "hooklib": "file:../hooklib"
"js-yaml": "^4.1.0",
"shlex": "^3.0.0",
"tar-fs": "^3.1.0",
"uuid": "^11.1.0"
}, },
"devDependencies": { "devDependencies": {
"@babel/core": "^7.28.3", "@types/jest": "^27.4.1",
"@babel/preset-env": "^7.28.3", "@types/node": "^17.0.23",
"@types/jest": "^30.0.0", "@vercel/ncc": "^0.33.4",
"@types/node": "^24.3.0", "jest": "^27.5.1",
"@vercel/ncc": "^0.38.3", "ts-jest": "^27.1.4",
"babel-jest": "^30.1.1", "typescript": "^4.6.3"
"jest": "^30.1.1",
"ts-jest": "^29.4.1",
"typescript": "^5.9.2"
} }
} }


@@ -41,9 +41,7 @@ export function getSecretName(): string {
export const MAX_POD_NAME_LENGTH = 63
export const STEP_POD_NAME_SUFFIX_LENGTH = 8
export const CONTAINER_EXTENSION_PREFIX = '$'
export const JOB_CONTAINER_NAME = 'job'
export const JOB_CONTAINER_EXTENSION_NAME = '$job'
export class RunnerInstanceLabel {
private podName: string


@@ -1,42 +1,25 @@
import * as core from '@actions/core' import * as core from '@actions/core'
import * as io from '@actions/io'
import * as k8s from '@kubernetes/client-node' import * as k8s from '@kubernetes/client-node'
import { import { ContextPorts, prepareJobArgs, writeToResponseFile } from 'hooklib'
JobContainerInfo, import path from 'path'
ContextPorts,
PrepareJobArgs,
writeToResponseFile,
ServiceContainerInfo
} from 'hooklib'
import { import {
containerPorts, containerPorts,
createJobPod, createPod,
isPodContainerAlpine, isPodContainerAlpine,
prunePods, prunePods,
waitForPodPhases, waitForPodPhases
getPrepareJobTimeoutSeconds,
execCpToPod,
execPodStep
} from '../k8s' } from '../k8s'
import { import {
CONTAINER_VOLUMES, containerVolumes,
DEFAULT_CONTAINER_ENTRY_POINT, DEFAULT_CONTAINER_ENTRY_POINT,
DEFAULT_CONTAINER_ENTRY_POINT_ARGS, DEFAULT_CONTAINER_ENTRY_POINT_ARGS,
generateContainerName, PodPhase
mergeContainerWithOptions,
readExtensionFromFile,
PodPhase,
fixArgs,
prepareJobScript
} from '../k8s/utils' } from '../k8s/utils'
import { import { JOB_CONTAINER_NAME } from './constants'
CONTAINER_EXTENSION_PREFIX,
getJobPodName,
JOB_CONTAINER_NAME
} from './constants'
import { dirname } from 'path'
export async function prepareJob( export async function prepareJob(
args: PrepareJobArgs, args: prepareJobArgs,
responseFile responseFile
): Promise<void> { ): Promise<void> {
if (!args.container) { if (!args.container) {
@@ -44,49 +27,29 @@ export async function prepareJob(
} }
await prunePods() await prunePods()
await copyExternalsToRoot()
const extension = readExtensionFromFile()
let container: k8s.V1Container | undefined = undefined let container: k8s.V1Container | undefined = undefined
if (args.container?.image) { if (args.container?.image) {
container = createContainerSpec( core.debug(`Using image '${args.container.image}' for job image`)
args.container, container = createPodSpec(args.container, JOB_CONTAINER_NAME, true)
JOB_CONTAINER_NAME,
true,
extension
)
} }
let services: k8s.V1Container[] = [] let services: k8s.V1Container[] = []
if (args.services?.length) { if (args.services?.length) {
services = args.services.map(service => { services = args.services.map(service => {
return createContainerSpec( core.debug(`Adding service '${service.image}' to pod definition`)
service, return createPodSpec(service, service.image.split(':')[0])
generateContainerName(service.image),
false,
extension
)
}) })
} }
if (!container && !services?.length) { if (!container && !services?.length) {
throw new Error('No containers exist, skipping hook invocation') throw new Error('No containers exist, skipping hook invocation')
} }
let createdPod: k8s.V1Pod | undefined = undefined let createdPod: k8s.V1Pod | undefined = undefined
try { try {
createdPod = await createJobPod( createdPod = await createPod(container, services, args.container.registry)
getJobPodName(),
container,
services,
args.container.registry,
extension
)
} catch (err) { } catch (err) {
await prunePods() await prunePods()
core.debug(`createPod failed: ${JSON.stringify(err)}`) throw new Error(`failed to create job pod: ${err}`)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to create job pod: ${message}`)
} }
if (!createdPod?.metadata?.name) { if (!createdPod?.metadata?.name) {
@@ -96,45 +59,15 @@ export async function prepareJob(
`Job pod created, waiting for it to come online ${createdPod?.metadata?.name}` `Job pod created, waiting for it to come online ${createdPod?.metadata?.name}`
) )
const runnerWorkspace = dirname(process.env.RUNNER_WORKSPACE as string)
let prepareScript: { containerPath: string; runnerPath: string } | undefined
if (args.container?.userMountVolumes?.length) {
prepareScript = prepareJobScript(args.container.userMountVolumes || [])
}
try { try {
await waitForPodPhases( await waitForPodPhases(
createdPod.metadata.name, createdPod.metadata.name,
new Set([PodPhase.RUNNING]), new Set([PodPhase.RUNNING]),
new Set([PodPhase.PENDING]), new Set([PodPhase.PENDING])
getPrepareJobTimeoutSeconds()
) )
} catch (err) { } catch (err) {
await prunePods() await prunePods()
throw new Error(`pod failed to come online with error: ${err}`) throw new Error(`Pod failed to come online with error: ${err}`)
}
await execCpToPod(createdPod.metadata.name, runnerWorkspace, '/__w')
if (prepareScript) {
await execPodStep(
['sh', '-e', prepareScript.containerPath],
createdPod.metadata.name,
JOB_CONTAINER_NAME
)
const promises: Promise<void>[] = []
for (const vol of args?.container?.userMountVolumes || []) {
promises.push(
execCpToPod(
createdPod.metadata.name,
vol.sourceVolumePath,
vol.targetVolumePath
)
)
}
await Promise.all(promises)
} }
core.debug('Job pod is ready for traffic') core.debug('Job pod is ready for traffic')
@@ -146,21 +79,16 @@ export async function prepareJob(
JOB_CONTAINER_NAME JOB_CONTAINER_NAME
) )
} catch (err) { } catch (err) {
core.debug( throw new Error(`Failed to determine if the pod is alpine: ${err}`)
`Failed to determine if the pod is alpine: ${JSON.stringify(err)}`
)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to determine if the pod is alpine: ${message}`)
} }
core.debug(`Setting isAlpine to ${isAlpine}`) core.debug(`Setting isAlpine to ${isAlpine}`)
generateResponseFile(responseFile, args, createdPod, isAlpine) generateResponseFile(responseFile, createdPod, isAlpine)
} }
function generateResponseFile( function generateResponseFile(
responseFile: string, responseFile: string,
args: PrepareJobArgs,
appPod: k8s.V1Pod, appPod: k8s.V1Pod,
isAlpine: boolean isAlpine
): void { ): void {
if (!appPod.metadata?.name) { if (!appPod.metadata?.name) {
throw new Error('app pod must have metadata.name specified') throw new Error('app pod must have metadata.name specified')
@@ -191,39 +119,46 @@ function generateResponseFile(
} }
} }
if (args.services?.length) { const serviceContainers = appPod.spec?.containers.filter(
const serviceContainerNames = c => c.name !== JOB_CONTAINER_NAME
args.services?.map(s => generateContainerName(s.image)) || [] )
if (serviceContainers?.length) {
response.context['services'] = serviceContainers.map(c => {
if (!c.ports) {
return
}
response.context['services'] = appPod?.spec?.containers const ctxPorts: ContextPorts = {}
?.filter(c => serviceContainerNames.includes(c.name)) for (const port of c.ports) {
.map(c => { ctxPorts[port.containerPort] = port.hostPort
const ctxPorts: ContextPorts = {} }
if (c.ports?.length) {
for (const port of c.ports) {
if (port.containerPort && port.hostPort) {
ctxPorts[port.containerPort.toString()] = port.hostPort.toString()
}
}
}
return { return {
image: c.image, image: c.image,
ports: ctxPorts ports: ctxPorts
} }
}) })
} }
writeToResponseFile(responseFile, JSON.stringify(response)) writeToResponseFile(responseFile, JSON.stringify(response))
} }
export function createContainerSpec( async function copyExternalsToRoot(): Promise<void> {
container: JobContainerInfo | ServiceContainerInfo, const workspace = process.env['RUNNER_WORKSPACE']
if (workspace) {
await io.cp(
path.join(workspace, '../../externals'),
path.join(workspace, '../externals'),
{ force: true, recursive: true, copySourceDirectory: false }
)
}
}
function createPodSpec(
container,
name: string, name: string,
jobContainer = false, jobContainer = false
extension?: k8s.V1PodTemplateSpec
): k8s.V1Container { ): k8s.V1Container {
if (!container.entryPoint && jobContainer) { if (!container.entryPoint) {
container.entryPoint = DEFAULT_CONTAINER_ENTRY_POINT container.entryPoint = DEFAULT_CONTAINER_ENTRY_POINT
container.entryPointArgs = DEFAULT_CONTAINER_ENTRY_POINT_ARGS container.entryPointArgs = DEFAULT_CONTAINER_ENTRY_POINT_ARGS
} }
@@ -231,54 +166,27 @@ export function createContainerSpec(
const podContainer = { const podContainer = {
name, name,
image: container.image, image: container.image,
command: [container.entryPoint],
args: container.entryPointArgs,
ports: containerPorts(container) ports: containerPorts(container)
} as k8s.V1Container } as k8s.V1Container
if (container['workingDirectory']) { if (container.workingDirectory) {
podContainer.workingDir = container['workingDirectory'] podContainer.workingDir = container.workingDirectory
}
if (container.entryPoint) {
podContainer.command = [container.entryPoint]
}
if (container.entryPointArgs && container.entryPointArgs.length > 0) {
podContainer.args = fixArgs(container.entryPointArgs)
} }
podContainer.env = [] podContainer.env = []
for (const [key, value] of Object.entries( for (const [key, value] of Object.entries(
container['environmentVariables'] || {} container['environmentVariables']
)) { )) {
if (value && key !== 'HOME') { if (value && key !== 'HOME') {
podContainer.env.push({ name: key, value }) podContainer.env.push({ name: key, value: value as string })
} }
} }
podContainer.env.push({ podContainer.volumeMounts = containerVolumes(
name: 'GITHUB_ACTIONS', container.userMountVolumes,
value: 'true' jobContainer
})
if (!('CI' in (container['environmentVariables'] || {}))) {
podContainer.env.push({
name: 'CI',
value: 'true'
})
}
podContainer.volumeMounts = CONTAINER_VOLUMES
if (!extension) {
return podContainer
}
const from = extension.spec?.containers?.find(
c => c.name === CONTAINER_EXTENSION_PREFIX + name
) )
if (from) {
mergeContainerWithOptions(podContainer, from)
}
return podContainer return podContainer
} }


@@ -1,31 +1,23 @@
import * as core from '@actions/core' import * as core from '@actions/core'
import * as fs from 'fs'
import * as k8s from '@kubernetes/client-node' import * as k8s from '@kubernetes/client-node'
import { RunContainerStepArgs } from 'hooklib' import { RunContainerStepArgs } from 'hooklib'
import { dirname } from 'path'
import { import {
createContainerStepPod, createJob,
deletePod, createSecretForEnvs,
execCpFromPod, getContainerJobPodName,
execCpToPod, getPodLogs,
execPodStep, getPodStatus,
getPrepareJobTimeoutSeconds, waitForJobToComplete,
waitForPodPhases waitForPodPhases
} from '../k8s' } from '../k8s'
import { import {
CONTAINER_VOLUMES, containerVolumes,
mergeContainerWithOptions, DEFAULT_CONTAINER_ENTRY_POINT,
PodPhase,
readExtensionFromFile,
DEFAULT_CONTAINER_ENTRY_POINT_ARGS, DEFAULT_CONTAINER_ENTRY_POINT_ARGS,
writeContainerStepScript PodPhase,
writeEntryPointScript
} from '../k8s/utils' } from '../k8s/utils'
import { import { JOB_CONTAINER_NAME } from './constants'
getJobPodName,
getStepPodName,
JOB_CONTAINER_EXTENSION_NAME,
JOB_CONTAINER_NAME
} from './constants'
export async function runContainerStep( export async function runContainerStep(
stepContainer: RunContainerStepArgs stepContainer: RunContainerStepArgs
@@ -34,120 +26,85 @@ export async function runContainerStep(
throw new Error('Building container actions is not currently supported') throw new Error('Building container actions is not currently supported')
} }
if (!stepContainer.entryPoint) { let secretName: string | undefined = undefined
throw new Error( if (stepContainer.environmentVariables) {
'failed to start the container since the entrypoint is overwritten' secretName = await createSecretForEnvs(stepContainer.environmentVariables)
)
} }
const envs = stepContainer.environmentVariables || {} core.debug(`Created secret ${secretName} for container job envs`)
envs['GITHUB_ACTIONS'] = 'true' const container = createPodSpec(stepContainer, secretName)
if (!('CI' in envs)) {
envs.CI = 'true'
}
const extension = readExtensionFromFile() const job = await createJob(container)
if (!job.metadata?.name) {
const container = createContainerSpec(stepContainer, extension)
let pod: k8s.V1Pod
try {
pod = await createContainerStepPod(getStepPodName(), container, extension)
} catch (err) {
core.debug(`createJob failed: ${JSON.stringify(err)}`)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to run script step: ${message}`)
}
if (!pod.metadata?.name) {
throw new Error( throw new Error(
`Expected job ${JSON.stringify( `Expected job ${JSON.stringify(
pod job
)} to have correctly set the metadata.name` )} to have correctly set the metadata.name`
) )
} }
const podName = pod.metadata.name core.debug(`Job created, waiting for pod to start: ${job.metadata?.name}`)
try { const podName = await getContainerJobPodName(job.metadata.name)
await waitForPodPhases( await waitForPodPhases(
podName, podName,
new Set([PodPhase.RUNNING]), new Set([PodPhase.COMPLETED, PodPhase.RUNNING, PodPhase.SUCCEEDED]),
new Set([PodPhase.PENDING, PodPhase.UNKNOWN]), new Set([PodPhase.PENDING, PodPhase.UNKNOWN])
getPrepareJobTimeoutSeconds() )
) core.debug('Container step is running or complete, pulling logs')
const runnerWorkspace = dirname(process.env.RUNNER_WORKSPACE as string) await getPodLogs(podName, JOB_CONTAINER_NAME)
const githubWorkspace = process.env.GITHUB_WORKSPACE as string
const parts = githubWorkspace.split('/').slice(-2)
if (parts.length !== 2) {
throw new Error(`Invalid github workspace directory: ${githubWorkspace}`)
}
const relativeWorkspace = parts.join('/')
core.debug( core.debug('Waiting for container job to complete')
`Copying files from pod ${getJobPodName()} to ${runnerWorkspace}/${relativeWorkspace}` await waitForJobToComplete(job.metadata.name)
) // pod has failed so pull the status code from the container
await execCpFromPod(getJobPodName(), `/__w`, `${runnerWorkspace}`) const status = await getPodStatus(podName)
if (status?.phase === 'Succeeded') {
const { containerPath, runnerPath } = writeContainerStepScript( return 0
`${runnerWorkspace}/__w/_temp`,
githubWorkspace,
stepContainer.entryPoint,
stepContainer.entryPointArgs,
envs
)
await execCpToPod(podName, `${runnerWorkspace}/__w`, '/__w')
fs.rmSync(`${runnerWorkspace}/__w`, { recursive: true, force: true })
try {
core.debug(`Executing container step script in pod ${podName}`)
return await execPodStep(
['sh', '-e', containerPath],
pod.metadata.name,
JOB_CONTAINER_NAME
)
} catch (err) {
core.debug(`execPodStep failed: ${JSON.stringify(err)}`)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to run script step: ${message}`)
} finally {
fs.rmSync(runnerPath, { force: true })
}
} catch (error) {
core.error(`Failed to run container step: ${error}`)
throw error
} finally {
await deletePod(podName).catch(err => {
core.error(`Failed to delete step pod ${podName}: ${err}`)
})
} }
if (!status?.containerStatuses?.length) {
core.error(
`Can't determine container status from response: ${JSON.stringify(
status
)}`
)
return 1
}
const exitCode =
status.containerStatuses[status.containerStatuses.length - 1].state
?.terminated?.exitCode
return Number(exitCode) || 1
} }
function createContainerSpec( function createPodSpec(
container: RunContainerStepArgs, container: RunContainerStepArgs,
extension?: k8s.V1PodTemplateSpec secretName?: string
): k8s.V1Container { ): k8s.V1Container {
const podContainer = new k8s.V1Container() const podContainer = new k8s.V1Container()
podContainer.name = JOB_CONTAINER_NAME podContainer.name = JOB_CONTAINER_NAME
podContainer.image = container.image podContainer.image = container.image
podContainer.workingDir = '/__w'
podContainer.command = ['tail']
podContainer.args = DEFAULT_CONTAINER_ENTRY_POINT_ARGS
podContainer.volumeMounts = CONTAINER_VOLUMES const { entryPoint, entryPointArgs } = container
container.entryPoint = 'sh'
if (!extension) { const { containerPath } = writeEntryPointScript(
return podContainer container.workingDirectory,
} entryPoint || DEFAULT_CONTAINER_ENTRY_POINT,
entryPoint ? entryPointArgs || [] : DEFAULT_CONTAINER_ENTRY_POINT_ARGS
const from = extension.spec?.containers?.find(
c => c.name === JOB_CONTAINER_EXTENSION_NAME
) )
if (from) { container.entryPointArgs = ['-e', containerPath]
mergeContainerWithOptions(podContainer, from) podContainer.command = [container.entryPoint, ...container.entryPointArgs]
if (secretName) {
podContainer.envFrom = [
{
secretRef: {
name: secretName,
optional: false
}
}
]
} }
podContainer.volumeMounts = containerVolumes(undefined, false, true)
return podContainer return podContainer
} }


@@ -1,20 +1,17 @@
/* eslint-disable @typescript-eslint/no-unused-vars */ /* eslint-disable @typescript-eslint/no-unused-vars */
import * as fs from 'fs' import * as fs from 'fs'
import * as core from '@actions/core'
import { RunScriptStepArgs } from 'hooklib' import { RunScriptStepArgs } from 'hooklib'
import { execCpFromPod, execCpToPod, execPodStep } from '../k8s' import { execPodStep } from '../k8s'
import { writeRunScript, sleep, listDirAllCommand } from '../k8s/utils' import { writeEntryPointScript } from '../k8s/utils'
import { JOB_CONTAINER_NAME } from './constants' import { JOB_CONTAINER_NAME } from './constants'
import { dirname } from 'path'
import * as shlex from 'shlex'
export async function runScriptStep( export async function runScriptStep(
args: RunScriptStepArgs, args: RunScriptStepArgs,
state state,
responseFile
): Promise<void> { ): Promise<void> {
// Write the entrypoint first. This will be later coppied to the workflow pod
const { entryPoint, entryPointArgs, environmentVariables } = args const { entryPoint, entryPointArgs, environmentVariables } = args
const { containerPath, runnerPath } = writeRunScript( const { containerPath, runnerPath } = writeEntryPointScript(
args.workingDirectory, args.workingDirectory,
entryPoint, entryPoint,
entryPointArgs, entryPointArgs,
@@ -22,55 +19,6 @@ export async function runScriptStep(
environmentVariables environmentVariables
) )
const workdir = dirname(process.env.RUNNER_WORKSPACE as string)
const runnerTemp = `${workdir}/_temp`
const containerTemp = '/__w/_temp'
const containerTempSrc = '/__w/_temp_pre'
// Ensure base and staging dirs exist before copying
await execPodStep(
[
'sh',
'-c',
'mkdir -p /__w && mkdir -p /__w/_temp && mkdir -p /__w/_temp_pre'
],
state.jobPod,
JOB_CONTAINER_NAME
)
await execCpToPod(state.jobPod, runnerTemp, containerTempSrc)
// Copy GitHub directories from temp to /github
// Merge strategy:
// - Overwrite files in _runner_file_commands
// - Append files not already present elsewhere
const mergeCommands = [
'set -e',
'mkdir -p /__w/_temp /__w/_temp_pre',
'SRC=/__w/_temp_pre',
'DST=/__w/_temp',
// Overwrite _runner_file_commands
`find "$SRC" -type f ! -path "*/_runner_file_commands/*" -exec sh -c '
rel="\${1#$2/}"
target="$3/$rel"
mkdir -p "$(dirname "$target")"
cp -a "$1" "$target"
' _ {} "$SRC" "$DST" \\;`,
// Remove _temp_pre after merging
'rm -rf /__w/_temp_pre'
]
try {
await execPodStep(
['sh', '-c', mergeCommands.join(' && ')],
state.jobPod,
JOB_CONTAINER_NAME
)
} catch (err) {
core.debug(`Failed to merge temp directories: ${JSON.stringify(err)}`)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to merge temp dirs: ${message}`)
}
// Execute the entrypoint script
args.entryPoint = 'sh' args.entryPoint = 'sh'
args.entryPointArgs = ['-e', containerPath] args.entryPointArgs = ['-e', containerPath]
try { try {
@@ -80,27 +28,8 @@ export async function runScriptStep(
JOB_CONTAINER_NAME JOB_CONTAINER_NAME
) )
} catch (err) { } catch (err) {
core.debug(`execPodStep failed: ${JSON.stringify(err)}`) throw new Error(`failed to run script step: ${err}`)
const message = (err as any)?.response?.body?.message || err
throw new Error(`failed to run script step: ${message}`)
} finally { } finally {
try { fs.rmSync(runnerPath)
fs.rmSync(runnerPath, { force: true })
} catch (removeErr) {
core.debug(`Failed to remove file ${runnerPath}: ${removeErr}`)
}
}
try {
core.debug(
`Copying from job pod '${state.jobPod}' ${containerTemp} to ${runnerTemp}`
)
await execCpFromPod(
state.jobPod,
`${containerTemp}/_runner_file_commands`,
`${workdir}/_temp`
)
} catch (error) {
core.warning('Failed to copy _temp from pod')
} }
} }


@@ -1,11 +1,5 @@
import * as core from '@actions/core' import * as core from '@actions/core'
import { import { Command, getInputFromStdin, prepareJobArgs } from 'hooklib'
Command,
getInputFromStdin,
PrepareJobArgs,
RunContainerStepArgs,
RunScriptStepArgs
} from 'hooklib'
import { import {
cleanupJob, cleanupJob,
prepareJob, prepareJob,
@@ -15,13 +9,15 @@ import {
import { isAuthPermissionsOK, namespace, requiredPermissions } from './k8s' import { isAuthPermissionsOK, namespace, requiredPermissions } from './k8s'
async function run(): Promise<void> { async function run(): Promise<void> {
try { const input = await getInputFromStdin()
const input = await getInputFromStdin()
const args = input['args'] const args = input['args']
const command = input['command'] const command = input['command']
const responseFile = input['responseFile'] const responseFile = input['responseFile']
const state = input['state'] const state = input['state']
let exitCode = 0
try {
if (!(await isAuthPermissionsOK())) { if (!(await isAuthPermissionsOK())) {
throw new Error( throw new Error(
`The Service account needs the following permissions ${JSON.stringify( `The Service account needs the following permissions ${JSON.stringify(
@@ -29,28 +25,28 @@ async function run(): Promise<void> {
)} on the pod resource in the '${namespace()}' namespace. Please contact your self hosted runner administrator.` )} on the pod resource in the '${namespace()}' namespace. Please contact your self hosted runner administrator.`
) )
} }
let exitCode = 0
switch (command) { switch (command) {
case Command.PrepareJob: case Command.PrepareJob:
await prepareJob(args as PrepareJobArgs, responseFile) await prepareJob(args as prepareJobArgs, responseFile)
return process.exit(0) break
case Command.CleanupJob: case Command.CleanupJob:
await cleanupJob() await cleanupJob()
return process.exit(0) break
case Command.RunScriptStep: case Command.RunScriptStep:
await runScriptStep(args as RunScriptStepArgs, state) await runScriptStep(args, state, null)
return process.exit(0) break
case Command.RunContainerStep: case Command.RunContainerStep:
exitCode = await runContainerStep(args as RunContainerStepArgs) exitCode = await runContainerStep(args)
return process.exit(exitCode) break
case Command.runContainerStep:
default: default:
throw new Error(`Command not recognized: ${command}`) throw new Error(`Command not recognized: ${command}`)
} }
} catch (error) { } catch (error) {
core.error(error as Error) core.error(error as Error)
process.exit(1) exitCode = 1
} }
process.exitCode = exitCode
} }
void run() void run()


@@ -1,29 +1,16 @@
import * as core from '@actions/core' import * as core from '@actions/core'
import * as path from 'path'
import { spawn } from 'child_process'
import * as k8s from '@kubernetes/client-node' import * as k8s from '@kubernetes/client-node'
import tar from 'tar-fs' import { ContainerInfo, Registry } from 'hooklib'
import * as stream from 'stream' import * as stream from 'stream'
import { WritableStreamBuffer } from 'stream-buffers'
import { createHash } from 'crypto'
import type { ContainerInfo, Registry } from 'hooklib'
import { import {
getJobPodName,
getRunnerPodName,
getSecretName, getSecretName,
JOB_CONTAINER_NAME, getStepPodName,
getVolumeClaimName,
RunnerInstanceLabel RunnerInstanceLabel
} from '../hooks/constants' } from '../hooks/constants'
import { import { PodPhase } from './utils'
PodPhase,
mergePodSpecWithOptions,
mergeObjectMeta,
fixArgs,
listDirAllCommand,
sleep,
EXTERNALS_VOLUME_NAME,
GITHUB_VOLUME_NAME,
WORK_VOLUME
} from './utils'
import * as shlex from 'shlex'
const kc = new k8s.KubeConfig() const kc = new k8s.KubeConfig()
@@ -33,7 +20,7 @@ const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
const k8sBatchV1Api = kc.makeApiClient(k8s.BatchV1Api) const k8sBatchV1Api = kc.makeApiClient(k8s.BatchV1Api)
const k8sAuthorizationV1Api = kc.makeApiClient(k8s.AuthorizationV1Api) const k8sAuthorizationV1Api = kc.makeApiClient(k8s.AuthorizationV1Api)
const DEFAULT_WAIT_FOR_POD_TIME_SECONDS = 10 * 60 // 10 min export const POD_VOLUME_NAME = 'work'
export const requiredPermissions = [ export const requiredPermissions = [
{ {
@@ -54,6 +41,12 @@ export const requiredPermissions = [
resource: 'pods', resource: 'pods',
subresource: 'log' subresource: 'log'
}, },
{
group: 'batch',
verbs: ['get', 'list', 'create', 'delete'],
resource: 'jobs',
subresource: ''
},
{ {
group: '', group: '',
verbs: ['create', 'delete', 'get', 'list'], verbs: ['create', 'delete', 'get', 'list'],
@@ -62,12 +55,10 @@ export const requiredPermissions = [
} }
] ]
export async function createJobPod( export async function createPod(
name: string,
jobContainer?: k8s.V1Container, jobContainer?: k8s.V1Container,
services?: k8s.V1Container[], services?: k8s.V1Container[],
registry?: Registry, registry?: Registry
extension?: k8s.V1PodTemplateSpec
): Promise<k8s.V1Pod> { ): Promise<k8s.V1Pod> {
const containers: k8s.V1Container[] = [] const containers: k8s.V1Container[] = []
if (jobContainer) { if (jobContainer) {
@@ -83,78 +74,22 @@ export async function createJobPod(
appPod.kind = 'Pod' appPod.kind = 'Pod'
appPod.metadata = new k8s.V1ObjectMeta() appPod.metadata = new k8s.V1ObjectMeta()
appPod.metadata.name = name appPod.metadata.name = getJobPodName()
const instanceLabel = new RunnerInstanceLabel() const instanceLabel = new RunnerInstanceLabel()
appPod.metadata.labels = { appPod.metadata.labels = {
[instanceLabel.key]: instanceLabel.value [instanceLabel.key]: instanceLabel.value
} }
appPod.metadata.annotations = {}
appPod.spec = new k8s.V1PodSpec() appPod.spec = new k8s.V1PodSpec()
appPod.spec.containers = containers appPod.spec.containers = containers
appPod.spec.securityContext = {
fsGroup: 1001
}
// Extract working directory from GITHUB_WORKSPACE
// GITHUB_WORKSPACE is like /__w/repo-name/repo-name
const githubWorkspace = process.env.GITHUB_WORKSPACE
const workingDirPath = githubWorkspace?.split('/').slice(-2).join('/') ?? ''
const initCommands = [
'mkdir -p /mnt/externals',
'mkdir -p /mnt/work',
'mkdir -p /mnt/github',
'mv /home/runner/externals/* /mnt/externals/'
]
if (workingDirPath) {
initCommands.push(`mkdir -p /mnt/work/${workingDirPath}`)
}
appPod.spec.initContainers = [
{
name: 'fs-init',
image:
process.env.ACTIONS_RUNNER_IMAGE ||
'ghcr.io/actions/actions-runner:latest',
command: ['sh', '-c', initCommands.join(' && ')],
securityContext: {
runAsGroup: 1001,
runAsUser: 1001
},
volumeMounts: [
{
name: EXTERNALS_VOLUME_NAME,
mountPath: '/mnt/externals'
},
{
name: WORK_VOLUME,
mountPath: '/mnt/work'
},
{
name: GITHUB_VOLUME_NAME,
mountPath: '/mnt/github'
}
]
}
]
appPod.spec.restartPolicy = 'Never' appPod.spec.restartPolicy = 'Never'
appPod.spec.nodeName = await getCurrentNodeName()
const claimName = getVolumeClaimName()
appPod.spec.volumes = [ appPod.spec.volumes = [
{ {
name: EXTERNALS_VOLUME_NAME, name: 'work',
emptyDir: {} persistentVolumeClaim: { claimName }
},
{
name: GITHUB_VOLUME_NAME,
emptyDir: {}
},
{
name: WORK_VOLUME,
emptyDir: {}
} }
] ]
@@ -168,79 +103,80 @@ export async function createJobPod(
appPod.spec.imagePullSecrets = [secretReference] appPod.spec.imagePullSecrets = [secretReference]
} }
if (extension?.metadata) { const { body } = await k8sApi.createNamespacedPod(namespace(), appPod)
mergeObjectMeta(appPod, extension.metadata) return body
}
if (extension?.spec) {
mergePodSpecWithOptions(appPod.spec, extension.spec)
}
return await k8sApi.createNamespacedPod({
namespace: namespace(),
body: appPod
})
} }
export async function createContainerStepPod( export async function createJob(
name: string, container: k8s.V1Container
container: k8s.V1Container, ): Promise<k8s.V1Job> {
extension?: k8s.V1PodTemplateSpec const runnerInstanceLabel = new RunnerInstanceLabel()
): Promise<k8s.V1Pod> {
const appPod = new k8s.V1Pod()
appPod.apiVersion = 'v1' const job = new k8s.V1Job()
appPod.kind = 'Pod' job.apiVersion = 'batch/v1'
job.kind = 'Job'
job.metadata = new k8s.V1ObjectMeta()
job.metadata.name = getStepPodName()
job.metadata.labels = { [runnerInstanceLabel.key]: runnerInstanceLabel.value }
appPod.metadata = new k8s.V1ObjectMeta() job.spec = new k8s.V1JobSpec()
appPod.metadata.name = name job.spec.ttlSecondsAfterFinished = 300
job.spec.backoffLimit = 0
job.spec.template = new k8s.V1PodTemplateSpec()
const instanceLabel = new RunnerInstanceLabel() job.spec.template.spec = new k8s.V1PodSpec()
appPod.metadata.labels = { job.spec.template.spec.containers = [container]
[instanceLabel.key]: instanceLabel.value job.spec.template.spec.restartPolicy = 'Never'
} job.spec.template.spec.nodeName = await getCurrentNodeName()
appPod.metadata.annotations = {}
appPod.spec = new k8s.V1PodSpec() const claimName = getVolumeClaimName()
appPod.spec.containers = [container] job.spec.template.spec.volumes = [
appPod.spec.restartPolicy = 'Never'
appPod.spec.volumes = [
{ {
name: EXTERNALS_VOLUME_NAME, name: 'work',
emptyDir: {} persistentVolumeClaim: { claimName }
},
{
name: GITHUB_VOLUME_NAME,
emptyDir: {}
},
{
name: WORK_VOLUME,
emptyDir: {}
} }
] ]
if (extension?.metadata) { const { body } = await k8sBatchV1Api.createNamespacedJob(namespace(), job)
mergeObjectMeta(appPod, extension.metadata) return body
}
if (extension?.spec) {
mergePodSpecWithOptions(appPod.spec, extension.spec)
}
return await k8sApi.createNamespacedPod({
namespace: namespace(),
body: appPod
})
} }
export async function deletePod(name: string): Promise<void> { export async function getContainerJobPodName(jobName: string): Promise<string> {
await k8sApi.deleteNamespacedPod({ const selector = `job-name=${jobName}`
name, const backOffManager = new BackOffManager(60)
namespace: namespace(), while (true) {
gracePeriodSeconds: 0 const podList = await k8sApi.listNamespacedPod(
}) namespace(),
undefined,
undefined,
undefined,
undefined,
selector,
1
)
if (!podList.body.items?.length) {
await backOffManager.backOff()
continue
}
if (!podList.body.items[0].metadata?.name) {
throw new Error(
`Failed to determine the name of the pod for job ${jobName}`
)
}
return podList.body.items[0].metadata.name
}
}
export async function deletePod(podName: string): Promise<void> {
await k8sApi.deleteNamespacedPod(
podName,
namespace(),
undefined,
undefined,
0
)
} }
export async function execPodStep( export async function execPodStep(
@@ -248,322 +184,34 @@ export async function execPodStep(
podName: string, podName: string,
containerName: string, containerName: string,
stdin?: stream.Readable stdin?: stream.Readable
): Promise<number> {
const exec = new k8s.Exec(kc)
command = fixArgs(command)
return await new Promise(function (resolve, reject) {
exec
.exec(
namespace(),
podName,
containerName,
command,
process.stdout,
process.stderr,
stdin ?? null,
false /* tty */,
resp => {
core.debug(`execPodStep response: ${JSON.stringify(resp)}`)
if (resp.status === 'Success') {
resolve(resp.code || 0)
} else {
core.debug(
JSON.stringify({
message: resp?.message,
details: resp?.details
})
)
reject(new Error(resp?.message || 'execPodStep failed'))
}
}
)
.catch(e => reject(e))
})
}
export async function execCalculateOutputHashSorted(
podName: string,
containerName: string,
command: string[]
): Promise<string> {
const exec = new k8s.Exec(kc)
let output = ''
const outputWriter = new stream.Writable({
write(chunk, _enc, cb) {
try {
output += chunk.toString('utf8')
cb()
} catch (e) {
cb(e as Error)
}
}
})
await new Promise<void>((resolve, reject) => {
exec
.exec(
namespace(),
podName,
containerName,
command,
outputWriter, // capture stdout
process.stderr,
null,
false /* tty */,
resp => {
core.debug(`internalExecOutput response: ${JSON.stringify(resp)}`)
if (resp.status === 'Success') {
resolve()
} else {
core.debug(
JSON.stringify({
message: resp?.message,
details: resp?.details
})
)
reject(new Error(resp?.message || 'internalExecOutput failed'))
}
}
)
.catch(e => reject(e))
})
outputWriter.end()
// Sort lines for consistent ordering across platforms
const sortedOutput =
output
.split('\n')
.filter(line => line.length > 0)
.sort()
.join('\n') + '\n'
const hash = createHash('sha256')
hash.update(sortedOutput)
return hash.digest('hex')
}
export async function localCalculateOutputHashSorted(
commands: string[]
): Promise<string> {
return await new Promise<string>((resolve, reject) => {
const child = spawn(commands[0], commands.slice(1), {
stdio: ['ignore', 'pipe', 'ignore']
})
let output = ''
child.stdout.on('data', chunk => {
output += chunk.toString('utf8')
})
child.on('error', reject)
child.on('close', (code: number) => {
if (code === 0) {
// Sort lines for consistent ordering across distributions/platforms
const sortedOutput =
output
.split('\n')
.filter(line => line.length > 0)
.sort()
.join('\n') + '\n'
const hash = createHash('sha256')
hash.update(sortedOutput)
resolve(hash.digest('hex'))
} else {
reject(new Error(`child process exited with code ${code}`))
}
})
})
}
export async function execCpToPod(
podName: string,
runnerPath: string,
containerPath: string
): Promise<void> { ): Promise<void> {
core.debug(`Copying ${runnerPath} to pod ${podName} at ${containerPath}`) const exec = new k8s.Exec(kc)
await new Promise(async function (resolve, reject) {
let attempt = 0 await exec.exec(
while (true) { namespace(),
try { podName,
const exec = new k8s.Exec(kc) containerName,
// Use tar to extract with --no-same-owner to avoid ownership issues. command,
// Then use find to fix permissions. The -m flag helps but we also need to fix permissions after. process.stdout,
const command = [ process.stderr,
'sh', stdin ?? null,
'-c', false /* tty */,
`tar xf - --no-same-owner -C ${shlex.quote(containerPath)} 2>/dev/null; ` + resp => {
`find ${shlex.quote(containerPath)} -type f -exec chmod u+rw {} \\; 2>/dev/null; ` + // kube.exec returns an error if exit code is not 0, but we can't actually get the exit code
`find ${shlex.quote(containerPath)} -type d -exec chmod u+rwx {} \\; 2>/dev/null` if (resp.status === 'Success') {
] resolve(resp.code)
const readStream = tar.pack(runnerPath) } else {
const errStream = new WritableStreamBuffer() core.debug(
await new Promise((resolve, reject) => { JSON.stringify({
exec message: resp?.message,
.exec( details: resp?.details
namespace(), })
podName,
JOB_CONTAINER_NAME,
command,
null,
errStream,
readStream,
false,
async status => {
if (errStream.size()) {
reject(
new Error(
`Error from execCpToPod - status: ${status.status}, details: \n ${errStream.getContentsAsString()}`
)
)
}
resolve(status)
}
) )
.catch(e => reject(e)) reject(resp?.message)
}) }
break
} catch (error) {
core.debug(`cpToPod: Attempt ${attempt + 1} failed: ${error}`)
attempt++
if (attempt >= 30) {
throw new Error(
`cpToPod failed after ${attempt} attempts: ${JSON.stringify(error)}`
)
} }
await sleep(1000) )
} })
}
let attempts = 15
const delay = 1000
for (let i = 0; i < attempts; i++) {
try {
const want = await localCalculateOutputHashSorted([
'sh',
'-c',
listDirAllCommand(runnerPath)
])
const got = await execCalculateOutputHashSorted(
podName,
JOB_CONTAINER_NAME,
['sh', '-c', listDirAllCommand(containerPath)]
)
if (got !== want) {
core.debug(
`The hash of the directory does not match the expected value; want='${want}' got='${got}'`
)
await sleep(delay)
continue
}
break
} catch (error) {
core.debug(`Attempt ${i + 1} failed: ${error}`)
await sleep(delay)
}
}
}
export async function execCpFromPod(
podName: string,
containerPath: string,
parentRunnerPath: string
): Promise<void> {
const targetRunnerPath = `${parentRunnerPath}/${path.basename(containerPath)}`
core.debug(
`Copying from pod ${podName} ${containerPath} to ${targetRunnerPath}`
)
let attempt = 0
while (true) {
try {
// make temporary directory
const exec = new k8s.Exec(kc)
const containerPaths = containerPath.split('/')
const dirname = containerPaths.pop() as string
const command = [
'tar',
'cf',
'-',
'-C',
containerPaths.join('/') || '/',
dirname
]
const writerStream = tar.extract(parentRunnerPath)
const errStream = new WritableStreamBuffer()
await new Promise((resolve, reject) => {
exec
.exec(
namespace(),
podName,
JOB_CONTAINER_NAME,
command,
writerStream,
errStream,
null,
false,
async status => {
if (errStream.size()) {
reject(
new Error(
`Error from cpFromPod - details: \n ${errStream.getContentsAsString()}`
)
)
}
resolve(status)
}
)
.catch(e => reject(e))
})
break
} catch (error) {
core.debug(`Attempt ${attempt + 1} failed: ${error}`)
attempt++
if (attempt >= 30) {
throw new Error(
`execCpFromPod failed after ${attempt} attempts: ${JSON.stringify(error)}`
)
}
await sleep(1000)
}
}
let attempts = 15
const delay = 1000
for (let i = 0; i < attempts; i++) {
try {
const want = await execCalculateOutputHashSorted(
podName,
JOB_CONTAINER_NAME,
['sh', '-c', listDirAllCommand(containerPath)]
)
const got = await localCalculateOutputHashSorted([
'sh',
'-c',
listDirAllCommand(targetRunnerPath)
])
if (got !== want) {
core.debug(
`The hash of the directory does not match the expected value; want='${want}' got='${got}'`
)
await sleep(delay)
continue
}
break
} catch (error) {
core.debug(`Attempt ${i + 1} failed: ${error}`)
await sleep(delay)
}
}
} }
export async function waitForJobToComplete(jobName: string): Promise<void> { export async function waitForJobToComplete(jobName: string): Promise<void> {
@@ -574,7 +222,7 @@ export async function waitForJobToComplete(jobName: string): Promise<void> {
return
}
} catch (error) {
-throw new Error(`job ${jobName} has failed: ${JSON.stringify(error)}`)
+throw new Error(`job ${jobName} has failed`)
}
await backOffManager.backOff()
}
@@ -615,10 +263,8 @@ export async function createDockerSecret(
) )
} }
return await k8sApi.createNamespacedSecret({ const { body } = await k8sApi.createNamespacedSecret(namespace(), secret)
namespace: namespace(), return body
body: secret
})
} }
export async function createSecretForEnvs(envs: { export async function createSecretForEnvs(envs: {
@@ -642,33 +288,30 @@ export async function createSecretForEnvs(envs: {
secret.data[key] = Buffer.from(value).toString('base64') secret.data[key] = Buffer.from(value).toString('base64')
} }
await k8sApi.createNamespacedSecret({ await k8sApi.createNamespacedSecret(namespace(), secret)
namespace: namespace(),
body: secret
})
return secretName return secretName
} }
export async function deleteSecret(name: string): Promise<void> { export async function deleteSecret(secretName: string): Promise<void> {
await k8sApi.deleteNamespacedSecret({ await k8sApi.deleteNamespacedSecret(secretName, namespace())
name,
namespace: namespace()
})
} }
export async function pruneSecrets(): Promise<void> { export async function pruneSecrets(): Promise<void> {
const secretList = await k8sApi.listNamespacedSecret({ const secretList = await k8sApi.listNamespacedSecret(
namespace: namespace(), namespace(),
labelSelector: new RunnerInstanceLabel().toString() undefined,
}) undefined,
if (!secretList.items.length) { undefined,
undefined,
new RunnerInstanceLabel().toString()
)
if (!secretList.body.items.length) {
return return
} }
await Promise.all( await Promise.all(
secretList.items.map( secretList.body.items.map(
async secret => secret => secret.metadata?.name && deleteSecret(secret.metadata.name)
secret.metadata?.name && (await deleteSecret(secret.metadata.name))
) )
) )
} }
@@ -677,7 +320,7 @@ export async function waitForPodPhases(
podName: string,
awaitingPhases: Set<PodPhase>,
backOffPhases: Set<PodPhase>,
-maxTimeSeconds = DEFAULT_WAIT_FOR_POD_TIME_SECONDS
+maxTimeSeconds = 10 * 60 // 10 min
): Promise<void> {
const backOffManager = new BackOffManager(maxTimeSeconds)
let phase: PodPhase = PodPhase.UNKNOWN
@@ -696,32 +339,11 @@ export async function waitForPodPhases(
await backOffManager.backOff() await backOffManager.backOff()
} }
} catch (error) { } catch (error) {
throw new Error( throw new Error(`Pod ${podName} is unhealthy with phase status ${phase}`)
`Pod ${podName} is unhealthy with phase status ${phase}: ${JSON.stringify(error)}`
)
} }
} }
export function getPrepareJobTimeoutSeconds(): number { async function getPodPhase(podName: string): Promise<PodPhase> {
const envTimeoutSeconds =
process.env['ACTIONS_RUNNER_PREPARE_JOB_TIMEOUT_SECONDS']
if (!envTimeoutSeconds) {
return DEFAULT_WAIT_FOR_POD_TIME_SECONDS
}
const timeoutSeconds = parseInt(envTimeoutSeconds, 10)
if (!timeoutSeconds || timeoutSeconds <= 0) {
core.warning(
`Prepare job timeout is invalid ("${timeoutSeconds}"): use an int > 0`
)
return DEFAULT_WAIT_FOR_POD_TIME_SECONDS
}
return timeoutSeconds
}
async function getPodPhase(name: string): Promise<PodPhase> {
const podPhaseLookup = new Set<string>([ const podPhaseLookup = new Set<string>([
PodPhase.PENDING, PodPhase.PENDING,
PodPhase.RUNNING, PodPhase.RUNNING,
@@ -729,10 +351,8 @@ async function getPodPhase(name: string): Promise<PodPhase> {
PodPhase.FAILED, PodPhase.FAILED,
PodPhase.UNKNOWN PodPhase.UNKNOWN
]) ])
const pod = await k8sApi.readNamespacedPod({ const { body } = await k8sApi.readNamespacedPod(podName, namespace())
name, const pod = body
namespace: namespace()
})
if (!pod.status?.phase || !podPhaseLookup.has(pod.status.phase)) { if (!pod.status?.phase || !podPhaseLookup.has(pod.status.phase)) {
return PodPhase.UNKNOWN return PodPhase.UNKNOWN
@@ -740,13 +360,11 @@ async function getPodPhase(name: string): Promise<PodPhase> {
return pod.status?.phase as PodPhase return pod.status?.phase as PodPhase
} }
async function isJobSucceeded(name: string): Promise<boolean> { async function isJobSucceeded(jobName: string): Promise<boolean> {
const job = await k8sBatchV1Api.readNamespacedJob({ const { body } = await k8sBatchV1Api.readNamespacedJob(jobName, namespace())
name, const job = body
namespace: namespace()
})
if (job.status?.failed) { if (job.status?.failed) {
throw new Error(`job ${name} has failed`) throw new Error(`job ${jobName} has failed`)
} }
return !!job.status?.succeeded return !!job.status?.succeeded
} }
@@ -766,26 +384,31 @@ export async function getPodLogs(
process.stderr.write(err.message)
})
-await log.log(namespace(), podName, containerName, logStream, {
+const r = await log.log(namespace(), podName, containerName, logStream, {
follow: true,
tailLines: 50,
pretty: false,
timestamps: false
})
-await new Promise(resolve => logStream.on('end', () => resolve(null)))
+await new Promise(resolve => r.on('close', () => resolve(null)))
}
export async function prunePods(): Promise<void> { export async function prunePods(): Promise<void> {
const podList = await k8sApi.listNamespacedPod({ const podList = await k8sApi.listNamespacedPod(
namespace: namespace(), namespace(),
labelSelector: new RunnerInstanceLabel().toString() undefined,
}) undefined,
if (!podList.items.length) { undefined,
undefined,
new RunnerInstanceLabel().toString()
)
if (!podList.body.items.length) {
return return
} }
await Promise.all( await Promise.all(
podList.items.map( podList.body.items.map(
async pod => pod.metadata?.name && (await deletePod(pod.metadata.name)) pod => pod.metadata?.name && deletePod(pod.metadata.name)
) )
) )
} }
@@ -793,16 +416,16 @@ export async function prunePods(): Promise<void> {
export async function getPodStatus( export async function getPodStatus(
name: string name: string
): Promise<k8s.V1PodStatus | undefined> { ): Promise<k8s.V1PodStatus | undefined> {
const pod = await k8sApi.readNamespacedPod({ const { body } = await k8sApi.readNamespacedPod(name, namespace())
name, return body.status
namespace: namespace()
})
return pod.status
} }
export async function isAuthPermissionsOK(): Promise<boolean> { export async function isAuthPermissionsOK(): Promise<boolean> {
const sar = new k8s.V1SelfSubjectAccessReview() const sar = new k8s.V1SelfSubjectAccessReview()
const asyncs: Promise<k8s.V1SelfSubjectAccessReview>[] = [] const asyncs: Promise<{
response: unknown
body: k8s.V1SelfSubjectAccessReview
}>[] = []
for (const resource of requiredPermissions) { for (const resource of requiredPermissions) {
for (const verb of resource.verbs) { for (const verb of resource.verbs) {
sar.spec = new k8s.V1SelfSubjectAccessReviewSpec() sar.spec = new k8s.V1SelfSubjectAccessReviewSpec()
@@ -812,13 +435,11 @@ export async function isAuthPermissionsOK(): Promise<boolean> {
sar.spec.resourceAttributes.group = resource.group sar.spec.resourceAttributes.group = resource.group
sar.spec.resourceAttributes.resource = resource.resource sar.spec.resourceAttributes.resource = resource.resource
sar.spec.resourceAttributes.subresource = resource.subresource sar.spec.resourceAttributes.subresource = resource.subresource
asyncs.push( asyncs.push(k8sAuthorizationV1Api.createSelfSubjectAccessReview(sar))
k8sAuthorizationV1Api.createSelfSubjectAccessReview({ body: sar })
)
} }
} }
const responses = await Promise.all(asyncs) const responses = await Promise.all(asyncs)
return responses.every(resp => resp.status?.allowed) return responses.every(resp => resp.body.status?.allowed)
} }
export async function isPodContainerAlpine( export async function isPodContainerAlpine(
@@ -831,18 +452,27 @@ export async function isPodContainerAlpine(
[
'sh',
'-c',
-`[ $(cat /etc/*release* | grep -i -e "^ID=*alpine*" -c) != 0 ] || exit 1`
+"[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1"
],
podName,
containerName
)
-} catch {
+} catch (err) {
isAlpine = false
}
return isAlpine
}
async function getCurrentNodeName(): Promise<string> {
const resp = await k8sApi.readNamespacedPod(getRunnerPodName(), namespace())
const nodeName = resp.body.spec?.nodeName
if (!nodeName) {
throw new Error('Failed to determine node name')
}
return nodeName
}
export function namespace(): string { export function namespace(): string {
if (process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']) { if (process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']) {
return process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE'] return process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']
@@ -886,48 +516,28 @@ class BackOffManager {
export function containerPorts( export function containerPorts(
container: ContainerInfo container: ContainerInfo
): k8s.V1ContainerPort[] { ): k8s.V1ContainerPort[] {
// 8080:8080/tcp
const portFormat = /(\d{1,5})(:(\d{1,5}))?(\/(tcp|udp))?/
const ports: k8s.V1ContainerPort[] = [] const ports: k8s.V1ContainerPort[] = []
if (!container.portMappings?.length) {
return ports
}
for (const portDefinition of container.portMappings) { for (const portDefinition of container.portMappings) {
const portProtoSplit = portDefinition.split('/') const submatches = portFormat.exec(portDefinition)
if (portProtoSplit.length > 2) { if (!submatches) {
throw new Error(`Unexpected port format: ${portDefinition}`) throw new Error(
`Port definition "${portDefinition}" is in incorrect format`
)
} }
const port = new k8s.V1ContainerPort() const port = new k8s.V1ContainerPort()
port.protocol = port.hostPort = Number(submatches[1])
portProtoSplit.length === 2 ? portProtoSplit[1].toUpperCase() : 'TCP' if (submatches[3]) {
port.containerPort = Number(submatches[3])
const portSplit = portProtoSplit[0].split(':')
if (portSplit.length > 2) {
throw new Error('ports should have at most one ":" separator')
} }
if (submatches[5]) {
const parsePort = (p: string): number => { port.protocol = submatches[5].toUpperCase()
const num = Number(p)
if (!Number.isInteger(num) || num < 1 || num > 65535) {
throw new Error(`invalid container port: ${p}`)
}
return num
}
if (portSplit.length === 1) {
port.containerPort = parsePort(portSplit[0])
} else { } else {
port.hostPort = parsePort(portSplit[0]) port.protocol = 'TCP'
port.containerPort = parsePort(portSplit[1])
} }
ports.push(port) ports.push(port)
} }
return ports return ports
} }
export async function getPodByName(name): Promise<k8s.V1Pod> {
return await k8sApi.readNamespacedPod({
name,
namespace: namespace()
})
}


@@ -1,60 +1,96 @@
import * as k8s from '@kubernetes/client-node' import * as k8s from '@kubernetes/client-node'
import * as fs from 'fs' import * as fs from 'fs'
import * as yaml from 'js-yaml'
import * as core from '@actions/core'
import { v1 as uuidv4 } from 'uuid'
import { CONTAINER_EXTENSION_PREFIX } from '../hooks/constants'
import * as shlex from 'shlex'
import { Mount } from 'hooklib' import { Mount } from 'hooklib'
import * as path from 'path'
import { v1 as uuidv4 } from 'uuid'
import { POD_VOLUME_NAME } from './index'
export const DEFAULT_CONTAINER_ENTRY_POINT_ARGS = [`-f`, `/dev/null`] export const DEFAULT_CONTAINER_ENTRY_POINT_ARGS = [`-f`, `/dev/null`]
export const DEFAULT_CONTAINER_ENTRY_POINT = 'tail' export const DEFAULT_CONTAINER_ENTRY_POINT = 'tail'
export const ENV_HOOK_TEMPLATE_PATH = 'ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE' export function containerVolumes(
export const ENV_USE_KUBE_SCHEDULER = 'ACTIONS_RUNNER_USE_KUBE_SCHEDULER' userMountVolumes: Mount[] = [],
jobContainer = true,
containerAction = false
): k8s.V1VolumeMount[] {
const mounts: k8s.V1VolumeMount[] = [
{
name: POD_VOLUME_NAME,
mountPath: '/__w'
}
]
export const EXTERNALS_VOLUME_NAME = 'externals' const workspacePath = process.env.GITHUB_WORKSPACE as string
export const GITHUB_VOLUME_NAME = 'github' if (containerAction) {
export const WORK_VOLUME = 'work' mounts.push(
{
export const CONTAINER_VOLUMES: k8s.V1VolumeMount[] = [ name: POD_VOLUME_NAME,
{ mountPath: '/github/workspace',
name: EXTERNALS_VOLUME_NAME, subPath: workspacePath.substring(workspacePath.indexOf('work/') + 1)
mountPath: '/__e' },
}, {
{ name: POD_VOLUME_NAME,
name: WORK_VOLUME, mountPath: '/github/file_commands',
mountPath: '/__w' subPath: workspacePath.substring(workspacePath.indexOf('work/') + 1)
}, }
{ )
name: GITHUB_VOLUME_NAME, return mounts
mountPath: '/github'
} }
]
export function prepareJobScript(userVolumeMounts: Mount[]): { if (!jobContainer) {
containerPath: string return mounts
runnerPath: string
} {
let mountDirs = userVolumeMounts.map(m => m.targetVolumePath).join(' ')
const content = `#!/bin/sh -l
set -e
cp -R /__w/_temp/_github_home /github/home
cp -R /__w/_temp/_github_workflow /github/workflow
mkdir -p ${mountDirs}
`
const filename = `${uuidv4()}.sh`
const entryPointPath = `${process.env.RUNNER_TEMP}/${filename}`
fs.writeFileSync(entryPointPath, content)
return {
containerPath: `/__w/_temp/${filename}`,
runnerPath: entryPointPath
} }
mounts.push(
{
name: POD_VOLUME_NAME,
mountPath: '/__e',
subPath: 'externals'
},
{
name: POD_VOLUME_NAME,
mountPath: '/github/home',
subPath: '_temp/_github_home'
},
{
name: POD_VOLUME_NAME,
mountPath: '/github/workflow',
subPath: '_temp/_github_workflow'
}
)
if (!userMountVolumes?.length) {
return mounts
}
for (const userVolume of userMountVolumes) {
let sourceVolumePath = ''
if (path.isAbsolute(userVolume.sourceVolumePath)) {
if (!userVolume.sourceVolumePath.startsWith(workspacePath)) {
throw new Error(
'Volume mounts outside of the work folder are not supported'
)
}
// source volume path should be relative path
sourceVolumePath = userVolume.sourceVolumePath.slice(
workspacePath.length + 1
)
} else {
sourceVolumePath = userVolume.sourceVolumePath
}
mounts.push({
name: POD_VOLUME_NAME,
mountPath: userVolume.targetVolumePath,
subPath: sourceVolumePath,
readOnly: userVolume.readOnly
})
}
return mounts
} }
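
The user-mount handling in containerVolumes above only accepts absolute sourceVolumePath values that sit under GITHUB_WORKSPACE and rewrites them as subPaths of the shared work volume; relative paths are passed through unchanged. A rough sketch of that rule, with toSubPath as a hypothetical helper rather than anything exported by the hooks:

import * as path from 'path'

// Hypothetical helper mirroring the sourceVolumePath handling above:
// absolute paths must stay inside the workspace and become subPaths
// relative to it; relative paths are used as-is.
function toSubPath(sourceVolumePath: string, workspacePath: string): string {
  if (!path.isAbsolute(sourceVolumePath)) {
    return sourceVolumePath
  }
  if (!sourceVolumePath.startsWith(workspacePath)) {
    throw new Error('Volume mounts outside of the work folder are not supported')
  }
  // Drop the workspace prefix plus the path separator.
  return sourceVolumePath.slice(workspacePath.length + 1)
}

// toSubPath('/runner/_work/repo/repo/myvolume', '/runner/_work/repo/repo') => 'myvolume'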
export function writeRunScript( export function writeEntryPointScript(
workingDirectory: string, workingDirectory: string,
entryPoint: string, entryPoint: string,
entryPointArgs?: string[], entryPointArgs?: string[],
@@ -68,12 +104,22 @@ export function writeRunScript(
typeof prependPath === 'string' ? prependPath : prependPath.join(':') typeof prependPath === 'string' ? prependPath : prependPath.join(':')
exportPath = `export PATH=${prepend}:$PATH` exportPath = `export PATH=${prepend}:$PATH`
} }
let environmentPrefix = ''
let environmentPrefix = scriptEnv(environmentVariables) if (environmentVariables && Object.entries(environmentVariables).length) {
const envBuffer: string[] = []
for (const [key, value] of Object.entries(environmentVariables)) {
envBuffer.push(
`"${key}=${value
.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/=/g, '\\=')}"`
)
}
environmentPrefix = `env ${envBuffer.join(' ')} `
}
const content = `#!/bin/sh -l const content = `#!/bin/sh -l
set -e
rm "$0" # remove script after running
${exportPath} ${exportPath}
cd ${workingDirectory} && \ cd ${workingDirectory} && \
exec ${environmentPrefix} ${entryPoint} ${ exec ${environmentPrefix} ${entryPoint} ${
@@ -89,186 +135,6 @@ exec ${environmentPrefix} ${entryPoint} ${
} }
} }
export function writeContainerStepScript(
dst: string,
workingDirectory: string,
entryPoint: string,
entryPointArgs?: string[],
environmentVariables?: { [key: string]: string }
): { containerPath: string; runnerPath: string } {
let environmentPrefix = scriptEnv(environmentVariables)
const parts = workingDirectory.split('/').slice(-2)
if (parts.length !== 2) {
throw new Error(`Invalid working directory: ${workingDirectory}`)
}
const content = `#!/bin/sh -l
rm "$0" # remove script after running
mv /__w/_temp/_github_home /github/home && \
mv /__w/_temp/_github_workflow /github/workflow && \
mv /__w/_temp/_runner_file_commands /github/file_commands || true && \
mv /__w/${parts.join('/')}/ /github/workspace && \
cd /github/workspace && \
exec ${environmentPrefix} ${entryPoint} ${
entryPointArgs?.length ? entryPointArgs.join(' ') : ''
}
`
const filename = `${uuidv4()}.sh`
const entryPointPath = `${dst}/${filename}`
core.debug(`Writing container step script to ${entryPointPath}`)
fs.writeFileSync(entryPointPath, content)
return {
containerPath: `/__w/_temp/${filename}`,
runnerPath: entryPointPath
}
}
function scriptEnv(envs?: { [key: string]: string }): string {
if (!envs || !Object.entries(envs).length) {
return ''
}
const envBuffer: string[] = []
for (const [key, value] of Object.entries(envs)) {
if (
key.includes(`=`) ||
key.includes(`'`) ||
key.includes(`"`) ||
key.includes(`$`)
) {
throw new Error(
`environment key ${key} is invalid - the key must not contain =, $, ', or "`
)
}
envBuffer.push(
`"${key}=${value
.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/\$/g, '\\$')
.replace(/`/g, '\\`')}"`
)
}
if (!envBuffer?.length) {
return ''
}
return `env ${envBuffer.join(' ')} `
}
export function generateContainerName(image: string): string {
const nameWithTag = image.split('/').pop()
const name = nameWithTag?.split(':')[0]
if (!name) {
throw new Error(`Image definition '${image}' is invalid`)
}
return name
}
// Overwrite or append based on container options
//
// Keep in mind, envs and volumes could be passed as fields in container definition
// so default volume mounts and envs are appended first, and then create options are used
// to append more values
//
// Rest of the fields are just applied
// For example, container.createOptions.container.image is going to overwrite container.image field
export function mergeContainerWithOptions(
base: k8s.V1Container,
from: k8s.V1Container
): void {
for (const [key, value] of Object.entries(from)) {
if (key === 'name') {
if (value !== CONTAINER_EXTENSION_PREFIX + base.name) {
core.warning("Skipping name override: name can't be overwritten")
}
continue
} else if (key === 'image') {
core.warning("Skipping image override: image can't be overwritten")
continue
} else if (key === 'env') {
const envs = value as k8s.V1EnvVar[]
base.env = mergeLists(base.env, envs)
} else if (key === 'volumeMounts' && value) {
const volumeMounts = value as k8s.V1VolumeMount[]
base.volumeMounts = mergeLists(base.volumeMounts, volumeMounts)
} else if (key === 'ports' && value) {
const ports = value as k8s.V1ContainerPort[]
base.ports = mergeLists(base.ports, ports)
} else {
base[key] = value
}
}
}
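
Per the comment above, mergeContainerWithOptions appends list-like fields (env, ports, volumeMounts), refuses name and image overrides (logging a warning instead), and overwrites everything else. A simplified sketch of that behavior on plain objects, so it runs without @kubernetes/client-node; ContainerLike and mergeContainerSketch are illustrative stand-ins, not the real API:

// Simplified stand-in types; the real code operates on k8s.V1Container.
interface EnvVar { name: string; value: string }
interface ContainerLike {
  name: string
  image: string
  env?: EnvVar[]
  [key: string]: unknown
}

// Rough sketch of the merge rules described above: env entries are appended,
// name/image overrides are skipped, remaining fields are simply overwritten.
function mergeContainerSketch(base: ContainerLike, from: Partial<ContainerLike>): void {
  for (const [key, value] of Object.entries(from)) {
    if (key === 'name' || key === 'image') {
      continue // identity fields cannot be overridden (the real code warns here)
    }
    if (key === 'env' && Array.isArray(value)) {
      base.env = [...(base.env ?? []), ...(value as EnvVar[])]
      continue
    }
    base[key] = value
  }
}

// Example: env is appended, workingDir is overwritten, image stays 'node:22'.
const job: ContainerLike = { name: 'job', image: 'node:22', env: [{ name: 'CI', value: 'true' }] }
mergeContainerSketch(job, { image: 'ubuntu:latest', env: [{ name: 'FOO', value: 'bar' }], workingDir: '/__w' })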
export function mergePodSpecWithOptions(
base: k8s.V1PodSpec,
from: k8s.V1PodSpec
): void {
for (const [key, value] of Object.entries(from)) {
if (key === 'containers') {
base.containers.push(
...from.containers.filter(
e => !e.name?.startsWith(CONTAINER_EXTENSION_PREFIX)
)
)
} else if (key === 'volumes' && value) {
const volumes = value as k8s.V1Volume[]
base.volumes = mergeLists(base.volumes, volumes)
} else {
base[key] = value
}
}
}
export function mergeObjectMeta(
base: { metadata?: k8s.V1ObjectMeta },
from: k8s.V1ObjectMeta
): void {
if (!base.metadata?.labels || !base.metadata?.annotations) {
throw new Error(
"Can't merge metadata: base.metadata or base.annotations field is undefined"
)
}
if (from?.labels) {
for (const [key, value] of Object.entries(from.labels)) {
if (base.metadata?.labels?.[key]) {
core.warning(`Label ${key} is already defined and will be overwritten`)
}
base.metadata.labels[key] = value
}
}
if (from?.annotations) {
for (const [key, value] of Object.entries(from.annotations)) {
if (base.metadata?.annotations?.[key]) {
core.warning(
`Annotation ${key} is already defined and will be overwritten`
)
}
base.metadata.annotations[key] = value
}
}
}
export function readExtensionFromFile(): k8s.V1PodTemplateSpec | undefined {
const filePath = process.env[ENV_HOOK_TEMPLATE_PATH]
if (!filePath) {
return undefined
}
const doc = yaml.load(fs.readFileSync(filePath, 'utf8'))
if (!doc || typeof doc !== 'object') {
throw new Error(`Failed to parse ${filePath}`)
}
return doc as k8s.V1PodTemplateSpec
}
export function useKubeScheduler(): boolean {
return process.env[ENV_USE_KUBE_SCHEDULER] === 'true'
}
export enum PodPhase { export enum PodPhase {
PENDING = 'Pending', PENDING = 'Pending',
RUNNING = 'Running', RUNNING = 'Running',
@@ -277,29 +143,3 @@ export enum PodPhase {
UNKNOWN = 'Unknown', UNKNOWN = 'Unknown',
COMPLETED = 'Completed' COMPLETED = 'Completed'
} }
function mergeLists<T>(base?: T[], from?: T[]): T[] {
const b: T[] = base || []
if (!from?.length) {
return b
}
b.push(...from)
return b
}
export function fixArgs(args: string[]): string[] {
// Preserve shell command strings passed via `sh -c` without re-tokenizing.
// Retokenizing would split the script into multiple args, breaking `sh -c`.
if (args.length >= 2 && args[0] === 'sh' && args[1] === '-c') {
return args
}
return shlex.split(args.join(' '))
}
export async function sleep(ms: number): Promise<void> {
return new Promise(resolve => setTimeout(resolve, ms))
}
export function listDirAllCommand(dir: string): string {
return `cd ${shlex.quote(dir)} && find . -not -path '*/_runner_hook_responses*' -exec stat -c '%s %n' {} \\;`
}
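
Both script writers in this file build an env "KEY=VALUE" prefix for the exec line, validating keys and escaping values so the shell passes them through literally (the exact escape set differs slightly between the two versions shown). A hedged sketch following the scriptEnv variant; buildEnvPrefix is illustrative, not an exported helper:

// Illustrative sketch of the env prefix built above: keys are validated,
// values are escaped, and the result is prepended as `env "K=V" ...`.
function buildEnvPrefix(envs?: { [key: string]: string }): string {
  if (!envs || !Object.entries(envs).length) {
    return ''
  }
  const pairs: string[] = []
  for (const [key, value] of Object.entries(envs)) {
    if (/[='"$]/.test(key)) {
      throw new Error(`environment key ${key} is invalid - the key must not contain =, $, ', or "`)
    }
    const escaped = value
      .replace(/\\/g, '\\\\') // backslash first, so later escapes are not doubled
      .replace(/"/g, '\\"') // keep double quotes literal inside the quoted pair
      .replace(/\$/g, '\\$') // prevent the shell from expanding $VARS
      .replace(/`/g, '\\`') // prevent command substitution via backticks
    pairs.push(`"${key}=${escaped}"`)
  }
  return `env ${pairs.join(' ')} `
}

// buildEnvPrefix({ DOLLAR: '$' }) => 'env "DOLLAR=\\$" '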
View File
@@ -3,7 +3,6 @@ import { cleanupJob, prepareJob } from '../src/hooks'
import { RunnerInstanceLabel } from '../src/hooks/constants' import { RunnerInstanceLabel } from '../src/hooks/constants'
import { namespace } from '../src/k8s' import { namespace } from '../src/k8s'
import { TestHelper } from './test-setup' import { TestHelper } from './test-setup'
import { PrepareJobArgs } from 'hooklib'
let testHelper: TestHelper let testHelper: TestHelper
@@ -15,10 +14,7 @@ describe('Cleanup Job', () => {
const prepareJobOutputFilePath = testHelper.createFile( const prepareJobOutputFilePath = testHelper.createFile(
'prepare-job-output.json' 'prepare-job-output.json'
) )
await prepareJob( await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
prepareJobData.args as PrepareJobArgs,
prepareJobOutputFilePath
)
}) })
afterEach(async () => { afterEach(async () => {
@@ -36,12 +32,16 @@ describe('Cleanup Job', () => {
kc.loadFromDefault() kc.loadFromDefault()
const k8sApi = kc.makeApiClient(k8s.CoreV1Api) const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
const podList = await k8sApi.listNamespacedPod({ const podList = await k8sApi.listNamespacedPod(
namespace: namespace(), namespace(),
labelSelector: new RunnerInstanceLabel().toString() undefined,
}) undefined,
undefined,
undefined,
new RunnerInstanceLabel().toString()
)
expect(podList.items.length).toBe(0) expect(podList.body.items.length).toBe(0)
}) })
it('should have no runner linked secrets', async () => { it('should have no runner linked secrets', async () => {
@@ -51,11 +51,15 @@ describe('Cleanup Job', () => {
kc.loadFromDefault() kc.loadFromDefault()
const k8sApi = kc.makeApiClient(k8s.CoreV1Api) const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
const secretList = await k8sApi.listNamespacedSecret({ const secretList = await k8sApi.listNamespacedSecret(
namespace: namespace(), namespace(),
labelSelector: new RunnerInstanceLabel().toString() undefined,
}) undefined,
undefined,
undefined,
new RunnerInstanceLabel().toString()
)
expect(secretList.items.length).toBe(0) expect(secretList.body.items.length).toBe(0)
}) })
}) })
View File
@@ -4,7 +4,6 @@ import {
getSecretName, getSecretName,
getStepPodName, getStepPodName,
getVolumeClaimName, getVolumeClaimName,
JOB_CONTAINER_NAME,
MAX_POD_NAME_LENGTH, MAX_POD_NAME_LENGTH,
RunnerInstanceLabel, RunnerInstanceLabel,
STEP_POD_NAME_SUFFIX_LENGTH STEP_POD_NAME_SUFFIX_LENGTH
@@ -171,12 +170,4 @@ describe('constants', () => {
} }
}) })
}) })
describe('const values', () => {
it('should have constants set', () => {
expect(JOB_CONTAINER_NAME).toBeTruthy()
expect(MAX_POD_NAME_LENGTH).toBeGreaterThan(0)
expect(STEP_POD_NAME_SUFFIX_LENGTH).toBeGreaterThan(0)
})
})
}) })
View File
@@ -6,7 +6,6 @@ import {
runScriptStep runScriptStep
} from '../src/hooks' } from '../src/hooks'
import { TestHelper } from './test-setup' import { TestHelper } from './test-setup'
import { RunContainerStepArgs, RunScriptStepArgs } from 'hooklib'
jest.useRealTimers() jest.useRealTimers()
@@ -26,7 +25,6 @@ describe('e2e', () => {
afterEach(async () => { afterEach(async () => {
await testHelper.cleanup() await testHelper.cleanup()
}) })
it('should prepare job, run script step, run container step then cleanup without errors', async () => { it('should prepare job, run script step, run container step then cleanup without errors', async () => {
await expect( await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath) prepareJob(prepareJobData.args, prepareJobOutputFilePath)
@@ -38,16 +36,13 @@ describe('e2e', () => {
const prepareJobOutputData = JSON.parse(prepareJobOutputJson.toString()) const prepareJobOutputData = JSON.parse(prepareJobOutputJson.toString())
await expect( await expect(
runScriptStep( runScriptStep(scriptStepData.args, prepareJobOutputData.state, null)
scriptStepData.args as RunScriptStepArgs,
prepareJobOutputData.state
)
).resolves.not.toThrow() ).resolves.not.toThrow()
const runContainerStepData = testHelper.getRunContainerStepDefinition() const runContainerStepData = testHelper.getRunContainerStepDefinition()
await expect( await expect(
runContainerStep(runContainerStepData.args as RunContainerStepArgs) runContainerStep(runContainerStepData.args)
).resolves.not.toThrow() ).resolves.not.toThrow()
await expect(cleanupJob()).resolves.not.toThrow() await expect(cleanupJob()).resolves.not.toThrow()
View File
@@ -1,14 +1,6 @@
import * as fs from 'fs' import * as fs from 'fs'
import { containerPorts } from '../src/k8s' import { POD_VOLUME_NAME } from '../src/k8s'
import { import { containerVolumes, writeEntryPointScript } from '../src/k8s/utils'
generateContainerName,
writeRunScript,
mergePodSpecWithOptions,
mergeContainerWithOptions,
readExtensionFromFile,
ENV_HOOK_TEMPLATE_PATH
} from '../src/k8s/utils'
import * as k8s from '@kubernetes/client-node'
import { TestHelper } from './test-setup' import { TestHelper } from './test-setup'
let testHelper: TestHelper let testHelper: TestHelper
@@ -26,74 +18,35 @@ describe('k8s utils', () => {
it('should not throw', () => { it('should not throw', () => {
expect(() => expect(() =>
writeRunScript('/test', 'sh', ['-e', 'script.sh'], ['/prepend/path'], { writeEntryPointScript(
SOME_ENV: 'SOME_VALUE' '/test',
}) 'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
SOME_ENV: 'SOME_VALUE'
}
)
).not.toThrow() ).not.toThrow()
}) })
it('should throw if RUNNER_TEMP is not set', () => { it('should throw if RUNNER_TEMP is not set', () => {
delete process.env.RUNNER_TEMP delete process.env.RUNNER_TEMP
expect(() => expect(() =>
writeRunScript('/test', 'sh', ['-e', 'script.sh'], ['/prepend/path'], { writeEntryPointScript(
SOME_ENV: 'SOME_VALUE' '/test',
}) 'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
SOME_ENV: 'SOME_VALUE'
}
)
).toThrow() ).toThrow()
}) })
it('should throw if environment variable name contains double quote', () => {
expect(() =>
writeRunScript('/test', 'sh', ['-e', 'script.sh'], ['/prepend/path'], {
'SOME"_ENV': 'SOME_VALUE'
})
).toThrow()
})
it('should throw if environment variable name contains =', () => {
expect(() =>
writeRunScript('/test', 'sh', ['-e', 'script.sh'], ['/prepend/path'], {
'SOME=ENV': 'SOME_VALUE'
})
).toThrow()
})
it('should throw if environment variable name contains single quote', () => {
expect(() =>
writeRunScript('/test', 'sh', ['-e', 'script.sh'], ['/prepend/path'], {
"SOME'_ENV": 'SOME_VALUE'
})
).toThrow()
})
it('should throw if environment variable name contains dollar', () => {
expect(() =>
writeRunScript('/test', 'sh', ['-e', 'script.sh'], ['/prepend/path'], {
SOME_$_ENV: 'SOME_VALUE'
})
).toThrow()
})
it('should escape double quote, dollar and backslash in environment variable values', () => {
const { runnerPath } = writeRunScript(
'/test',
'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
DQUOTE: '"',
BACK_SLASH: '\\',
DOLLAR: '$'
}
)
expect(fs.existsSync(runnerPath)).toBe(true)
const script = fs.readFileSync(runnerPath, 'utf8')
expect(script).toContain('"DQUOTE=\\"')
expect(script).toContain('"BACK_SLASH=\\\\"')
expect(script).toContain('"DOLLAR=\\$"')
})
it('should return object with containerPath and runnerPath', () => { it('should return object with containerPath and runnerPath', () => {
const { containerPath, runnerPath } = writeRunScript( const { containerPath, runnerPath } = writeEntryPointScript(
'/test', '/test',
'sh', 'sh',
['-e', 'script.sh'], ['-e', 'script.sh'],
@@ -108,7 +61,7 @@ describe('k8s utils', () => {
}) })
it('should write entrypoint path and the file should exist', () => { it('should write entrypoint path and the file should exist', () => {
const { runnerPath } = writeRunScript( const { runnerPath } = writeEntryPointScript(
'/test', '/test',
'sh', 'sh',
['-e', 'script.sh'], ['-e', 'script.sh'],
@@ -131,279 +84,70 @@ describe('k8s utils', () => {
await testHelper.cleanup() await testHelper.cleanup()
}) })
it('should parse container ports', () => { it('should throw if container action and GITHUB_WORKSPACE env is not set', () => {
const tt = [ delete process.env.GITHUB_WORKSPACE
{ expect(() => containerVolumes([], true, true)).toThrow()
spec: '8080:80', expect(() => containerVolumes([], false, true)).toThrow()
want: {
containerPort: 80,
hostPort: 8080,
protocol: 'TCP'
}
},
{
spec: '8080:80/udp',
want: {
containerPort: 80,
hostPort: 8080,
protocol: 'UDP'
}
},
{
spec: '8080/udp',
want: {
containerPort: 8080,
hostPort: undefined,
protocol: 'UDP'
}
},
{
spec: '8080',
want: {
containerPort: 8080,
hostPort: undefined,
protocol: 'TCP'
}
}
]
for (const tc of tt) {
const got = containerPorts({ portMappings: [tc.spec] })
for (const [key, value] of Object.entries(tc.want)) {
expect(got[0][key]).toBe(value)
}
}
}) })
it('should throw when ports are out of range (0, 65536)', () => { it('should always have work mount', () => {
expect(() => containerPorts({ portMappings: ['65536'] })).toThrow() let volumes = containerVolumes([], true, true)
expect(() => containerPorts({ portMappings: ['0'] })).toThrow() expect(volumes.find(e => e.mountPath === '/__w')).toBeTruthy()
expect(() => containerPorts({ portMappings: ['65536/udp'] })).toThrow() volumes = containerVolumes([], true, false)
expect(() => containerPorts({ portMappings: ['0/udp'] })).toThrow() expect(volumes.find(e => e.mountPath === '/__w')).toBeTruthy()
expect(() => containerPorts({ portMappings: ['1:65536'] })).toThrow() volumes = containerVolumes([], false, true)
expect(() => containerPorts({ portMappings: ['65536:1'] })).toThrow() expect(volumes.find(e => e.mountPath === '/__w')).toBeTruthy()
expect(() => containerPorts({ portMappings: ['1:65536/tcp'] })).toThrow() volumes = containerVolumes([], false, false)
expect(() => containerPorts({ portMappings: ['65536:1/tcp'] })).toThrow() expect(volumes.find(e => e.mountPath === '/__w')).toBeTruthy()
expect(() => containerPorts({ portMappings: ['1:'] })).toThrow()
expect(() => containerPorts({ portMappings: [':1'] })).toThrow()
expect(() => containerPorts({ portMappings: ['1:/tcp'] })).toThrow()
expect(() => containerPorts({ portMappings: [':1/tcp'] })).toThrow()
}) })
it('should throw on multi ":" splits', () => { it('should have container action volumes', () => {
expect(() => containerPorts({ portMappings: ['1:1:1'] })).toThrow() let volumes = containerVolumes([], true, true)
})
it('should throw on multi "/" splits', () => {
expect(() => containerPorts({ portMappings: ['1:1/tcp/udp'] })).toThrow()
expect(() => containerPorts({ portMappings: ['1/tcp/udp'] })).toThrow()
})
})
describe('generate container name', () => {
it('should return the container name from image string', () => {
expect( expect(
generateContainerName('public.ecr.aws/localstack/localstack') volumes.find(e => e.mountPath === '/github/workspace')
).toEqual('localstack') ).toBeTruthy()
expect( expect(
generateContainerName( volumes.find(e => e.mountPath === '/github/file_commands')
'public.ecr.aws/url/with/multiple/slashes/postgres:latest' ).toBeTruthy()
) volumes = containerVolumes([], false, true)
).toEqual('postgres') expect(
expect(generateContainerName('postgres')).toEqual('postgres') volumes.find(e => e.mountPath === '/github/workspace')
expect(generateContainerName('postgres:latest')).toEqual('postgres') ).toBeTruthy()
expect(generateContainerName('localstack/localstack')).toEqual( expect(
'localstack' volumes.find(e => e.mountPath === '/github/file_commands')
) ).toBeTruthy()
expect(generateContainerName('localstack/localstack:latest')).toEqual(
'localstack'
)
}) })
it('should throw on invalid image string', () => { it('should have externals, github home and github workflow mounts if job container', () => {
const volumes = containerVolumes()
expect(volumes.find(e => e.mountPath === '/__e')).toBeTruthy()
expect(volumes.find(e => e.mountPath === '/github/home')).toBeTruthy()
expect(volumes.find(e => e.mountPath === '/github/workflow')).toBeTruthy()
})
it('should throw if user volume source volume path is not in workspace', () => {
expect(() => expect(() =>
generateContainerName('localstack/localstack/:latest') containerVolumes(
[
{
sourceVolumePath: '/outside/of/workdir'
}
],
true,
false
)
).toThrow() ).toThrow()
expect(() => generateContainerName(':latest')).toThrow()
})
})
describe('read extension', () => {
beforeEach(async () => {
testHelper = new TestHelper()
await testHelper.initialize()
}) })
afterEach(async () => { it(`all volumes should have name ${POD_VOLUME_NAME}`, () => {
await testHelper.cleanup() let volumes = containerVolumes([], true, true)
expect(volumes.every(e => e.name === POD_VOLUME_NAME)).toBeTruthy()
volumes = containerVolumes([], true, false)
expect(volumes.every(e => e.name === POD_VOLUME_NAME)).toBeTruthy()
volumes = containerVolumes([], false, true)
expect(volumes.every(e => e.name === POD_VOLUME_NAME)).toBeTruthy()
volumes = containerVolumes([], false, false)
expect(volumes.every(e => e.name === POD_VOLUME_NAME)).toBeTruthy()
}) })
it('should throw if env variable is set but file does not exist', () => {
process.env[ENV_HOOK_TEMPLATE_PATH] =
'/path/that/does/not/exist/data.yaml'
expect(() => readExtensionFromFile()).toThrow()
})
it('should return undefined if env variable is not set', () => {
delete process.env[ENV_HOOK_TEMPLATE_PATH]
expect(readExtensionFromFile()).toBeUndefined()
})
it('should throw if file is empty', () => {
let filePath = testHelper.createFile('data.yaml')
process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
expect(() => readExtensionFromFile()).toThrow()
})
it('should throw if file is not valid yaml', () => {
let filePath = testHelper.createFile('data.yaml')
fs.writeFileSync(filePath, 'invalid yaml')
process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
expect(() => readExtensionFromFile()).toThrow()
})
it('should return object if file is valid', () => {
let filePath = testHelper.createFile('data.yaml')
fs.writeFileSync(
filePath,
`
metadata:
labels:
label-name: label-value
annotations:
annotation-name: annotation-value
spec:
containers:
- name: test
image: node:22
- name: job
image: ubuntu:latest`
)
process.env[ENV_HOOK_TEMPLATE_PATH] = filePath
const extension = readExtensionFromFile()
expect(extension).toBeDefined()
})
})
it('should merge container spec', () => {
const base = {
image: 'node:22',
name: 'test',
env: [
{
name: 'TEST',
value: 'TEST'
}
],
ports: [
{
containerPort: 8080,
hostPort: 8080,
protocol: 'TCP'
}
]
} as k8s.V1Container
const from = {
ports: [
{
containerPort: 9090,
hostPort: 9090,
protocol: 'TCP'
}
],
env: [
{
name: 'TEST_TWO',
value: 'TEST_TWO'
}
],
image: 'ubuntu:latest',
name: 'overwrite'
} as k8s.V1Container
const expectContainer = {
name: base.name,
image: base.image,
ports: [
...(base.ports as k8s.V1ContainerPort[]),
...(from.ports as k8s.V1ContainerPort[])
],
env: [...(base.env as k8s.V1EnvVar[]), ...(from.env as k8s.V1EnvVar[])]
}
const expectJobContainer = JSON.parse(JSON.stringify(expectContainer))
expectJobContainer.name = base.name
mergeContainerWithOptions(base, from)
expect(base).toStrictEqual(expectContainer)
})
it('should merge pod spec', () => {
const base = {
containers: [
{
image: 'node:22',
name: 'test',
env: [
{
name: 'TEST',
value: 'TEST'
}
],
ports: [
{
containerPort: 8080,
hostPort: 8080,
protocol: 'TCP'
}
]
}
],
restartPolicy: 'Never'
} as k8s.V1PodSpec
const from = {
securityContext: {
runAsUser: 1000,
fsGroup: 2000
},
restartPolicy: 'Always',
volumes: [
{
name: 'work',
emptyDir: {}
}
],
containers: [
{
image: 'ubuntu:latest',
name: 'side-car',
env: [
{
name: 'TEST',
value: 'TEST'
}
],
ports: [
{
containerPort: 8080,
hostPort: 8080,
protocol: 'TCP'
}
]
}
]
} as k8s.V1PodSpec
const expected = JSON.parse(JSON.stringify(base))
expected.securityContext = from.securityContext
expected.restartPolicy = from.restartPolicy
expected.volumes = from.volumes
expected.containers.push(from.containers[0])
mergePodSpecWithOptions(base, from)
expect(base).toStrictEqual(expected)
}) })
}) })
View File
@@ -1,12 +1,8 @@
import * as fs from 'fs' import * as fs from 'fs'
import * as path from 'path' import * as path from 'path'
import { cleanupJob } from '../src/hooks' import { cleanupJob } from '../src/hooks'
import { createContainerSpec, prepareJob } from '../src/hooks/prepare-job' import { prepareJob } from '../src/hooks/prepare-job'
import { TestHelper } from './test-setup' import { TestHelper } from './test-setup'
import { ENV_HOOK_TEMPLATE_PATH, generateContainerName } from '../src/k8s/utils'
import { execPodStep, getPodByName } from '../src/k8s'
import { V1Container } from '@kubernetes/client-node'
import { JOB_CONTAINER_NAME } from '../src/hooks/constants'
jest.useRealTimers() jest.useRealTimers()
@@ -41,82 +37,32 @@ describe('Prepare job', () => {
}) })
it('should prepare job with absolute path for userVolumeMount', async () => { it('should prepare job with absolute path for userVolumeMount', async () => {
const userVolumeMount = path.join(
process.env.GITHUB_WORKSPACE as string,
'myvolume'
)
fs.mkdirSync(userVolumeMount, { recursive: true })
fs.writeFileSync(path.join(userVolumeMount, 'file.txt'), 'hello')
prepareJobData.args.container.userMountVolumes = [ prepareJobData.args.container.userMountVolumes = [
{ {
sourceVolumePath: userVolumeMount, sourceVolumePath: path.join(
targetVolumePath: '/__w/myvolume', process.env.GITHUB_WORKSPACE as string,
'/myvolume'
),
targetVolumePath: '/volume_mount',
readOnly: false readOnly: false
} }
] ]
await expect( await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath) prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow() ).resolves.not.toThrow()
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
await execPodStep(
['sh', '-c', '[ "$(cat /__w/myvolume/file.txt)" = "hello" ] || exit 5'],
content!.state!.jobPod,
JOB_CONTAINER_NAME
).then(output => {
expect(output).toBe(0)
})
}) })
it('should prepare job with envs CI and GITHUB_ACTIONS', async () => { it('should throw an exception if the user volume mount is absolute path outside of GITHUB_WORKSPACE', async () => {
await prepareJob(prepareJobData.args, prepareJobOutputFilePath) prepareJobData.args.container.userMountVolumes = [
{
const content = JSON.parse( sourceVolumePath: '/somewhere/not/in/gh-workspace',
fs.readFileSync(prepareJobOutputFilePath).toString() targetVolumePath: '/containermount',
) readOnly: false
}
const got = await getPodByName(content.state.jobPod) ]
expect(got.spec?.containers[0].env).toEqual( await expect(
expect.arrayContaining([ prepareJob(prepareJobData.args, prepareJobOutputFilePath)
{ name: 'CI', value: 'true' }, ).rejects.toThrow()
{ name: 'GITHUB_ACTIONS', value: 'true' }
])
)
expect(got.spec?.containers[1].env).toEqual(
expect.arrayContaining([
{ name: 'CI', value: 'true' },
{ name: 'GITHUB_ACTIONS', value: 'true' }
])
)
})
it('should not override CI env var if already set', async () => {
prepareJobData.args.container.environmentVariables = {
CI: 'false'
}
await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
const got = await getPodByName(content.state.jobPod)
expect(got.spec?.containers[0].env).toEqual(
expect.arrayContaining([
{ name: 'CI', value: 'false' },
{ name: 'GITHUB_ACTIONS', value: 'true' }
])
)
expect(got.spec?.containers[1].env).toEqual(
expect.arrayContaining([
{ name: 'CI', value: 'true' },
{ name: 'GITHUB_ACTIONS', value: 'true' }
])
)
}) })
it('should not run prepare job without the job container', async () => { it('should not run prepare job without the job container', async () => {
@@ -125,122 +71,4 @@ describe('Prepare job', () => {
prepareJob(prepareJobData.args, prepareJobOutputFilePath) prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).rejects.toThrow() ).rejects.toThrow()
}) })
it('should not set command + args for service container if not passed in args', async () => {
const services = prepareJobData.args.services.map(service => {
return createContainerSpec(service, generateContainerName(service.image))
}) as [V1Container]
expect(services[0].command).toBe(undefined)
expect(services[0].args).toBe(undefined)
})
it('should determine alpine correctly', async () => {
prepareJobData.args.container.image = 'alpine:latest'
await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
expect(content.isAlpine).toBe(true)
})
it('should run pod with extensions applied', async () => {
process.env[ENV_HOOK_TEMPLATE_PATH] = path.join(
__dirname,
'../../../examples/extension.yaml'
)
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
delete process.env[ENV_HOOK_TEMPLATE_PATH]
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
const got = await getPodByName(content.state.jobPod)
expect(got.metadata?.annotations?.['annotated-by']).toBe('extension')
expect(got.metadata?.labels?.['labeled-by']).toBe('extension')
expect(got.spec?.restartPolicy).toBe('Never')
// job container
expect(got.spec?.containers[0].name).toBe(JOB_CONTAINER_NAME)
expect(got.spec?.containers[0].image).toBe('node:22')
expect(got.spec?.containers[0].command).toEqual(['sh'])
expect(got.spec?.containers[0].args).toEqual(['-c', 'sleep 50'])
// service container
expect(got.spec?.containers[1].image).toBe('redis')
expect(got.spec?.containers[1].command).toBeFalsy()
expect(got.spec?.containers[1].args).toBeFalsy()
expect(got.spec?.containers[1].env).toEqual(
expect.arrayContaining([
{ name: 'CI', value: 'true' },
{ name: 'GITHUB_ACTIONS', value: 'true' },
{ name: 'ENV2', value: 'value2' }
])
)
expect(got.spec?.containers[1].resources).toEqual({
requests: { memory: '1Mi', cpu: '1' },
limits: { memory: '1Gi', cpu: '2' }
})
// side-car
expect(got.spec?.containers[2].name).toBe('side-car')
expect(got.spec?.containers[2].image).toBe('ubuntu:latest')
expect(got.spec?.containers[2].command).toEqual(['sh'])
expect(got.spec?.containers[2].args).toEqual(['-c', 'sleep 60'])
})
it('should put only job and services in output context file', async () => {
process.env[ENV_HOOK_TEMPLATE_PATH] = path.join(
__dirname,
'../../../examples/extension.yaml'
)
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
expect(content.state.jobPod).toBeTruthy()
expect(content.context.container).toBeTruthy()
expect(content.context.services).toBeTruthy()
expect(content.context.services.length).toBe(1)
})
test.each([undefined, null, []])(
'should not throw exception when portMapping=%p',
async pm => {
prepareJobData.args.services.forEach(s => {
s.portMappings = pm
})
await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
expect(() => content.context.services[0].image).not.toThrow()
}
)
it('should prepare job with container with non-root user', async () => {
prepareJobData.args!.container!.image =
'ghcr.io/actions/actions-runner:latest' // known to use user 1001
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
expect(content.state.jobPod).toBeTruthy()
expect(content.context.container.image).toBe(
'ghcr.io/actions/actions-runner:latest'
)
})
}) })
View File
@@ -1,25 +1,16 @@
import { prepareJob, runContainerStep } from '../src/hooks' import { runContainerStep } from '../src/hooks'
import { TestHelper } from './test-setup' import { TestHelper } from './test-setup'
import { ENV_HOOK_TEMPLATE_PATH } from '../src/k8s/utils'
import * as fs from 'fs'
import * as yaml from 'js-yaml'
import { JOB_CONTAINER_EXTENSION_NAME } from '../src/hooks/constants'
jest.useRealTimers() jest.useRealTimers()
let testHelper: TestHelper let testHelper: TestHelper
let runContainerStepData: any let runContainerStepData: any
let prepareJobData: any
let prepareJobOutputFilePath: string
describe('Run container step', () => { describe('Run container step', () => {
beforeEach(async () => { beforeEach(async () => {
testHelper = new TestHelper() testHelper = new TestHelper()
await testHelper.initialize() await testHelper.initialize()
prepareJobData = testHelper.getPrepareJobDefinition()
prepareJobOutputFilePath = testHelper.createFile('prepare-job-output.json')
await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
runContainerStepData = testHelper.getRunContainerStepDefinition() runContainerStepData = testHelper.getRunContainerStepDefinition()
}) })
@@ -27,41 +18,14 @@ describe('Run container step', () => {
await testHelper.cleanup() await testHelper.cleanup()
}) })
it('should run pod with extensions applied', async () => { it('should not throw', async () => {
const extension = { const exitCode = await runContainerStep(runContainerStepData.args)
metadata: { expect(exitCode).toBe(0)
annotations: { })
foo: 'bar'
},
labels: {
bar: 'baz'
}
},
spec: {
containers: [
{
name: JOB_CONTAINER_EXTENSION_NAME,
command: ['sh'],
args: ['-c', 'sleep 10000']
},
{
name: 'side-container',
image: 'ubuntu:latest',
command: ['sh'],
args: ['-c', 'echo test']
}
],
restartPolicy: 'Never'
}
}
let filePath = testHelper.createFile() it('should fail if the working directory does not exist', async () => {
fs.writeFileSync(filePath, yaml.dump(extension)) runContainerStepData.args.workingDirectory = '/foo/bar'
process.env[ENV_HOOK_TEMPLATE_PATH] = filePath await expect(runContainerStep(runContainerStepData.args)).rejects.toThrow()
await expect(
runContainerStep(runContainerStepData.args)
).resolves.not.toThrow()
delete process.env[ENV_HOOK_TEMPLATE_PATH]
}) })
it('should have env variables available', async () => { it('should have env variables available', async () => {
@@ -74,15 +38,4 @@ describe('Run container step', () => {
runContainerStep(runContainerStepData.args) runContainerStep(runContainerStepData.args)
).resolves.not.toThrow() ).resolves.not.toThrow()
}) })
it('should run container step with envs CI and GITHUB_ACTIONS', async () => {
runContainerStepData.args.entryPoint = 'bash'
runContainerStepData.args.entryPointArgs = [
'-c',
"'if [[ -z $GITHUB_ACTIONS ]] || [[ -z $CI ]]; then exit 1; fi'"
]
await expect(
runContainerStep(runContainerStepData.args)
).resolves.not.toThrow()
})
View File

@@ -1,7 +1,6 @@
import * as fs from 'fs' import * as fs from 'fs'
import { cleanupJob, prepareJob, runScriptStep } from '../src/hooks' import { cleanupJob, prepareJob, runScriptStep } from '../src/hooks'
import { TestHelper } from './test-setup' import { TestHelper } from './test-setup'
import { PrepareJobArgs, RunScriptStepArgs } from 'hooklib'
jest.useRealTimers() jest.useRealTimers()
@@ -9,9 +8,7 @@ let testHelper: TestHelper
let prepareJobOutputData: any let prepareJobOutputData: any
let runScriptStepDefinition: { let runScriptStepDefinition
args: RunScriptStepArgs
}
describe('Run script step', () => { describe('Run script step', () => {
beforeEach(async () => { beforeEach(async () => {
@@ -22,14 +19,9 @@ describe('Run script step', () => {
) )
const prepareJobData = testHelper.getPrepareJobDefinition() const prepareJobData = testHelper.getPrepareJobDefinition()
runScriptStepDefinition = testHelper.getRunScriptStepDefinition() as { runScriptStepDefinition = testHelper.getRunScriptStepDefinition()
args: RunScriptStepArgs
}
await prepareJob( await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
prepareJobData.args as PrepareJobArgs,
prepareJobOutputFilePath
)
const outputContent = fs.readFileSync(prepareJobOutputFilePath) const outputContent = fs.readFileSync(prepareJobOutputFilePath)
prepareJobOutputData = JSON.parse(outputContent.toString()) prepareJobOutputData = JSON.parse(outputContent.toString())
}) })
@@ -45,14 +37,22 @@ describe('Run script step', () => {
it('should not throw an exception', async () => { it('should not throw an exception', async () => {
await expect( await expect(
runScriptStep(runScriptStepDefinition.args, prepareJobOutputData.state) runScriptStep(
runScriptStepDefinition.args,
prepareJobOutputData.state,
null
)
).resolves.not.toThrow() ).resolves.not.toThrow()
}) })
it('should fail if the working directory does not exist', async () => { it('should fail if the working directory does not exist', async () => {
runScriptStepDefinition.args.workingDirectory = '/foo/bar' runScriptStepDefinition.args.workingDirectory = '/foo/bar'
await expect( await expect(
runScriptStep(runScriptStepDefinition.args, prepareJobOutputData.state) runScriptStep(
runScriptStepDefinition.args,
prepareJobOutputData.state,
null
)
).rejects.toThrow() ).rejects.toThrow()
}) })
@@ -64,12 +64,16 @@ describe('Run script step', () => {
"'if [[ -z $NODE_ENV ]]; then exit 1; fi'" "'if [[ -z $NODE_ENV ]]; then exit 1; fi'"
] ]
await expect( await expect(
runScriptStep(runScriptStepDefinition.args, prepareJobOutputData.state) runScriptStep(
runScriptStepDefinition.args,
prepareJobOutputData.state,
null
)
).resolves.not.toThrow() ).resolves.not.toThrow()
}) })
it('Should have path variable changed in container with prepend path string', async () => { it('Should have path variable changed in container with prepend path string', async () => {
runScriptStepDefinition.args.prependPath = ['/some/path'] runScriptStepDefinition.args.prependPath = '/some/path'
runScriptStepDefinition.args.entryPoint = '/bin/bash' runScriptStepDefinition.args.entryPoint = '/bin/bash'
runScriptStepDefinition.args.entryPointArgs = [ runScriptStepDefinition.args.entryPointArgs = [
'-c', '-c',
@@ -77,25 +81,11 @@ describe('Run script step', () => {
] ]
await expect( await expect(
runScriptStep(runScriptStepDefinition.args, prepareJobOutputData.state) runScriptStep(
).resolves.not.toThrow() runScriptStepDefinition.args,
}) prepareJobOutputData.state,
null
it('Dollar symbols in environment variables should not be expanded', async () => { )
runScriptStepDefinition.args.environmentVariables = {
VARIABLE1: '$VAR',
VARIABLE2: '${VAR}',
VARIABLE3: '$(VAR)'
}
runScriptStepDefinition.args.entryPointArgs = [
'-c',
'\'if [[ -z "$VARIABLE1" ]]; then exit 1; fi\'',
'\'if [[ -z "$VARIABLE2" ]]; then exit 2; fi\'',
'\'if [[ -z "$VARIABLE3" ]]; then exit 3; fi\''
]
await expect(
runScriptStep(runScriptStepDefinition.args, prepareJobOutputData.state)
).resolves.not.toThrow() ).resolves.not.toThrow()
}) })
@@ -110,7 +100,11 @@ describe('Run script step', () => {
] ]
await expect( await expect(
runScriptStep(runScriptStepDefinition.args, prepareJobOutputData.state) runScriptStep(
runScriptStepDefinition.args,
prepareJobOutputData.state,
null
)
).resolves.not.toThrow() ).resolves.not.toThrow()
}) })
}) })
View File
@@ -9,97 +9,87 @@ const kc = new k8s.KubeConfig()
kc.loadFromDefault() kc.loadFromDefault()
const k8sApi = kc.makeApiClient(k8s.CoreV1Api) const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
const k8sStorageApi = kc.makeApiClient(k8s.StorageV1Api)
export class TestHelper { export class TestHelper {
private tempDirPath: string private tempDirPath: string
private podName: string private podName: string
private runnerWorkdir: string
private runnerTemp: string
constructor() { constructor() {
this.tempDirPath = `${__dirname}/_temp/runner` this.tempDirPath = `${__dirname}/_temp/runner`
this.runnerWorkdir = `${this.tempDirPath}/_work`
this.runnerTemp = `${this.tempDirPath}/_work/_temp`
this.podName = uuidv4().replace(/-/g, '') this.podName = uuidv4().replace(/-/g, '')
} }
async initialize(): Promise<void> { public async initialize(): Promise<void> {
process.env['ACTIONS_RUNNER_POD_NAME'] = `${this.podName}` process.env['ACTIONS_RUNNER_POD_NAME'] = `${this.podName}`
process.env['RUNNER_WORKSPACE'] = `${this.runnerWorkdir}/repo` process.env['RUNNER_WORKSPACE'] = `${this.tempDirPath}/_work/repo`
process.env['RUNNER_TEMP'] = `${this.runnerTemp}` process.env['RUNNER_TEMP'] = `${this.tempDirPath}/_work/_temp`
process.env['GITHUB_WORKSPACE'] = `${this.runnerWorkdir}/repo/repo` process.env['GITHUB_WORKSPACE'] = `${this.tempDirPath}/_work/repo/repo`
process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE'] = 'default' process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE'] = 'default'
fs.mkdirSync(`${this.runnerWorkdir}/repo/repo`, { recursive: true }) fs.mkdirSync(`${this.tempDirPath}/_work/repo/repo`, { recursive: true })
fs.mkdirSync(`${this.tempDirPath}/externals`, { recursive: true }) fs.mkdirSync(`${this.tempDirPath}/externals`, { recursive: true })
fs.mkdirSync(this.runnerTemp, { recursive: true }) fs.mkdirSync(process.env.RUNNER_TEMP, { recursive: true })
fs.mkdirSync(`${this.runnerTemp}/_github_workflow`, { recursive: true })
fs.mkdirSync(`${this.runnerTemp}/_github_home`, { recursive: true })
fs.mkdirSync(`${this.runnerTemp}/_runner_file_commands`, {
recursive: true
})
fs.copyFileSync( fs.copyFileSync(
path.resolve(`${__dirname}/../../../examples/example-script.sh`), path.resolve(`${__dirname}/../../../examples/example-script.sh`),
`${this.runnerTemp}/example-script.sh` `${process.env.RUNNER_TEMP}/example-script.sh`
) )
await this.cleanupK8sResources() await this.cleanupK8sResources()
try { try {
await this.createTestVolume()
await this.createTestJobPod() await this.createTestJobPod()
} catch (e) { } catch (e) {
console.log(e) console.log(e)
} }
} }
async cleanup(): Promise<void> { public async cleanup(): Promise<void> {
try { try {
await this.cleanupK8sResources() await this.cleanupK8sResources()
fs.rmSync(this.tempDirPath, { recursive: true }) fs.rmSync(this.tempDirPath, { recursive: true })
} catch { } catch {}
// Ignore errors during cleanup
}
} }
public async cleanupK8sResources() {
async cleanupK8sResources(): Promise<void> {
await k8sApi await k8sApi
.deleteNamespacedPod({ .deleteNamespacedPersistentVolumeClaim(
name: this.podName, `${this.podName}-work`,
namespace: 'default', 'default',
gracePeriodSeconds: 0 undefined,
}) undefined,
.catch((e: k8s.ApiException<any>) => { 0
if (e.code !== 404) { )
console.error(JSON.stringify(e)) .catch(e => {})
} await k8sApi.deletePersistentVolume(`${this.podName}-pv`).catch(e => {})
}) await k8sStorageApi.deleteStorageClass('local-storage').catch(e => {})
await k8sApi await k8sApi
.deleteNamespacedPod({ .deleteNamespacedPod(this.podName, 'default', undefined, undefined, 0)
name: `${this.podName}-workflow`, .catch(e => {})
namespace: 'default', await k8sApi
gracePeriodSeconds: 0 .deleteNamespacedPod(
}) `${this.podName}-workflow`,
.catch((e: k8s.ApiException<any>) => { 'default',
if (e.code !== 404) { undefined,
console.error(JSON.stringify(e)) undefined,
} 0
}) )
.catch(e => {})
} }
createFile(fileName?: string): string { public createFile(fileName?: string): string {
const filePath = `${this.tempDirPath}/${fileName || uuidv4()}` const filePath = `${this.tempDirPath}/${fileName || uuidv4()}`
fs.writeFileSync(filePath, '') fs.writeFileSync(filePath, '')
return filePath return filePath
} }
removeFile(fileName: string): void { public removeFile(fileName: string): void {
const filePath = `${this.tempDirPath}/${fileName}` const filePath = `${this.tempDirPath}/${fileName}`
fs.rmSync(filePath) fs.rmSync(filePath)
} }
async createTestJobPod(): Promise<void> { public async createTestJobPod() {
const container = { const container = {
name: 'runner', name: 'nginx',
image: 'ghcr.io/actions/actions-runner:latest', image: 'nginx:latest',
imagePullPolicy: 'IfNotPresent' imagePullPolicy: 'IfNotPresent'
} as k8s.V1Container } as k8s.V1Container
@@ -109,18 +99,59 @@ export class TestHelper {
}, },
spec: { spec: {
restartPolicy: 'Never', restartPolicy: 'Never',
containers: [container], containers: [container]
securityContext: {
runAsUser: 1001,
runAsGroup: 1001,
fsGroup: 1001
}
} }
} as k8s.V1Pod } as k8s.V1Pod
await k8sApi.createNamespacedPod({ namespace: 'default', body: pod }) await k8sApi.createNamespacedPod('default', pod)
} }
getPrepareJobDefinition(): HookData { public async createTestVolume() {
var sc: k8s.V1StorageClass = {
metadata: {
name: 'local-storage'
},
provisioner: 'kubernetes.io/no-provisioner',
volumeBindingMode: 'Immediate'
}
await k8sStorageApi.createStorageClass(sc)
var volume: k8s.V1PersistentVolume = {
metadata: {
name: `${this.podName}-pv`
},
spec: {
storageClassName: 'local-storage',
capacity: {
storage: '2Gi'
},
volumeMode: 'Filesystem',
accessModes: ['ReadWriteOnce'],
hostPath: {
path: `${this.tempDirPath}/_work`
}
}
}
await k8sApi.createPersistentVolume(volume)
var volumeClaim: k8s.V1PersistentVolumeClaim = {
metadata: {
name: `${this.podName}-work`
},
spec: {
accessModes: ['ReadWriteOnce'],
volumeMode: 'Filesystem',
storageClassName: 'local-storage',
volumeName: `${this.podName}-pv`,
resources: {
requests: {
storage: '1Gi'
}
}
}
}
await k8sApi.createNamespacedPersistentVolumeClaim('default', volumeClaim)
}
public getPrepareJobDefinition(): HookData {
const prepareJob = JSON.parse( const prepareJob = JSON.parse(
fs.readFileSync( fs.readFileSync(
path.resolve(__dirname + '/../../../examples/prepare-job.json'), path.resolve(__dirname + '/../../../examples/prepare-job.json'),
@@ -137,7 +168,7 @@ export class TestHelper {
return prepareJob return prepareJob
} }
getRunScriptStepDefinition(): HookData { public getRunScriptStepDefinition(): HookData {
const runScriptStep = JSON.parse( const runScriptStep = JSON.parse(
fs.readFileSync( fs.readFileSync(
path.resolve(__dirname + '/../../../examples/run-script-step.json'), path.resolve(__dirname + '/../../../examples/run-script-step.json'),
@@ -149,7 +180,7 @@ export class TestHelper {
return runScriptStep return runScriptStep
} }
getRunContainerStepDefinition(): HookData { public getRunContainerStepDefinition(): HookData {
const runContainerStep = JSON.parse( const runContainerStep = JSON.parse(
fs.readFileSync( fs.readFileSync(
path.resolve(__dirname + '/../../../examples/run-container-step.json'), path.resolve(__dirname + '/../../../examples/run-container-step.json'),
View File
@@ -5,8 +5,7 @@
"outDir": "./lib", "outDir": "./lib",
"rootDir": "./src" "rootDir": "./src"
}, },
"esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */
"include": [ "include": [
"src/**/*", "./src"
] ]
} }
View File
@@ -1,6 +0,0 @@
{
"compilerOptions": {
"allowJs": true
},
"extends": "./tsconfig.json"
}
View File
@@ -1,19 +1,6 @@
## Features ## Features
- k8s: remove dependency on the runner's volume [#244]
## Bugs ## Bugs
- Fixed an issue where default private registry images did not pull correctly [#25]
- docker: fix readOnly volumes in createContainer [#236] ## Misc
## Misc
- bump all dependencies [#234] [#240] [#239] [#238]
- bump actions [#254]
## SHA-256 Checksums
The SHA-256 checksums for the packages included in this build are shown below:
- actions-runner-hooks-docker-<HOOK_VERSION>.zip <DOCKER_SHA>
- actions-runner-hooks-k8s-<HOOK_VERSION>.zip <K8S_SHA>