mirror of
https://github.com/actions/runner-container-hooks.git
synced 2025-12-30 13:57:15 +08:00
Compare commits
26 Commits
v0.3.2
...
fhammerl+n
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
56f935a374 | ||
|
|
7271e71008 | ||
|
|
e33f331739 | ||
|
|
11de25a121 | ||
|
|
4e674e284a | ||
|
|
f841b42f55 | ||
|
|
66566368e0 | ||
|
|
79262ba5fb | ||
|
|
0cb9e396ea | ||
|
|
b696059824 | ||
|
|
365a99a4de | ||
|
|
02f00d0fd5 | ||
|
|
5e916d49cc | ||
|
|
a29f87c874 | ||
|
|
6de86a9ef4 | ||
|
|
31a2cda987 | ||
|
|
67d3f481f5 | ||
|
|
5b7b738864 | ||
|
|
a99346d1ab | ||
|
|
3d102fd372 | ||
|
|
4de51ee6a5 | ||
|
|
c8e272367f | ||
|
|
c4aa97c974 | ||
|
|
f400db92cc | ||
|
|
5f0dc3f3b6 | ||
|
|
6ef042836f |
1
.gitattributes
vendored
1
.gitattributes
vendored
@@ -1 +0,0 @@
|
||||
*.png filter=lfs diff=lfs merge=lfs -text
|
||||
@@ -1,184 +0,0 @@
|
||||
# ADR 0072: Using Ephemeral Containers
|
||||
|
||||
**Date:** 27 March 2023
|
||||
|
||||
**Status**: Rejected <!--Accepted|Rejected|Superseded|Deprecated-->
|
||||
|
||||
## Context
|
||||
|
||||
We are evaluating using Kubernetes [ephemeral containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/) as a drop-in replacement for creating pods for [jobs that run in containers](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) and [service containers](https://docs.github.com/en/actions/using-containerized-services/about-service-containers).
|
||||
|
||||
The main motivator behind using ephemeral containers is to eliminate the need for [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). Persistent Volume implementations vary depending on the provider and we want to avoid building a dependency on it in order to provide our end-users a consistent experience.
|
||||
|
||||
With ephemeral containers we could leverage [emptyDir volumes](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) which fits our use case better and its behaviour is consistent across providers.
|
||||
|
||||
However, it's important to acknowledge that ephemeral containers were not designed to handle workloads but rather provide a mechanism to inspect running containers for debugging and troubleshooting purposes.
|
||||
|
||||
## Evaluation
|
||||
|
||||
The criteria that we are using to evaluate whether ephemeral containers are fit for purpose are:
|
||||
|
||||
- Networking
|
||||
- Storage
|
||||
- Security
|
||||
- Resource limits
|
||||
- Logs
|
||||
- Customizability
|
||||
|
||||
### Networking
|
||||
|
||||
Ephemeral containers share the networking namespace of the pod they are attached to. This means that ephemeral containers can access the same network interfaces as the pod and can communicate with other containers in the same pod. However, ephemeral containers cannot have ports configured, and as such the fields ports, livenessProbe, and readinessProbe are not available.[^1][^2]
|
||||
|
||||
In this scenario we have 3 containers in a pod:
|
||||
|
||||
- `runner`: the main container that runs the GitHub Actions job
|
||||
- `debugger`: the first ephemeral container
|
||||
- `debugger2`: the second ephemeral container
|
||||
|
||||
By sequentially opening ports on each of these containers and connecting to them we can demonstrate that the communication flow between the runner and the debuggers is feasible.
|
||||
|
||||
<details>
|
||||
<summary>1. Runner -> Debugger communication</summary>
|
||||
|
||||

|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>2. Debugger -> Runner communication</summary>
|
||||
|
||||

|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>3. Debugger2 -> Debugger communication</summary>
|
||||
|
||||

|
||||
</details>
|
||||
|
||||
### Storage
|
||||
|
||||
An emptyDir volume can be successfully mounted (read/write) by the runner as well as the ephemeral containers. This means that ephemeral containers can share data with the runner and other ephemeral containers.
|
||||
|
||||
<details>
|
||||
<summary>Configuration</summary>
|
||||
|
||||
```yaml
|
||||
# Extracted from the values.yaml for the gha-runner-scale-set helm chart
|
||||
spec:
|
||||
containers:
|
||||
- name: runner
|
||||
image: ghcr.io/actions/actions-runner:latest
|
||||
command: ["/home/runner/run.sh"]
|
||||
volumeMounts:
|
||||
- mountPath: /workspace
|
||||
name: work-volume
|
||||
volumes:
|
||||
- name: work-volume
|
||||
emptyDir:
|
||||
sizeLimit: 1Gi
|
||||
```
|
||||
|
||||
```bash
|
||||
# The API call to the Kubernetes API used to create the ephemeral containers
|
||||
|
||||
POD_NAME="arc-runner-set-6sfwd-runner-k7qq6"
|
||||
NAMESPACE="arc-runners"
|
||||
|
||||
curl -v "https://<IP>:<PORT>/api/v1/namespaces/$NAMESPACE/pods/$POD_NAME/ephemeralcontainers" \
|
||||
-X PATCH \
|
||||
-H 'Content-Type: application/strategic-merge-patch+json' \
|
||||
--cacert <PATH_TO_CACERT> \
|
||||
--cert <PATH_TO_CERT> \
|
||||
--key <PATH_TO_CLIENT_KEY> \
|
||||
-d '
|
||||
{
|
||||
"spec":
|
||||
{
|
||||
"ephemeralContainers":
|
||||
[
|
||||
{
|
||||
"name": "debugger",
|
||||
"command": ["sh"],
|
||||
"image": "ghcr.io/actions/actions-runner:latest",
|
||||
"targetContainerName": "runner",
|
||||
"stdin": true,
|
||||
"tty": true,
|
||||
"volumeMounts": [{
|
||||
"mountPath": "/workspace",
|
||||
"name": "work-volume",
|
||||
"readOnly": false
|
||||
}]
|
||||
},
|
||||
{
|
||||
"name": "debugger2",
|
||||
"command": ["sh"],
|
||||
"image": "ghcr.io/actions/actions-runner:latest",
|
||||
"targetContainerName": "runner",
|
||||
"stdin": true,
|
||||
"tty": true,
|
||||
"volumeMounts": [{
|
||||
"mountPath": "/workspace",
|
||||
"name": "work-volume",
|
||||
"readOnly": false
|
||||
}]
|
||||
}
|
||||
]
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>emptyDir volume mount</summary>
|
||||
|
||||

|
||||
|
||||
</details>
|
||||
|
||||
### Security
|
||||
|
||||
According to the [ephemeral containers API specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#ephemeralcontainer-v1-core) the configuration of the `securityContext` field is possible.
|
||||
|
||||
Ephemeral containers share the same network namespace as the pod they are attached to. This means that ephemeral containers can access the same network interfaces as the pod and can communicate with other containers in the same pod.
|
||||
|
||||
It is also possible for ephemeral containers to [share the process namespace](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) with the other containers in the pod. This is disabled by default.
|
||||
|
||||
The above could have unpredictable security implications.
|
||||
|
||||
### Resource limits
|
||||
|
||||
Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. [^1] This is a major drawback as it means that ephemeral containers cannot be configured to have resource limits.
|
||||
|
||||
There are no guaranteed resources for ad-hoc troubleshooting. If troubleshooting causes a pod to exceed its resource limit it may be evicted. [^3]
|
||||
|
||||
### Logs
|
||||
|
||||
Since ephemeral containers can share volumes with the runner container, it's possible to write logs to the same volume and have them available to the runner container.
|
||||
|
||||
### Customizability
|
||||
|
||||
Ephemeral containers can run any image and tag provided; they can be customized to run any arbitrary job. However, it's important to note that the following are not feasible:
|
||||
|
||||
- Lifecycle is not allowed for ephemeral containers
|
||||
- Ephemeral containers will stop when their command exits, such as exiting a shell, and they will not be restarted. Unlike `kubectl exec`, processes in Ephemeral Containers will not receive an `EOF` if their connections are interrupted, so shells won't automatically exit on disconnect. There is no API support for killing or restarting an ephemeral container. The only way to exit the container is to send it an OS signal. [^4]
|
||||
- Probes are not allowed for ephemeral containers.
|
||||
- Ports are not allowed for ephemeral containers.
|
||||
|
||||
## Decision
|
||||
|
||||
While the evaluation shows that ephemeral containers can be used to run jobs in containers, it's important to acknowledge that ephemeral containers were not designed to handle workloads but rather provide a mechanism to inspect running containers for debugging and troubleshooting purposes.
|
||||
|
||||
Given the limitations of ephemeral containers, we decided not to use them outside of their intended purpose.
|
||||
|
||||
## Consequences
|
||||
|
||||
Proposal rejected, no further action required. This document will be used as a reference for future discussions.
|
||||
|
||||
[^1]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#ephemeralcontainer-v1-core
|
||||
|
||||
[^2]: https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/
|
||||
|
||||
[^3]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/277-ephemeral-containers/README.md#notesconstraintscaveats
|
||||
|
||||
[^4]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/277-ephemeral-containers/README.md#ephemeral-container-lifecycle
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -73,8 +73,6 @@
|
||||
"contextName": "redis",
|
||||
"image": "redis",
|
||||
"createOptions": "--cpus 1",
|
||||
"entrypoint": null,
|
||||
"entryPointArgs": [],
|
||||
"environmentVariables": {},
|
||||
"userMountVolumes": [
|
||||
{
|
||||
|
||||
16
package-lock.json
generated
16
package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "hooks",
|
||||
"version": "0.3.2",
|
||||
"version": "0.1.3",
|
||||
"lockfileVersion": 2,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "hooks",
|
||||
"version": "0.3.2",
|
||||
"version": "0.1.3",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@types/jest": "^27.5.1",
|
||||
@@ -1800,9 +1800,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/json5": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
|
||||
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
|
||||
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"minimist": "^1.2.0"
|
||||
@@ -3926,9 +3926,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"json5": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
|
||||
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
|
||||
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"minimist": "^1.2.0"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "hooks",
|
||||
"version": "0.3.2",
|
||||
"version": "0.2.0",
|
||||
"description": "Three projects are included - k8s: a kubernetes hook implementation that spins up pods dynamically to run a job - docker: A hook implementation of the runner's docker implementation - A hook lib, which contains shared typescript definitions and utilities that the other packages consume",
|
||||
"main": "",
|
||||
"directories": {
|
||||
|
||||
24
packages/docker/package-lock.json
generated
24
packages/docker/package-lock.json
generated
@@ -3779,9 +3779,9 @@
|
||||
"peer": true
|
||||
},
|
||||
"node_modules/json5": {
|
||||
"version": "2.2.3",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
|
||||
"integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
|
||||
"version": "2.2.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.1.tgz",
|
||||
"integrity": "sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==",
|
||||
"dev": true,
|
||||
"bin": {
|
||||
"json5": "lib/cli.js"
|
||||
@@ -4903,9 +4903,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/tsconfig-paths/node_modules/json5": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
|
||||
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
|
||||
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"minimist": "^1.2.0"
|
||||
@@ -8176,9 +8176,9 @@
|
||||
"peer": true
|
||||
},
|
||||
"json5": {
|
||||
"version": "2.2.3",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
|
||||
"integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
|
||||
"version": "2.2.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.1.tgz",
|
||||
"integrity": "sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==",
|
||||
"dev": true
|
||||
},
|
||||
"kleur": {
|
||||
@@ -8985,9 +8985,9 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"json5": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
|
||||
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
|
||||
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"minimist": "^1.2.0"
|
||||
|
||||
@@ -16,14 +16,15 @@ import {
|
||||
import { checkEnvironment } from './utils'
|
||||
|
||||
async function run(): Promise<void> {
|
||||
const input = await getInputFromStdin()
|
||||
|
||||
const args = input['args']
|
||||
const command = input['command']
|
||||
const responseFile = input['responseFile']
|
||||
const state = input['state']
|
||||
|
||||
try {
|
||||
checkEnvironment()
|
||||
const input = await getInputFromStdin()
|
||||
|
||||
const args = input['args']
|
||||
const command = input['command']
|
||||
const responseFile = input['responseFile']
|
||||
const state = input['state']
|
||||
switch (command) {
|
||||
case Command.PrepareJob:
|
||||
await prepareJob(args as PrepareJobArgs, responseFile)
|
||||
|
||||
12
packages/hooklib/package-lock.json
generated
12
packages/hooklib/package-lock.json
generated
@@ -1742,9 +1742,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/json5": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
|
||||
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
|
||||
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"minimist": "^1.2.0"
|
||||
@@ -3789,9 +3789,9 @@
|
||||
"dev": true
|
||||
},
|
||||
"json5": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
|
||||
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
|
||||
"integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"minimist": "^1.2.0"
|
||||
|
||||
985
packages/k8s/package-lock.json
generated
985
packages/k8s/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -16,7 +16,7 @@
|
||||
"@actions/core": "^1.9.1",
|
||||
"@actions/exec": "^1.1.1",
|
||||
"@actions/io": "^1.1.2",
|
||||
"@kubernetes/client-node": "^0.18.1",
|
||||
"@kubernetes/client-node": "^0.16.3",
|
||||
"hooklib": "file:../hooklib"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -14,7 +14,6 @@ import {
|
||||
containerVolumes,
|
||||
DEFAULT_CONTAINER_ENTRY_POINT,
|
||||
DEFAULT_CONTAINER_ENTRY_POINT_ARGS,
|
||||
generateContainerName,
|
||||
PodPhase
|
||||
} from '../k8s/utils'
|
||||
import { JOB_CONTAINER_NAME } from './constants'
|
||||
@@ -32,14 +31,14 @@ export async function prepareJob(
|
||||
let container: k8s.V1Container | undefined = undefined
|
||||
if (args.container?.image) {
|
||||
core.debug(`Using image '${args.container.image}' for job image`)
|
||||
container = createContainerSpec(args.container, JOB_CONTAINER_NAME, true)
|
||||
container = createPodSpec(args.container, JOB_CONTAINER_NAME, true)
|
||||
}
|
||||
|
||||
let services: k8s.V1Container[] = []
|
||||
if (args.services?.length) {
|
||||
services = args.services.map(service => {
|
||||
core.debug(`Adding service '${service.image}' to pod definition`)
|
||||
return createContainerSpec(service, generateContainerName(service.image))
|
||||
return createPodSpec(service, service.image.split(':')[0])
|
||||
})
|
||||
}
|
||||
if (!container && !services?.length) {
|
||||
@@ -125,11 +124,13 @@ function generateResponseFile(
|
||||
)
|
||||
if (serviceContainers?.length) {
|
||||
response.context['services'] = serviceContainers.map(c => {
|
||||
if (!c.ports) {
|
||||
return
|
||||
}
|
||||
|
||||
const ctxPorts: ContextPorts = {}
|
||||
if (c.ports?.length) {
|
||||
for (const port of c.ports) {
|
||||
ctxPorts[port.containerPort] = port.hostPort
|
||||
}
|
||||
for (const port of c.ports) {
|
||||
ctxPorts[port.containerPort] = port.hostPort
|
||||
}
|
||||
|
||||
return {
|
||||
@@ -152,7 +153,7 @@ async function copyExternalsToRoot(): Promise<void> {
|
||||
}
|
||||
}
|
||||
|
||||
export function createContainerSpec(
|
||||
function createPodSpec(
|
||||
container,
|
||||
name: string,
|
||||
jobContainer = false
|
||||
@@ -165,20 +166,14 @@ export function createContainerSpec(
|
||||
const podContainer = {
|
||||
name,
|
||||
image: container.image,
|
||||
command: [container.entryPoint],
|
||||
args: container.entryPointArgs,
|
||||
ports: containerPorts(container)
|
||||
} as k8s.V1Container
|
||||
if (container.workingDirectory) {
|
||||
podContainer.workingDir = container.workingDirectory
|
||||
}
|
||||
|
||||
if (container.entryPoint) {
|
||||
podContainer.command = [container.entryPoint]
|
||||
}
|
||||
|
||||
if (container.entryPointArgs?.length > 0) {
|
||||
podContainer.args = container.entryPointArgs
|
||||
}
|
||||
|
||||
podContainer.env = []
|
||||
for (const [key, value] of Object.entries(
|
||||
container['environmentVariables']
|
||||
|
||||
@@ -8,7 +8,8 @@ import {
|
||||
getPodLogs,
|
||||
getPodStatus,
|
||||
waitForJobToComplete,
|
||||
waitForPodPhases
|
||||
waitForPodPhases,
|
||||
containerBuild
|
||||
} from '../k8s'
|
||||
import {
|
||||
containerVolumes,
|
||||
@@ -23,7 +24,8 @@ export async function runContainerStep(
|
||||
stepContainer: RunContainerStepArgs
|
||||
): Promise<number> {
|
||||
if (stepContainer.dockerfile) {
|
||||
throw new Error('Building container actions is not currently supported')
|
||||
const imageUrl = await containerBuild(stepContainer)
|
||||
stepContainer.image = imageUrl
|
||||
}
|
||||
|
||||
let secretName: string | undefined = undefined
|
||||
|
||||
@@ -9,13 +9,15 @@ import {
|
||||
import { isAuthPermissionsOK, namespace, requiredPermissions } from './k8s'
|
||||
|
||||
async function run(): Promise<void> {
|
||||
try {
|
||||
const input = await getInputFromStdin()
|
||||
const input = await getInputFromStdin()
|
||||
|
||||
const args = input['args']
|
||||
const command = input['command']
|
||||
const responseFile = input['responseFile']
|
||||
const state = input['state']
|
||||
const args = input['args']
|
||||
const command = input['command']
|
||||
const responseFile = input['responseFile']
|
||||
const state = input['state']
|
||||
|
||||
let exitCode = 0
|
||||
try {
|
||||
if (!(await isAuthPermissionsOK())) {
|
||||
throw new Error(
|
||||
`The Service account needs the following permissions ${JSON.stringify(
|
||||
@@ -23,28 +25,28 @@ async function run(): Promise<void> {
|
||||
)} on the pod resource in the '${namespace()}' namespace. Please contact your self hosted runner administrator.`
|
||||
)
|
||||
}
|
||||
|
||||
let exitCode = 0
|
||||
switch (command) {
|
||||
case Command.PrepareJob:
|
||||
await prepareJob(args as prepareJobArgs, responseFile)
|
||||
return process.exit(0)
|
||||
break
|
||||
case Command.CleanupJob:
|
||||
await cleanupJob()
|
||||
return process.exit(0)
|
||||
break
|
||||
case Command.RunScriptStep:
|
||||
await runScriptStep(args, state, null)
|
||||
return process.exit(0)
|
||||
break
|
||||
case Command.RunContainerStep:
|
||||
exitCode = await runContainerStep(args)
|
||||
return process.exit(exitCode)
|
||||
break
|
||||
case Command.runContainerStep:
|
||||
default:
|
||||
throw new Error(`Command not recognized: ${command}`)
|
||||
}
|
||||
} catch (error) {
|
||||
core.error(error as Error)
|
||||
process.exit(1)
|
||||
exitCode = 1
|
||||
}
|
||||
process.exitCode = exitCode
|
||||
}
|
||||
|
||||
void run()
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import * as core from '@actions/core'
|
||||
import * as k8s from '@kubernetes/client-node'
|
||||
import { ContainerInfo, Registry } from 'hooklib'
|
||||
import { RunContainerStepArgs, ContainerInfo, Registry } from 'hooklib'
|
||||
import * as stream from 'stream'
|
||||
import {
|
||||
getJobPodName,
|
||||
@@ -10,15 +10,25 @@ import {
|
||||
getVolumeClaimName,
|
||||
RunnerInstanceLabel
|
||||
} from '../hooks/constants'
|
||||
import { kanikoPod } from './kaniko'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { PodPhase } from './utils'
|
||||
import {
|
||||
namespace,
|
||||
kc,
|
||||
k8sApi,
|
||||
k8sBatchV1Api,
|
||||
k8sAuthorizationV1Api,
|
||||
localRegistryNodePort,
|
||||
localRegistryHost,
|
||||
localRegistryPort,
|
||||
remoteRegistryHost,
|
||||
remoteRegistryHandle,
|
||||
remoteRegistrySecretName,
|
||||
isLocalRegistrySet
|
||||
} from './settings'
|
||||
|
||||
const kc = new k8s.KubeConfig()
|
||||
|
||||
kc.loadFromDefault()
|
||||
|
||||
const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
|
||||
const k8sBatchV1Api = kc.makeApiClient(k8s.BatchV1Api)
|
||||
const k8sAuthorizationV1Api = kc.makeApiClient(k8s.AuthorizationV1Api)
|
||||
export * from './settings'
|
||||
|
||||
export const POD_VOLUME_NAME = 'work'
|
||||
|
||||
@@ -46,12 +56,6 @@ export const requiredPermissions = [
|
||||
verbs: ['get', 'list', 'create', 'delete'],
|
||||
resource: 'jobs',
|
||||
subresource: ''
|
||||
},
|
||||
{
|
||||
group: '',
|
||||
verbs: ['create', 'delete', 'get', 'list'],
|
||||
resource: 'secrets',
|
||||
subresource: ''
|
||||
}
|
||||
]
|
||||
|
||||
@@ -325,8 +329,20 @@ export async function waitForPodPhases(
|
||||
const backOffManager = new BackOffManager(maxTimeSeconds)
|
||||
let phase: PodPhase = PodPhase.UNKNOWN
|
||||
try {
|
||||
while (true) {
|
||||
phase = await getPodPhase(podName)
|
||||
let retryCount = 0
|
||||
while (retryCount < 3) {
|
||||
try {
|
||||
phase = await getPodPhase(podName)
|
||||
} catch (err) {
|
||||
const e = err as k8s.HttpError
|
||||
if (e?.body?.reason === 'NotFound') {
|
||||
retryCount++
|
||||
await backOffManager.backOff()
|
||||
continue
|
||||
} else {
|
||||
throw err
|
||||
}
|
||||
}
|
||||
if (awaitingPhases.has(phase)) {
|
||||
return
|
||||
}
|
||||
@@ -338,6 +354,7 @@ export async function waitForPodPhases(
|
||||
}
|
||||
await backOffManager.backOff()
|
||||
}
|
||||
throw new Error(`Failed to get pod phase after ${retryCount} attempts`)
|
||||
} catch (error) {
|
||||
throw new Error(`Pod ${podName} is unhealthy with phase status ${phase}`)
|
||||
}
|
||||
@@ -464,6 +481,42 @@ export async function isPodContainerAlpine(
|
||||
return isAlpine
|
||||
}
|
||||
|
||||
export async function containerBuild(
|
||||
args: RunContainerStepArgs
|
||||
): Promise<string> {
|
||||
let kanikoRegistry = ''
|
||||
let pullRegistry = ''
|
||||
let secretName: string | undefined = undefined
|
||||
if (isLocalRegistrySet()) {
|
||||
const host = `${localRegistryHost()}.${namespace()}.svc.cluster.local`
|
||||
const port = localRegistryPort()
|
||||
const uri = `${generateBuildHandle()}/${generateBuildImage()}`
|
||||
kanikoRegistry = `${host}:${port}/${uri}`
|
||||
pullRegistry = `localhost:${localRegistryNodePort()}/${uri}`
|
||||
} else {
|
||||
const uri = `${remoteRegistryHandle()}/${generateBuildImage()}`
|
||||
if (remoteRegistryHost()) {
|
||||
kanikoRegistry = `${remoteRegistryHost()}/${uri}`
|
||||
} else {
|
||||
kanikoRegistry = uri
|
||||
}
|
||||
pullRegistry = kanikoRegistry
|
||||
secretName = remoteRegistrySecretName()
|
||||
}
|
||||
|
||||
const pod = kanikoPod(args.dockerfile, kanikoRegistry, secretName)
|
||||
if (!pod.metadata?.name) {
|
||||
throw new Error('kaniko pod name is not set')
|
||||
}
|
||||
await k8sApi.createNamespacedPod(namespace(), pod)
|
||||
await waitForPodPhases(
|
||||
pod.metadata.name,
|
||||
new Set([PodPhase.SUCCEEDED]),
|
||||
new Set([PodPhase.PENDING, PodPhase.UNKNOWN, PodPhase.RUNNING])
|
||||
)
|
||||
return pullRegistry
|
||||
}
|
||||
|
||||
async function getCurrentNodeName(): Promise<string> {
|
||||
const resp = await k8sApi.readNamespacedPod(getRunnerPodName(), namespace())
|
||||
|
||||
@@ -473,19 +526,6 @@ async function getCurrentNodeName(): Promise<string> {
|
||||
}
|
||||
return nodeName
|
||||
}
|
||||
export function namespace(): string {
|
||||
if (process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']) {
|
||||
return process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']
|
||||
}
|
||||
|
||||
const context = kc.getContexts().find(ctx => ctx.namespace)
|
||||
if (!context?.namespace) {
|
||||
throw new Error(
|
||||
'Failed to determine namespace, falling back to `default`. Namespace should be set in context, or in env variable "ACTIONS_RUNNER_KUBERNETES_NAMESPACE"'
|
||||
)
|
||||
}
|
||||
return context.namespace
|
||||
}
|
||||
|
||||
class BackOffManager {
|
||||
private backOffSeconds = 1
|
||||
@@ -517,9 +557,6 @@ export function containerPorts(
|
||||
container: ContainerInfo
|
||||
): k8s.V1ContainerPort[] {
|
||||
const ports: k8s.V1ContainerPort[] = []
|
||||
if (!container.portMappings?.length) {
|
||||
return ports
|
||||
}
|
||||
for (const portDefinition of container.portMappings) {
|
||||
const portProtoSplit = portDefinition.split('/')
|
||||
if (portProtoSplit.length > 2) {
|
||||
@@ -554,3 +591,11 @@ export function containerPorts(
|
||||
}
|
||||
return ports
|
||||
}
|
||||
|
||||
function generateBuildImage(): string {
|
||||
return `${uuidv4()}:${uuidv4()}`
|
||||
}
|
||||
|
||||
function generateBuildHandle(): string {
|
||||
return uuidv4()
|
||||
}
|
||||
|
||||
95
packages/k8s/src/k8s/kaniko.ts
Normal file
95
packages/k8s/src/k8s/kaniko.ts
Normal file
@@ -0,0 +1,95 @@
|
||||
import * as k8s from '@kubernetes/client-node'
|
||||
import * as path from 'path'
|
||||
import {
|
||||
getRunnerPodName,
|
||||
getVolumeClaimName,
|
||||
MAX_POD_NAME_LENGTH,
|
||||
RunnerInstanceLabel
|
||||
} from '../hooks/constants'
|
||||
import { POD_VOLUME_NAME } from '.'
|
||||
|
||||
export const KANIKO_MOUNT_PATH = '/mnt/kaniko'
|
||||
|
||||
function getKanikoName(): string {
|
||||
return `${getRunnerPodName().substring(
|
||||
0,
|
||||
MAX_POD_NAME_LENGTH - '-kaniko'.length
|
||||
)}-kaniko`
|
||||
}
|
||||
|
||||
export function kanikoPod(
|
||||
dockerfile: string,
|
||||
destination: string,
|
||||
secretName?: string
|
||||
): k8s.V1Pod {
|
||||
const pod = new k8s.V1Pod()
|
||||
pod.apiVersion = 'v1'
|
||||
pod.kind = 'Pod'
|
||||
pod.metadata = new k8s.V1ObjectMeta()
|
||||
pod.metadata.name = getKanikoName()
|
||||
const instanceLabel = new RunnerInstanceLabel()
|
||||
pod.metadata.labels = {
|
||||
[instanceLabel.key]: instanceLabel.value
|
||||
}
|
||||
|
||||
const spec = new k8s.V1PodSpec()
|
||||
const c = new k8s.V1Container()
|
||||
c.image = 'gcr.io/kaniko-project/executor:latest'
|
||||
c.name = 'kaniko'
|
||||
c.imagePullPolicy = 'Always'
|
||||
const prefix = (process.env.RUNNER_WORKSPACE as string).split('_work')[0]
|
||||
const subPath = path
|
||||
.dirname(dockerfile)
|
||||
.substring(prefix.length + '_work/'.length)
|
||||
|
||||
c.volumeMounts = [
|
||||
{
|
||||
name: POD_VOLUME_NAME,
|
||||
mountPath: KANIKO_MOUNT_PATH,
|
||||
subPath,
|
||||
readOnly: true
|
||||
}
|
||||
]
|
||||
c.args = [
|
||||
`--dockerfile=${path.basename(dockerfile)}`,
|
||||
`--context=dir://${KANIKO_MOUNT_PATH}`,
|
||||
`--destination=${destination}`
|
||||
]
|
||||
spec.containers = [c]
|
||||
spec.dnsPolicy = 'ClusterFirst'
|
||||
spec.restartPolicy = 'Never'
|
||||
pod.spec = spec
|
||||
const claimName: string = getVolumeClaimName()
|
||||
pod.spec.volumes = [
|
||||
{
|
||||
name: POD_VOLUME_NAME,
|
||||
persistentVolumeClaim: { claimName }
|
||||
}
|
||||
]
|
||||
if (secretName) {
|
||||
const volumeName = 'docker-registry'
|
||||
pod.spec.volumes.push({
|
||||
name: volumeName,
|
||||
projected: {
|
||||
sources: [
|
||||
{
|
||||
secret: {
|
||||
name: secretName,
|
||||
items: [
|
||||
{
|
||||
key: '.dockerconfigjson',
|
||||
path: 'config.json'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
})
|
||||
c.volumeMounts.push({
|
||||
name: volumeName,
|
||||
mountPath: '/kaniko/.docker/'
|
||||
})
|
||||
}
|
||||
return pod
|
||||
}
|
||||
73
packages/k8s/src/k8s/settings.ts
Normal file
73
packages/k8s/src/k8s/settings.ts
Normal file
@@ -0,0 +1,73 @@
|
||||
import * as k8s from '@kubernetes/client-node'
|
||||
export const kc = new k8s.KubeConfig()
|
||||
|
||||
kc.loadFromDefault()
|
||||
|
||||
export const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
|
||||
export const k8sBatchV1Api = kc.makeApiClient(k8s.BatchV1Api)
|
||||
export const k8sAuthorizationV1Api = kc.makeApiClient(k8s.AuthorizationV1Api)
|
||||
|
||||
export const POD_VOLUME_NAME = 'work'
|
||||
export function namespace(): string {
|
||||
if (process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']) {
|
||||
return process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']
|
||||
}
|
||||
|
||||
const context = kc.getContexts().find(ctx => ctx.namespace)
|
||||
if (!context?.namespace) {
|
||||
throw new Error(
|
||||
'Failed to determine namespace, falling back to `default`. Namespace should be set in context, or in env variable "ACTIONS_RUNNER_KUBERNETES_NAMESPACE"'
|
||||
)
|
||||
}
|
||||
return context.namespace
|
||||
}
|
||||
|
||||
export function isLocalRegistrySet(): boolean {
|
||||
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_HOST'
|
||||
return !!process.env[name]
|
||||
}
|
||||
|
||||
export function localRegistryHost(): string {
|
||||
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_HOST'
|
||||
if (process.env[name]) {
|
||||
return process.env[name]
|
||||
}
|
||||
throw new Error(`environment variable ${name} is not set`)
|
||||
}
|
||||
|
||||
export function localRegistryPort(): number {
|
||||
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_PORT'
|
||||
if (process.env[name]) {
|
||||
return parseInt(process.env[name])
|
||||
}
|
||||
throw new Error(`environment variable ${name} is not set`)
|
||||
}
|
||||
|
||||
export function localRegistryNodePort(): number {
|
||||
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_NODE_PORT'
|
||||
if (process.env[name]) {
|
||||
return parseInt(process.env[name])
|
||||
}
|
||||
throw new Error(`environment variable ${name} is not set`)
|
||||
}
|
||||
|
||||
export function remoteRegistryHost(): string {
|
||||
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_REMOTE_REGISTRY_HOST'
|
||||
return process.env[name] || ''
|
||||
}
|
||||
|
||||
export function remoteRegistryHandle(): string {
|
||||
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_REMOTE_REGISTRY_HANDLE'
|
||||
if (process.env[name]) {
|
||||
return process.env[name]
|
||||
}
|
||||
throw new Error(`environment variable ${name} is not set`)
|
||||
}
|
||||
|
||||
export function remoteRegistrySecretName(): string {
|
||||
const name = 'ACTIONS_RUNNER_CONTAINER_HOOKS_REMOTE_REGISTRY_SECRET_NAME'
|
||||
if (process.env[name]) {
|
||||
return process.env[name]
|
||||
}
|
||||
throw new Error(`environment variable ${name} is not set`)
|
||||
}
|
||||
@@ -111,21 +111,11 @@ export function writeEntryPointScript(
|
||||
if (environmentVariables && Object.entries(environmentVariables).length) {
|
||||
const envBuffer: string[] = []
|
||||
for (const [key, value] of Object.entries(environmentVariables)) {
|
||||
if (
|
||||
key.includes(`=`) ||
|
||||
key.includes(`'`) ||
|
||||
key.includes(`"`) ||
|
||||
key.includes(`$`)
|
||||
) {
|
||||
throw new Error(
|
||||
`environment key ${key} is invalid - the key must not contain =, $, ', or "`
|
||||
)
|
||||
}
|
||||
envBuffer.push(
|
||||
`"${key}=${value
|
||||
.replace(/\\/g, '\\\\')
|
||||
.replace(/"/g, '\\"')
|
||||
.replace(/\$/g, '\\$')}"`
|
||||
.replace(/=/g, '\\=')}"`
|
||||
)
|
||||
}
|
||||
environmentPrefix = `env ${envBuffer.join(' ')} `
|
||||
@@ -147,17 +137,6 @@ exec ${environmentPrefix} ${entryPoint} ${
|
||||
}
|
||||
}
|
||||
|
||||
export function generateContainerName(image: string): string {
|
||||
const nameWithTag = image.split('/').pop()
|
||||
const name = nameWithTag?.split(':').at(0)
|
||||
|
||||
if (!name) {
|
||||
throw new Error(`Image definition '${image}' is invalid`)
|
||||
}
|
||||
|
||||
return name
|
||||
}
|
||||
|
||||
export enum PodPhase {
|
||||
PENDING = 'Pending',
|
||||
RUNNING = 'Running',
|
||||
|
||||
@@ -1,10 +1,6 @@
|
||||
import * as fs from 'fs'
|
||||
import { containerPorts, POD_VOLUME_NAME } from '../src/k8s'
|
||||
import {
|
||||
containerVolumes,
|
||||
generateContainerName,
|
||||
writeEntryPointScript
|
||||
} from '../src/k8s/utils'
|
||||
import { containerVolumes, writeEntryPointScript } from '../src/k8s/utils'
|
||||
import { TestHelper } from './test-setup'
|
||||
|
||||
let testHelper: TestHelper
|
||||
@@ -49,81 +45,6 @@ describe('k8s utils', () => {
|
||||
).toThrow()
|
||||
})
|
||||
|
||||
it('should throw if environment variable name contains double quote', () => {
|
||||
expect(() =>
|
||||
writeEntryPointScript(
|
||||
'/test',
|
||||
'sh',
|
||||
['-e', 'script.sh'],
|
||||
['/prepend/path'],
|
||||
{
|
||||
'SOME"_ENV': 'SOME_VALUE'
|
||||
}
|
||||
)
|
||||
).toThrow()
|
||||
})
|
||||
|
||||
it('should throw if environment variable name contains =', () => {
|
||||
expect(() =>
|
||||
writeEntryPointScript(
|
||||
'/test',
|
||||
'sh',
|
||||
['-e', 'script.sh'],
|
||||
['/prepend/path'],
|
||||
{
|
||||
'SOME=ENV': 'SOME_VALUE'
|
||||
}
|
||||
)
|
||||
).toThrow()
|
||||
})
|
||||
|
||||
it('should throw if environment variable name contains single quote', () => {
|
||||
expect(() =>
|
||||
writeEntryPointScript(
|
||||
'/test',
|
||||
'sh',
|
||||
['-e', 'script.sh'],
|
||||
['/prepend/path'],
|
||||
{
|
||||
"SOME'_ENV": 'SOME_VALUE'
|
||||
}
|
||||
)
|
||||
).toThrow()
|
||||
})
|
||||
|
||||
it('should throw if environment variable name contains dollar', () => {
|
||||
expect(() =>
|
||||
writeEntryPointScript(
|
||||
'/test',
|
||||
'sh',
|
||||
['-e', 'script.sh'],
|
||||
['/prepend/path'],
|
||||
{
|
||||
SOME_$_ENV: 'SOME_VALUE'
|
||||
}
|
||||
)
|
||||
).toThrow()
|
||||
})
|
||||
|
||||
it('should escape double quote, dollar and backslash in environment variable values', () => {
|
||||
const { runnerPath } = writeEntryPointScript(
|
||||
'/test',
|
||||
'sh',
|
||||
['-e', 'script.sh'],
|
||||
['/prepend/path'],
|
||||
{
|
||||
DQUOTE: '"',
|
||||
BACK_SLASH: '\\',
|
||||
DOLLAR: '$'
|
||||
}
|
||||
)
|
||||
expect(fs.existsSync(runnerPath)).toBe(true)
|
||||
const script = fs.readFileSync(runnerPath, 'utf8')
|
||||
expect(script).toContain('"DQUOTE=\\"')
|
||||
expect(script).toContain('"BACK_SLASH=\\\\"')
|
||||
expect(script).toContain('"DOLLAR=\\$"')
|
||||
})
|
||||
|
||||
it('should return object with containerPath and runnerPath', () => {
|
||||
const { containerPath, runnerPath } = writeEntryPointScript(
|
||||
'/test',
|
||||
@@ -300,32 +221,4 @@ describe('k8s utils', () => {
|
||||
expect(() => containerPorts({ portMappings: ['1/tcp/udp'] })).toThrow()
|
||||
})
|
||||
})
|
||||
|
||||
describe('generate container name', () => {
|
||||
it('should return the container name from image string', () => {
|
||||
expect(
|
||||
generateContainerName('public.ecr.aws/localstack/localstack')
|
||||
).toEqual('localstack')
|
||||
expect(
|
||||
generateContainerName(
|
||||
'public.ecr.aws/url/with/multiple/slashes/postgres:latest'
|
||||
)
|
||||
).toEqual('postgres')
|
||||
expect(generateContainerName('postgres')).toEqual('postgres')
|
||||
expect(generateContainerName('postgres:latest')).toEqual('postgres')
|
||||
expect(generateContainerName('localstack/localstack')).toEqual(
|
||||
'localstack'
|
||||
)
|
||||
expect(generateContainerName('localstack/localstack:latest')).toEqual(
|
||||
'localstack'
|
||||
)
|
||||
})
|
||||
|
||||
it('should throw on invalid image string', () => {
|
||||
expect(() =>
|
||||
generateContainerName('localstack/localstack/:latest')
|
||||
).toThrow()
|
||||
expect(() => generateContainerName(':latest')).toThrow()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
import * as fs from 'fs'
|
||||
import * as path from 'path'
|
||||
import { cleanupJob } from '../src/hooks'
|
||||
import { createContainerSpec, prepareJob } from '../src/hooks/prepare-job'
|
||||
import { prepareJob } from '../src/hooks/prepare-job'
|
||||
import { TestHelper } from './test-setup'
|
||||
import { generateContainerName } from '../src/k8s/utils'
|
||||
import { V1Container } from '@kubernetes/client-node'
|
||||
|
||||
jest.useRealTimers()
|
||||
|
||||
@@ -73,27 +71,4 @@ describe('Prepare job', () => {
|
||||
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
|
||||
).rejects.toThrow()
|
||||
})
|
||||
|
||||
it('should not set command + args for service container if not passed in args', async () => {
|
||||
const services = prepareJobData.args.services.map(service => {
|
||||
return createContainerSpec(service, generateContainerName(service.image))
|
||||
}) as [V1Container]
|
||||
|
||||
expect(services[0].command).toBe(undefined)
|
||||
expect(services[0].args).toBe(undefined)
|
||||
})
|
||||
|
||||
test.each([undefined, null, []])(
|
||||
'should not throw exception when portMapping=%p',
|
||||
async pm => {
|
||||
prepareJobData.args.services.forEach(s => {
|
||||
s.portMappings = pm
|
||||
})
|
||||
await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
|
||||
const content = JSON.parse(
|
||||
fs.readFileSync(prepareJobOutputFilePath).toString()
|
||||
)
|
||||
expect(() => content.context.services[0].image).not.toThrow()
|
||||
}
|
||||
)
|
||||
})
|
||||
|
||||
@@ -3,11 +3,10 @@ import { TestHelper } from './test-setup'
|
||||
|
||||
jest.useRealTimers()
|
||||
|
||||
let testHelper: TestHelper
|
||||
describe('Run container step with image', () => {
|
||||
let testHelper: TestHelper
|
||||
let runContainerStepData: any
|
||||
|
||||
let runContainerStepData: any
|
||||
|
||||
describe('Run container step', () => {
|
||||
beforeEach(async () => {
|
||||
testHelper = new TestHelper()
|
||||
await testHelper.initialize()
|
||||
@@ -39,3 +38,33 @@ describe('Run container step', () => {
|
||||
).resolves.not.toThrow()
|
||||
})
|
||||
})
|
||||
|
||||
describe('run container step with docker build', () => {
|
||||
let testHelper: TestHelper
|
||||
let runContainerStepData: any
|
||||
beforeEach(async () => {
|
||||
testHelper = new TestHelper()
|
||||
await testHelper.initialize()
|
||||
runContainerStepData = testHelper.getRunContainerStepDefinition()
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
await testHelper.cleanup()
|
||||
})
|
||||
|
||||
it('should build container and execute docker action', async () => {
|
||||
const { registryName, localRegistryPort, nodePort } =
|
||||
await testHelper.createContainerRegistry()
|
||||
|
||||
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_HOST =
|
||||
registryName
|
||||
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_PORT =
|
||||
localRegistryPort.toString()
|
||||
process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_LOCAL_REGISTRY_NODE_PORT =
|
||||
nodePort.toString()
|
||||
const actionPath = testHelper.initializeDockerAction()
|
||||
const data = JSON.parse(JSON.stringify(runContainerStepData))
|
||||
data.args.dockerfile = `${actionPath}/Dockerfile`
|
||||
await expect(runContainerStep(data.args)).resolves.not.toThrow()
|
||||
})
|
||||
})
|
||||
|
||||
@@ -59,6 +59,7 @@ describe('Run script step', () => {
|
||||
it('should shold have env variables available', async () => {
|
||||
runScriptStepDefinition.args.entryPoint = 'bash'
|
||||
|
||||
runScriptStepDefinition.args.workingDirectory = '/' // set to '/' so that cd does not throw
|
||||
runScriptStepDefinition.args.entryPointArgs = [
|
||||
'-c',
|
||||
"'if [[ -z $NODE_ENV ]]; then exit 1; fi'"
|
||||
@@ -89,28 +90,6 @@ describe('Run script step', () => {
|
||||
).resolves.not.toThrow()
|
||||
})
|
||||
|
||||
it('Dollar symbols in environment variables should not be expanded', async () => {
|
||||
runScriptStepDefinition.args.environmentVariables = {
|
||||
VARIABLE1: '$VAR',
|
||||
VARIABLE2: '${VAR}',
|
||||
VARIABLE3: '$(VAR)'
|
||||
}
|
||||
runScriptStepDefinition.args.entryPointArgs = [
|
||||
'-c',
|
||||
'\'if [[ -z "$VARIABLE1" ]]; then exit 1; fi\'',
|
||||
'\'if [[ -z "$VARIABLE2" ]]; then exit 2; fi\'',
|
||||
'\'if [[ -z "$VARIABLE3" ]]; then exit 3; fi\''
|
||||
]
|
||||
|
||||
await expect(
|
||||
runScriptStep(
|
||||
runScriptStepDefinition.args,
|
||||
prepareJobOutputData.state,
|
||||
null
|
||||
)
|
||||
).resolves.not.toThrow()
|
||||
})
|
||||
|
||||
it('Should have path variable changed in container with prepend path string array', async () => {
|
||||
runScriptStepDefinition.args.prependPath = ['/some/other/path']
|
||||
runScriptStepDefinition.args.entryPoint = '/bin/bash'
|
||||
|
||||
@@ -2,7 +2,10 @@ import * as k8s from '@kubernetes/client-node'
|
||||
import * as fs from 'fs'
|
||||
import { HookData } from 'hooklib/lib'
|
||||
import * as path from 'path'
|
||||
import internal from 'stream'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { waitForPodPhases } from '../src/k8s'
|
||||
import { PodPhase } from '../src/k8s/utils'
|
||||
|
||||
const kc = new k8s.KubeConfig()
|
||||
|
||||
@@ -10,6 +13,7 @@ kc.loadFromDefault()
|
||||
|
||||
const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
|
||||
const k8sStorageApi = kc.makeApiClient(k8s.StorageV1Api)
|
||||
const k8sAppsV1 = kc.makeApiClient(k8s.AppsV1Api)
|
||||
|
||||
export class TestHelper {
|
||||
private tempDirPath: string
|
||||
@@ -74,10 +78,19 @@ export class TestHelper {
|
||||
0
|
||||
)
|
||||
.catch(e => {})
|
||||
await k8sApi
|
||||
.deleteNamespacedPod(
|
||||
`${this.podName}-kaniko`,
|
||||
'default',
|
||||
undefined,
|
||||
undefined,
|
||||
0
|
||||
)
|
||||
.catch(e => {})
|
||||
}
|
||||
public createFile(fileName?: string): string {
|
||||
public createFile(fileName?: string, content = ''): string {
|
||||
const filePath = `${this.tempDirPath}/${fileName || uuidv4()}`
|
||||
fs.writeFileSync(filePath, '')
|
||||
fs.writeFileSync(filePath, content)
|
||||
return filePath
|
||||
}
|
||||
|
||||
@@ -193,4 +206,237 @@ export class TestHelper {
|
||||
runContainerStep.args.registry = null
|
||||
return runContainerStep
|
||||
}
|
||||
|
||||
public async createContainerRegistry(): Promise<{
|
||||
registryName: string
|
||||
localRegistryPort: number
|
||||
nodePort: number
|
||||
}> {
|
||||
const registryName = 'docker-registry'
|
||||
const localRegistryPort = 5000
|
||||
const nodePort = 31500
|
||||
|
||||
const cm = registryConfigMap(registryName, localRegistryPort)
|
||||
const secret = registrySecret(registryName)
|
||||
const ss = registryStatefulSet(registryName, localRegistryPort)
|
||||
const svc = registryService(registryName, localRegistryPort, nodePort)
|
||||
const namespace =
|
||||
process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE'] || 'default'
|
||||
|
||||
await Promise.all([
|
||||
k8sApi.createNamespacedConfigMap(namespace, cm),
|
||||
k8sApi.createNamespacedSecret(namespace, secret)
|
||||
])
|
||||
await k8sAppsV1.createNamespacedStatefulSet(namespace, ss)
|
||||
await waitForPodPhases(
|
||||
`${registryName}-0`,
|
||||
new Set([PodPhase.RUNNING]),
|
||||
new Set([PodPhase.PENDING])
|
||||
)
|
||||
await k8sApi.createNamespacedService(namespace, svc)
|
||||
return {
|
||||
registryName,
|
||||
localRegistryPort,
|
||||
nodePort
|
||||
}
|
||||
}
|
||||
|
||||
public initializeDockerAction(): string {
|
||||
const actionPath = `${this.tempDirPath}/_work/_actions/example-handle/example-repo/example-branch/mock-directory`
|
||||
fs.mkdirSync(actionPath, { recursive: true })
|
||||
this.writeDockerfile(actionPath)
|
||||
this.writeEntrypoint(actionPath)
|
||||
return actionPath
|
||||
}
|
||||
|
||||
private writeDockerfile(actionPath: string) {
|
||||
const content = `FROM ubuntu:latest
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
ENTRYPOINT ["/entrypoint.sh"]`
|
||||
fs.writeFileSync(`${actionPath}/Dockerfile`, content)
|
||||
}
|
||||
|
||||
private writeEntrypoint(actionPath) {
|
||||
const content = `#!/bin/sh -l
|
||||
echo "Hello $1"
|
||||
time=$(date)
|
||||
echo "::set-output name=time::$time"`
|
||||
const entryPointPath = `${actionPath}/entrypoint.sh`
|
||||
fs.writeFileSync(entryPointPath, content)
|
||||
fs.chmodSync(entryPointPath, 0o755)
|
||||
}
|
||||
}
|
||||
|
||||
function registryConfigMap(name: string, port: number): k8s.V1ConfigMap {
|
||||
const REGISTRY_CONFIG_MAP_YAML = `
|
||||
storage:
|
||||
filesystem:
|
||||
rootdirectory: /var/lib/registry
|
||||
maxthreads: 100
|
||||
health:
|
||||
storagedriver:
|
||||
enabled: true
|
||||
interval: 10s
|
||||
threshold: 3
|
||||
http:
|
||||
addr: :${port}
|
||||
headers:
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
log:
|
||||
fields:
|
||||
service: registry
|
||||
storage:
|
||||
cache:
|
||||
blobdescriptor: inmemory
|
||||
version: 0.1
|
||||
`.trim()
|
||||
const cm = new k8s.V1ConfigMap()
|
||||
cm.apiVersion = 'v1'
|
||||
cm.data = {
|
||||
'config.yaml': REGISTRY_CONFIG_MAP_YAML
|
||||
}
|
||||
cm.kind = 'ConfigMap'
|
||||
cm.metadata = new k8s.V1ObjectMeta()
|
||||
cm.metadata.labels = { app: name }
|
||||
cm.metadata.name = `${name}-config`
|
||||
|
||||
return cm
|
||||
}
|
||||
|
||||
function registryStatefulSet(name: string, port: number): k8s.V1StatefulSet {
|
||||
const ss = new k8s.V1StatefulSet()
|
||||
ss.apiVersion = 'apps/v1'
|
||||
ss.metadata = new k8s.V1ObjectMeta()
|
||||
ss.metadata.name = name
|
||||
|
||||
const spec = new k8s.V1StatefulSetSpec()
|
||||
spec.selector = new k8s.V1LabelSelector()
|
||||
spec.selector.matchLabels = { app: 'docker-registry' }
|
||||
spec.serviceName = 'registry'
|
||||
spec.replicas = 1
|
||||
|
||||
const tmpl = new k8s.V1PodTemplateSpec()
|
||||
tmpl.metadata = new k8s.V1ObjectMeta()
|
||||
tmpl.metadata.labels = { app: name }
|
||||
tmpl.spec = new k8s.V1PodSpec()
|
||||
tmpl.spec.terminationGracePeriodSeconds = 5 // TODO: figure out for how long
|
||||
|
||||
const c = new k8s.V1Container()
|
||||
c.command = ['/bin/registry', 'serve', '/etc/docker/registry/config.yaml']
|
||||
c.env = [
|
||||
{
|
||||
name: 'REGISTRY_HTTP_SECRET',
|
||||
valueFrom: {
|
||||
secretKeyRef: {
|
||||
key: 'haSharedSecret',
|
||||
name: `${name}-secret`
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY',
|
||||
value: '/var/lib/registry'
|
||||
}
|
||||
]
|
||||
c.image = 'registry:2.6.2'
|
||||
c.name = name
|
||||
c.imagePullPolicy = 'IfNotPresent'
|
||||
c.ports = [
|
||||
{
|
||||
containerPort: port,
|
||||
protocol: 'TCP'
|
||||
}
|
||||
]
|
||||
|
||||
c.volumeMounts = [
|
||||
{
|
||||
mountPath: '/etc/docker/registry',
|
||||
name: 'docker-registry-config'
|
||||
}
|
||||
]
|
||||
|
||||
c.livenessProbe = new k8s.V1Probe()
|
||||
c.livenessProbe.failureThreshold = 3
|
||||
c.livenessProbe.periodSeconds = 10
|
||||
c.livenessProbe.successThreshold = 1
|
||||
c.livenessProbe.timeoutSeconds = 1
|
||||
c.livenessProbe.httpGet = new k8s.V1HTTPGetAction()
|
||||
c.livenessProbe.httpGet.path = '/'
|
||||
c.livenessProbe.httpGet.port = port
|
||||
c.livenessProbe.httpGet.scheme = 'HTTP'
|
||||
|
||||
c.readinessProbe = new k8s.V1Probe()
|
||||
c.readinessProbe.failureThreshold = 3
|
||||
c.readinessProbe.periodSeconds = 10
|
||||
c.readinessProbe.successThreshold = 1
|
||||
c.readinessProbe.timeoutSeconds = 1
|
||||
c.readinessProbe.httpGet = new k8s.V1HTTPGetAction()
|
||||
c.readinessProbe.httpGet.path = '/'
|
||||
c.readinessProbe.httpGet.port = port
|
||||
c.readinessProbe.httpGet.scheme = 'HTTP'
|
||||
|
||||
tmpl.spec.containers = [c]
|
||||
tmpl.spec.volumes = [
|
||||
{
|
||||
name: `${name}-config`,
|
||||
configMap: {
|
||||
name: `${name}-config`
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
spec.template = tmpl
|
||||
ss.spec = spec
|
||||
|
||||
return ss
|
||||
}
|
||||
function registryService(
|
||||
name: string,
|
||||
port: number,
|
||||
nodePort: number
|
||||
): k8s.V1Service {
|
||||
const svc = new k8s.V1Service()
|
||||
svc.apiVersion = 'v1'
|
||||
svc.kind = 'Service'
|
||||
svc.metadata = new k8s.V1ObjectMeta()
|
||||
svc.metadata.name = name
|
||||
svc.metadata.labels = {
|
||||
app: name
|
||||
}
|
||||
const spec = new k8s.V1ServiceSpec()
|
||||
spec.externalTrafficPolicy = 'Cluster'
|
||||
spec.ports = [
|
||||
{
|
||||
name: 'registry',
|
||||
nodePort: nodePort,
|
||||
port: port,
|
||||
protocol: 'TCP',
|
||||
targetPort: port
|
||||
}
|
||||
]
|
||||
spec.selector = {
|
||||
app: name
|
||||
}
|
||||
spec.sessionAffinity = 'None'
|
||||
spec.type = 'NodePort'
|
||||
svc.spec = spec
|
||||
|
||||
return svc
|
||||
}
|
||||
|
||||
function registrySecret(name: string): k8s.V1Secret {
|
||||
const secret = new k8s.V1Secret()
|
||||
secret.apiVersion = 'v1'
|
||||
secret.data = { haSharedSecret: 'U29tZVZlcnlTdHJpbmdTZWNyZXQK' }
|
||||
secret.kind = 'Secret'
|
||||
secret.metadata = new k8s.V1ObjectMeta()
|
||||
secret.metadata.labels = {
|
||||
app: name,
|
||||
chart: `${name}-1.4.3`
|
||||
}
|
||||
secret.metadata.name = `${name}-secret`
|
||||
secret.type = 'Opaque'
|
||||
|
||||
return secret
|
||||
}
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
<!-- ## Features -->
|
||||
## Features
|
||||
- Always use the Docker related ENVs from the host machine instead of ENVs from the runner job [#40]
|
||||
- Use user defined entrypoints for service containers (instead of `tail -f /dev/null`)
|
||||
|
||||
## Bugs
|
||||
- Fixed substring issue with /github/workspace and /github/file_commands [#35]
|
||||
- Fixed issue related to setting hostPort and containerPort when formatting is not recognized by k8s default [#38]
|
||||
|
||||
- Handle `$` symbols in environment variable names and values in k8s [#74]
|
||||
|
||||
<!-- ## Misc -->
|
||||
<!-- ## Misc
|
||||
|
||||
Reference in New Issue
Block a user