Mirror of https://github.com/actions/runner-container-hooks.git (synced 2026-01-08 19:07:22 +08:00)

Compare commits: fhammerl/k ... nikola-jok (6 commits)

- 4de51ee6a5
- c8e272367f
- c4aa97c974
- f400db92cc
- 5f0dc3f3b6
- 6ef042836f

@@ -1,64 +0,0 @@
# ADR 0034: Build container-action Dockerfiles with Kaniko

**Date**: 2023-01-26

**Status**: In Progress

# Background

[Building Dockerfiles in k8s using Kaniko](https://github.com/actions/runner-container-hooks/issues/23) has been on the radar since the beginning of container hooks.
Currently, this is possible in ARC using a [dind/docker-in-docker](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/runner/actions-runner-dind.dockerfile) sidecar container.
This container needs to be launched using `--privileged`, which presents a security concern.

As an alternative tool, a container running [Kaniko](https://github.com/GoogleContainerTools/kaniko) can be used to build these Dockerfiles instead.
Kaniko does not need to be `--privileged`.
Whether a dind/docker-in-docker sidecar or Kaniko is used, in this ADR I will refer to these containers as '**builder containers**'.

# Guiding Principles

- **Security:** running a Kaniko builder container should be possible without the `--privileged` flag
- **Feature parity with Docker:** any Dockerfile that can be built with vanilla Docker should also be buildable using a Kaniko builder container
- **Ease of Use:** the customer should be able to build and push Docker images with minimal configuration

## Limitations

### User provided registry

The user needs to provide a remote registry (such as ghcr.io or Docker Hub) and credentials for the Kaniko builder container to push to and for k8s to pull from later. This is the user's responsibility, so that our solution remains lightweight and generic (a sketch of such a secret follows below).

- Alternatively, a user-managed local Docker registry within the k8s cluster can of course be used instead

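For illustration only, and not part of this change: a user-provided pull secret created with `@kubernetes/client-node` could look roughly like the sketch below. The helper name `examplePullSecret` and the `docker-secret` name are assumptions that simply mirror the `ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_SECRET_NAME` example given in the Interface section.

```ts
import * as k8s from '@kubernetes/client-node'

// Hypothetical helper: builds the kind of registry pull secret the user is
// expected to provide. The hook itself only receives the secret's name via an ENV.
export function examplePullSecret(
  registryHost: string, // e.g. 'ghcr.io'
  username: string,
  token: string
): k8s.V1Secret {
  const dockerConfig = {
    auths: {
      [registryHost]: {
        username,
        password: token,
        auth: Buffer.from(`${username}:${token}`).toString('base64')
      }
    }
  }

  const secret = new k8s.V1Secret()
  secret.apiVersion = 'v1'
  secret.kind = 'Secret'
  secret.type = 'kubernetes.io/dockerconfigjson'
  secret.metadata = new k8s.V1ObjectMeta()
  secret.metadata.name = 'docker-secret' // the name referenced by the SECRET_NAME ENV
  secret.data = {
    // data values must be base64-encoded
    '.dockerconfigjson': Buffer.from(JSON.stringify(dockerConfig)).toString('base64')
  }
  return secret
}
```
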
### Kaniko feature limit

Anything Kaniko cannot do is, by definition, something we are unable to help with. Potential incompatibilities and inconsistencies between Docker and Kaniko will naturally be inherited by our solution.

## Interface

The user will set `containerMode:kubernetes`, because this is a change to the behaviour of our k8s hooks.

The user will set two ENVs:

- `ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST`: e.g. `ghcr.io/OWNER` or `dockerhandle`.
- `ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_SECRET_NAME`: e.g. `docker-secret`; the name of the `k8s` secret resource that allows authenticating against the registry under the handle given above.

The workspace is used as the image name.

The image tag is a randomly generated string.

To execute a container-action, we then run a k8s job that loads the image from the specified registry.

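A minimal sketch, assuming hypothetical helper names, of how an image reference could be assembled from the registry host ENV, the workspace name, and a random tag; this is illustrative, not the hook's actual implementation:

```ts
import * as path from 'path'
import { v4 as uuidv4 } from 'uuid'

// Illustrative only: "<registry host>/<image name>:<random tag>".
function exampleImageReference(workspaceDirectory: string): string {
  const registryHost =
    process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST
  if (!registryHost) {
    throw new Error(
      'ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST is not set'
    )
  }
  // The workspace directory name doubles as the image name.
  const imageName = path.basename(workspaceDirectory).toLowerCase()
  // A short random string serves as the tag.
  const tag = uuidv4().substring(0, 8)
  return `${registryHost}/${imageName}:${tag}`
}

// e.g. exampleImageReference('/home/runner/_work/my-repo/my-repo')
//      -> 'ghcr.io/OWNER/my-repo:1a2b3c4d'
```
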
## Additional configuration

Users may want to use different URLs for the registry when pushing and when pulling an image, because the two operations are performed by different machines on different networks:

- The **Kaniko build container pushes the image** after building; it is a pod that belongs to the runner pod.
- The **kubelet pulls the image** before starting a pod.

The above two might not resolve all host names exactly the same way, so it makes sense to allow different push and pull URLs.

The ENVs `ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PUSH` and `ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PULL` will be preferred if set, as sketched below.

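A minimal sketch of that precedence, assuming hypothetical helper names and a fallback to the shared host variable:

```ts
// Illustrative only: dedicated push/pull hosts win when present,
// otherwise the single shared registry host is used for both.
function examplePushHost(): string | undefined {
  return (
    process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PUSH ??
    process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST
  )
}

function examplePullHost(): string | undefined {
  return (
    process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PULL ??
    process.env.ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST
  )
}
```
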
### Example

As an example, a cluster-local Docker registry could be a long-running pod exposed as a Service _and_ as a NodePort.

The Kaniko builder pod would push to `my-local-registry.default.svc.cluster.local:12345/foohandle` (`ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PUSH`).
This URL cannot be resolved by the kubelet to pull the image, so we need a secondary URL to pull it; in this case, using the NodePort, that URL is `localhost:NODEPORT/foohandle` (`ACTIONS_RUNNER_CONTAINER_HOOKS_K8S_REGISTRY_HOST_PULL`).

## Consequences

- Users can build container-actions from a local Dockerfile in their k8s cluster without a privileged Docker builder container

@@ -1,4 +1,5 @@
 import * as core from '@actions/core'
+import { v4 as uuidv4 } from 'uuid'
 import * as k8s from '@kubernetes/client-node'
 import { RunContainerStepArgs } from 'hooklib'
 import {
@@ -8,7 +9,8 @@ import {
   getPodLogs,
   getPodStatus,
   waitForJobToComplete,
-  waitForPodPhases
+  waitForPodPhases,
+  containerBuild
 } from '../k8s'
 import {
   containerVolumes,
@@ -23,6 +25,8 @@ export async function runContainerStep(
   stepContainer: RunContainerStepArgs
 ): Promise<number> {
   if (stepContainer.dockerfile) {
+    const imagePath = `${generateBuildHandle()}/${generateBuildTag()}`
+    await containerBuild(stepContainer, imagePath)
     throw new Error('Building container actions is not currently supported')
   }

@@ -108,3 +112,20 @@ function createPodSpec(

   return podContainer
 }
+
+function generateBuildTag(): string {
+  return `${generateRandomString()}:${uuidv4().substring(0, 6)}`
+}
+
+function generateBuildHandle(): string {
+  return generateRandomString()
+}
+
+function generateRandomString(length = 10): string {
+  let v = ''
+  const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
+  for (let i = 0; i < length; i++) {
+    v += chars.charAt(Math.floor(Math.random() * chars.length))
+  }
+  return v
+}

@@ -1,6 +1,6 @@
 import * as core from '@actions/core'
 import * as k8s from '@kubernetes/client-node'
-import { ContainerInfo, Registry } from 'hooklib'
+import { RunContainerStepArgs, ContainerInfo, Registry } from 'hooklib'
 import * as stream from 'stream'
 import {
   getJobPodName,
@@ -10,6 +10,13 @@ import {
   getVolumeClaimName,
   RunnerInstanceLabel
 } from '../hooks/constants'
+import {
+  registryConfigMap,
+  registrySecret,
+  registryStatefulSet,
+  registryService,
+  kanikoPod
+} from './kaniko'
 import { PodPhase } from './utils'

 const kc = new k8s.KubeConfig()
@@ -18,6 +25,7 @@ kc.loadFromDefault()

 const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
 const k8sBatchV1Api = kc.makeApiClient(k8s.BatchV1Api)
+const k8sAppsV1 = kc.makeApiClient(k8s.AppsV1Api)
 const k8sAuthorizationV1Api = kc.makeApiClient(k8s.AuthorizationV1Api)

 export const POD_VOLUME_NAME = 'work'
@@ -52,6 +60,12 @@ export const requiredPermissions = [
     verbs: ['create', 'delete', 'get', 'list'],
     resource: 'secrets',
     subresource: ''
   },
+  {
+    group: '',
+    verbs: ['create', 'delete', 'get', 'list'],
+    resource: 'configmaps',
+    subresource: ''
+  }
 ]

@@ -326,7 +340,14 @@ export async function waitForPodPhases(
   let phase: PodPhase = PodPhase.UNKNOWN
   try {
     while (true) {
-      phase = await getPodPhase(podName)
+      try {
+        phase = await getPodPhase(podName)
+      } catch (err) {
+        const e = err as k8s.HttpError
+        if (e?.body?.reason === 'NotFound') {
+          phase = PodPhase.UNKNOWN
+        }
+      }
       if (awaitingPhases.has(phase)) {
         return
       }
@@ -464,6 +485,45 @@ export async function isPodContainerAlpine(
   return isAlpine
 }

+export async function containerBuild(
+  args: RunContainerStepArgs,
+  imagePath: string
+): Promise<void> {
+  const cm = registryConfigMap()
+  const secret = registrySecret()
+  const ss = registryStatefulSet()
+  const svc = registryService()
+  const pod = kanikoPod(args.workingDirectory, imagePath)
+  await Promise.all([
+    k8sApi.createNamespacedConfigMap(namespace(), cm),
+    k8sApi.createNamespacedSecret(namespace(), secret)
+  ])
+  try {
+    await k8sAppsV1.createNamespacedStatefulSet(namespace(), ss)
+    await waitForPodPhases(
+      'docker-registry-0',
+      new Set([PodPhase.RUNNING]),
+      new Set([PodPhase.PENDING, PodPhase.UNKNOWN])
+    )
+  } catch (err) {
+    console.log(err)
+    console.log(JSON.stringify(err))
+    throw err
+  }
+  try {
+    await k8sApi.createNamespacedService(namespace(), svc)
+  } catch (err) {
+    console.log(JSON.stringify(err))
+    throw err
+  }
+  try {
+    await k8sApi.createNamespacedPod(namespace(), pod)
+  } catch (err) {
+    console.log(JSON.stringify(err))
+    throw err
+  }
+}
+
 async function getCurrentNodeName(): Promise<string> {
   const resp = await k8sApi.readNamespacedPod(getRunnerPodName(), namespace())

packages/k8s/src/k8s/kaniko.ts (new file, 208 lines)
@@ -0,0 +1,208 @@
import * as k8s from '@kubernetes/client-node'

const REGISTRY_CONFIG_MAP_YAML = `
storage:
  filesystem:
    rootdirectory: /var/lib/registry
    maxthreads: 100
health:
  storagedriver:
    enabled: true
    interval: 10s
    threshold: 3
http:
  addr: :5000
  headers:
    X-Content-Type-Options:
    - nosniff
log:
  fields:
    service: registry
storage:
  cache:
    blobdescriptor: inmemory
version: 0.1
`.trim()

export function registryConfigMap(): k8s.V1ConfigMap {
  const cm = new k8s.V1ConfigMap()
  cm.apiVersion = 'v1'
  cm.data = {
    'config.yaml': REGISTRY_CONFIG_MAP_YAML
  }
  cm.kind = 'ConfigMap'
  cm.metadata = new k8s.V1ObjectMeta()
  cm.metadata.labels = { app: 'docker-registry' }
  cm.metadata.name = 'docker-registry-config'
  // TODO: make this configurable

  return cm
}

export function registrySecret(): k8s.V1Secret {
  const secret = new k8s.V1Secret()
  secret.apiVersion = 'v1'
  secret.data = { haSharedSecret: 'U29tZVZlcnlTdHJpbmdTZWNyZXQK' }
  secret.kind = 'Secret'
  secret.metadata = new k8s.V1ObjectMeta()
  secret.metadata.labels = {
    app: 'docker-registry',
    chart: 'docker-registry-1.4.3'
  }
  secret.metadata.name = 'docker-registry-secret'
  secret.type = 'Opaque'

  return secret
}

export function registryStatefulSet(): k8s.V1StatefulSet {
  const ss = new k8s.V1StatefulSet()
  ss.apiVersion = 'apps/v1'
  ss.metadata = new k8s.V1ObjectMeta()
  ss.metadata.name = 'docker-registry'

  const spec = new k8s.V1StatefulSetSpec()
  spec.selector = new k8s.V1LabelSelector()
  spec.selector.matchLabels = { app: 'docker-registry' }
  spec.serviceName = 'registry'
  spec.replicas = 1

  const tmpl = new k8s.V1PodTemplateSpec()
  tmpl.metadata = new k8s.V1ObjectMeta()
  tmpl.metadata.labels = { app: 'docker-registry' }
  tmpl.spec = new k8s.V1PodSpec()
  tmpl.spec.terminationGracePeriodSeconds = 5 // TODO: figure out for how long

  const c = new k8s.V1Container()
  c.command = ['/bin/registry', 'serve', '/etc/docker/registry/config.yaml']
  c.env = [
    {
      name: 'REGISTRY_HTTP_SECRET',
      valueFrom: {
        secretKeyRef: {
          key: 'haSharedSecret',
          name: 'docker-registry-secret'
        }
      }
    },
    {
      name: 'REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY',
      value: '/var/lib/registry'
    }
  ]
  c.image = 'registry:2.6.2'
  c.name = 'docker-registry'
  c.imagePullPolicy = 'IfNotPresent'
  c.ports = [
    {
      containerPort: 5000,
      protocol: 'TCP'
    }
  ]

  c.volumeMounts = [
    {
      mountPath: '/etc/docker/registry',
      name: 'docker-registry-config'
    }
  ]

  c.livenessProbe = new k8s.V1Probe()
  c.livenessProbe.failureThreshold = 3
  c.livenessProbe.periodSeconds = 10
  c.livenessProbe.successThreshold = 1
  c.livenessProbe.timeoutSeconds = 1
  c.livenessProbe.httpGet = new k8s.V1HTTPGetAction()
  c.livenessProbe.httpGet.path = '/'
  c.livenessProbe.httpGet.port = 5000
  c.livenessProbe.httpGet.scheme = 'HTTP'

  c.readinessProbe = new k8s.V1Probe()
  c.readinessProbe.failureThreshold = 3
  c.readinessProbe.periodSeconds = 10
  c.readinessProbe.successThreshold = 1
  c.readinessProbe.timeoutSeconds = 1
  c.readinessProbe.httpGet = new k8s.V1HTTPGetAction()
  c.readinessProbe.httpGet.path = '/'
  c.readinessProbe.httpGet.port = 5000
  c.readinessProbe.httpGet.scheme = 'HTTP'

  tmpl.spec.containers = [c]
  tmpl.spec.volumes = [
    {
      name: 'docker-registry-config',
      configMap: {
        name: 'docker-registry-config'
      }
    }
  ]

  spec.template = tmpl
  ss.spec = spec

  return ss
}

export function registryService(): k8s.V1Service {
  const svc = new k8s.V1Service()
  svc.apiVersion = 'v1'
  svc.kind = 'Service'
  svc.metadata = new k8s.V1ObjectMeta()
  svc.metadata.name = 'docker-registry'
  svc.metadata.labels = {
    app: 'docker-registry'
  }
  const spec = new k8s.V1ServiceSpec()
  spec.externalTrafficPolicy = 'Cluster'
  spec.ports = [
    {
      name: 'registry',
      nodePort: 31500,
      port: 5000,
      protocol: 'TCP',
      targetPort: 5000
    }
  ]
  spec.selector = {
    app: 'docker-registry'
  }
  spec.sessionAffinity = 'None'
  spec.type = 'NodePort'
  svc.spec = spec

  return svc
}

export function kanikoPod(
  workingDirectory: string, // git://github.com/<handle>/<repo>
  imagePath: string // <handle>/<image>:<tag>
): k8s.V1Pod {
  const pod = new k8s.V1Pod()
  pod.apiVersion = 'v1'
  pod.kind = 'Pod'
  pod.metadata = new k8s.V1ObjectMeta()
  pod.metadata.name = 'kaniko'

  const spec = new k8s.V1PodSpec()
  const c = new k8s.V1Container()
  c.image = 'gcr.io/kaniko-project/executor:latest'
  c.name = 'kaniko'
  c.imagePullPolicy = 'Always'
  c.env = [
    {
      name: 'GIT_TOKEN',
      value: process.env.GITHUB_TOKEN
    }
  ]
  c.args = [
    '--dockerfile=Dockerfile',
    `--context=${workingDirectory}`,
    `--destination=docker-registry.default.svc.cluster.local:5000/${imagePath}`
  ]
  spec.containers = [c]
  spec.dnsPolicy = 'ClusterFirst'
  spec.restartPolicy = 'Never'
  pod.spec = spec

  return pod
}

packages/k8s/tests/build-container-test.ts (new file, 20 lines)
@@ -0,0 +1,20 @@
import { containerBuild } from '../src/k8s'

jest.useRealTimers()

describe('container build', () => {
  beforeAll(async () => {
    process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE'] = 'default'
  })

  it('should finish without throwing an exception', async () => {
    await expect(
      containerBuild(
        {
          workingDirectory: 'git://github.com/nikola-jokic/dockeraction.git'
        },
        'randhandle/randimg:123123'
      )
    ).resolves.not.toThrow()
  })
})