Compare commits

...

5 Commits

Author SHA1 Message Date
zarko-a
3f829eef9e Fix event.json not being copied to /github/workflow in kubernetes-novolume mode (#287)
In run-script-step, the _temp directory was being copied to the workflow pod,
but the _github_home and _github_workflow directories were not being moved
from their temporary location to the /github directory structure where they
are expected by GitHub Actions.

This caused event.json to be missing at /github/workflow/event.json, breaking
actions that depend on GITHUB_EVENT_PATH.

The fix adds a setup step that copies _github_home and _github_workflow from
/__w/_temp/ to /github/ after copying the temp directory to the pod, matching
the behavior of run-container-step and prepareJobScript.

Uses cp -r instead of symlinks to avoid symlink validation errors when copying
files back from the pod to the runner.
2025-11-26 11:47:19 +01:00
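For reference, a minimal sketch of the setup step this commit describes,
reusing execPodStep and JOB_CONTAINER_NAME as they appear in the
run-script-step diff below (the actual change additionally wraps the joined
command with shlex.quote):

// Sketch: after /__w/_temp is copied into the job pod, mirror the GitHub
// directories to /github so GITHUB_EVENT_PATH=/github/workflow/event.json resolves.
const setupCommands = [
  'mkdir -p /github',
  'cp -r /__w/_temp/_github_home /github/home',
  'cp -r /__w/_temp/_github_workflow /github/workflow'
]
await execPodStep(
  ['sh', '-c', setupCommands.join(' && ')],
  state.jobPod,
  JOB_CONTAINER_NAME
)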
zarko-a
011ffb284e Fix workingDir permissions issue by creating it within init container (#283)
* Fix workingDir permissions issue by creating it within init container

* Apply suggestion from @Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* rework init commands

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-26 11:46:47 +01:00
Vincent Van Ouytsel
0951cc73e4 Improve validation checks after copying (#285)
* fix: calculate hash again after failure

The hash of the source is calculated only once. The source hash is compared
against the destination hash, but if they do not match, only the destination
hash is recalculated.

The problem is that if the source hash is incorrect, the check will keep
failing, because the source hash is never recalculated.

Now, whenever the hashes do not match, the hashes of both the source and the
destination are calculated again.

* fix: use size instead of block size

Previously the %b format specifier was used with stat, which prints the
number of blocks allocated to the file. We noticed that in some cases the
block count of the source and the destination file could differ slightly:
since the source and target run in different containers, they can have
different block sizes defined, so identical content can occupy a different
number of blocks. If the block counts did not match, the hash would not
match either, even though the file content was exactly the same.

With this change, blocks are no longer used. Instead, the actual size of the
file in bytes is listed.
2025-11-24 16:14:02 +01:00
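A minimal sketch of the validation loop after both fixes in this commit; the
helper names are taken from the diffs below, while the surrounding control
flow and error message are illustrative:

// Sketch: recompute both hashes on every attempt so a bad read on either
// side cannot make the comparison fail forever.
const attempts = 15
const delay = 1000
for (let i = 0; i < attempts; i++) {
  // listDirAllCommand now emits '%s %n' (size in bytes + name) per file
  const want = await localCalculateOutputHashSorted([
    'sh',
    '-c',
    listDirAllCommand(runnerPath)
  ])
  const got = await execCalculateOutputHashSorted(podName, JOB_CONTAINER_NAME, [
    'sh',
    '-c',
    listDirAllCommand(containerPath)
  ])
  if (want === got) {
    return
  }
  await sleep(delay)
}
throw new Error(`Contents of ${runnerPath} and ${containerPath} never matched`)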
Nikola Jokic
15e808935c Allow non-root container (#264)
* Allow non-root container

* format

* add lint:fix and fix lint errors

* fix tests and volume mounts
2025-11-21 14:44:29 +01:00
vvanouytsel-trendminer
ad9cb43c31 feat: check if required binaries are present (#272)
* feat: check if required binaries are present

Previously the necessary binaries were copied over from the runner
container. This led to issues when the main container used the musl libc
implementation, since the copied glibc-linked binaries would not run there.

Instead of copying over any binaries, the initContainer now checks whether
the required binaries are present in the main container.

* feat: get rid of the init container

* fix: add _runner_file_commands

* fix: do not fail if _runner_file_commands does not exist

It seems that for container actions this directory does not exist.
2025-11-10 15:01:40 +01:00
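A minimal sketch of what such a presence check could look like on top of
execPodStep from the hook's k8s package; the command string, variable names
and error text here are illustrative rather than the exact implementation:

// Sketch: fail early if the job container image lacks a binary the hook needs.
const requiredBinaries = ['sh', 'env', 'tail']
try {
  await execPodStep(
    ['sh', '-c', requiredBinaries.map(bin => `command -v ${bin}`).join(' && ')],
    state.jobPod,
    JOB_CONTAINER_NAME
  )
} catch (err) {
  throw new Error(
    `Required binaries (${requiredBinaries.join(', ')}) were not found in the job container image`
  )
}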
8 changed files with 108 additions and 46 deletions

View File

@@ -12,6 +12,7 @@
"format": "prettier --write '**/*.ts'",
"format-check": "prettier --check '**/*.ts'",
"lint": "eslint packages/**/*.ts",
"lint:fix": "eslint packages/**/*.ts --fix",
"build-all": "npm run build --prefix packages/hooklib && npm run build --prefix packages/k8s && npm run build --prefix packages/docker"
},
"repository": {

View File

@@ -41,3 +41,4 @@ rules:
- Container actions will not have access to the services network or job container network
- Docker [create options](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idcontaineroptions) are not supported
- Container actions will have to specify the entrypoint, since the default entrypoint will be overridden to run the commands from the workflow.
- Container actions need to have the following binaries in their container image: `sh`, `env`, `tail`.

View File

@@ -104,7 +104,7 @@ export async function runContainerStep(
try {
core.debug(`Executing container step script in pod ${podName}`)
return await execPodStep(
['/__e/sh', '-e', containerPath],
['sh', '-e', containerPath],
pod.metadata.name,
JOB_CONTAINER_NAME
)
@@ -133,7 +133,7 @@ function createContainerSpec(
podContainer.name = JOB_CONTAINER_NAME
podContainer.image = container.image
podContainer.workingDir = '/__w'
podContainer.command = ['/__e/tail']
podContainer.command = ['tail']
podContainer.args = DEFAULT_CONTAINER_ENTRY_POINT_ARGS
podContainer.volumeMounts = CONTAINER_VOLUMES

View File

@@ -6,6 +6,7 @@ import { execCpFromPod, execCpToPod, execPodStep } from '../k8s'
import { writeRunScript, sleep, listDirAllCommand } from '../k8s/utils'
import { JOB_CONTAINER_NAME } from './constants'
import { dirname } from 'path'
import * as shlex from 'shlex'
export async function runScriptStep(
args: RunScriptStepArgs,
@@ -26,6 +27,23 @@ export async function runScriptStep(
const runnerTemp = `${workdir}/_temp`
await execCpToPod(state.jobPod, runnerTemp, containerTemp)
// Copy GitHub directories from temp to /github
const setupCommands = [
'mkdir -p /github',
'cp -r /__w/_temp/_github_home /github/home',
'cp -r /__w/_temp/_github_workflow /github/workflow'
]
try {
await execPodStep(
['sh', '-c', shlex.quote(setupCommands.join(' && '))],
state.jobPod,
JOB_CONTAINER_NAME
)
} catch (err) {
core.debug(`Failed to copy GitHub directories: ${JSON.stringify(err)}`)
}
// Execute the entrypoint script
args.entryPoint = 'sh'
args.entryPointArgs = ['-e', containerPath]

View File

@@ -20,8 +20,10 @@ import {
listDirAllCommand,
sleep,
EXTERNALS_VOLUME_NAME,
GITHUB_VOLUME_NAME
GITHUB_VOLUME_NAME,
WORK_VOLUME
} from './utils'
import * as shlex from 'shlex'
const kc = new k8s.KubeConfig()
@@ -91,13 +93,33 @@ export async function createJobPod(
appPod.spec = new k8s.V1PodSpec()
appPod.spec.containers = containers
appPod.spec.securityContext = {
fsGroup: 1001
}
// Extract working directory from GITHUB_WORKSPACE
// GITHUB_WORKSPACE is like /__w/repo-name/repo-name
const githubWorkspace = process.env.GITHUB_WORKSPACE
const workingDirPath = githubWorkspace?.split('/').slice(-2).join('/') ?? ''
const initCommands = [
'mkdir -p /mnt/externals',
'mkdir -p /mnt/work',
'mkdir -p /mnt/github',
'mv /home/runner/externals/* /mnt/externals/'
]
if (workingDirPath) {
initCommands.push(`mkdir -p /mnt/work/${workingDirPath}`)
}
appPod.spec.initContainers = [
{
name: 'fs-init',
image:
process.env.ACTIONS_RUNNER_IMAGE ||
'ghcr.io/actions/actions-runner:latest',
command: ['sh', '-c', 'mv /home/runner/externals/* /mnt/externals'],
command: ['sh', '-c', initCommands.join(' && ')],
securityContext: {
runAsGroup: 1001,
runAsUser: 1001
@@ -106,6 +128,14 @@ export async function createJobPod(
{
name: EXTERNALS_VOLUME_NAME,
mountPath: '/mnt/externals'
},
{
name: WORK_VOLUME,
mountPath: '/mnt/work'
},
{
name: GITHUB_VOLUME_NAME,
mountPath: '/mnt/github'
}
]
}
@@ -121,6 +151,10 @@ export async function createJobPod(
{
name: GITHUB_VOLUME_NAME,
emptyDir: {}
},
{
name: WORK_VOLUME,
emptyDir: {}
}
]
@@ -169,33 +203,6 @@ export async function createContainerStepPod(
appPod.spec = new k8s.V1PodSpec()
appPod.spec.containers = [container]
appPod.spec.initContainers = [
{
name: 'fs-init',
image:
process.env.ACTIONS_RUNNER_IMAGE ||
'ghcr.io/actions/actions-runner:latest',
command: [
'bash',
'-c',
`sudo cp $(which sh) /mnt/externals/sh \
&& sudo cp $(which tail) /mnt/externals/tail \
&& sudo cp $(which env) /mnt/externals/env \
&& sudo chmod -R 777 /mnt/externals`
],
securityContext: {
runAsGroup: 1001,
runAsUser: 1001,
privileged: true
},
volumeMounts: [
{
name: EXTERNALS_VOLUME_NAME,
mountPath: '/mnt/externals'
}
]
}
]
appPod.spec.restartPolicy = 'Never'
@@ -207,6 +214,10 @@ export async function createContainerStepPod(
{
name: GITHUB_VOLUME_NAME,
emptyDir: {}
},
{
name: WORK_VOLUME,
emptyDir: {}
}
]
@@ -378,7 +389,15 @@ export async function execCpToPod(
while (true) {
try {
const exec = new k8s.Exec(kc)
const command = ['tar', 'xf', '-', '-C', containerPath]
// Use tar to extract with --no-same-owner to avoid ownership issues.
// Then use find to fix permissions. The -m flag helps but we also need to fix permissions after.
const command = [
'sh',
'-c',
`tar xf - --no-same-owner -C ${shlex.quote(containerPath)} 2>/dev/null; ` +
`find ${shlex.quote(containerPath)} -type f -exec chmod u+rw {} \\; 2>/dev/null; ` +
`find ${shlex.quote(containerPath)} -type d -exec chmod u+rwx {} \\; 2>/dev/null`
]
const readStream = tar.pack(runnerPath)
const errStream = new WritableStreamBuffer()
await new Promise((resolve, reject) => {
@@ -396,7 +415,7 @@ export async function execCpToPod(
if (errStream.size()) {
reject(
new Error(
`Error from cpFromPod - details: \n ${errStream.getContentsAsString()}`
`Error from execCpToPod - status: ${status.status}, details: \n ${errStream.getContentsAsString()}`
)
)
}
@@ -418,16 +437,16 @@ export async function execCpToPod(
}
}
const want = await localCalculateOutputHashSorted([
'sh',
'-c',
listDirAllCommand(runnerPath)
])
let attempts = 15
const delay = 1000
for (let i = 0; i < attempts; i++) {
try {
const want = await localCalculateOutputHashSorted([
'sh',
'-c',
listDirAllCommand(runnerPath)
])
const got = await execCalculateOutputHashSorted(
podName,
JOB_CONTAINER_NAME,
@@ -459,11 +478,6 @@ export async function execCpFromPod(
core.debug(
`Copying from pod ${podName} ${containerPath} to ${targetRunnerPath}`
)
const want = await execCalculateOutputHashSorted(
podName,
JOB_CONTAINER_NAME,
['sh', '-c', listDirAllCommand(containerPath)]
)
let attempt = 0
while (true) {
@@ -524,6 +538,12 @@ export async function execCpFromPod(
const delay = 1000
for (let i = 0; i < attempts; i++) {
try {
const want = await execCalculateOutputHashSorted(
podName,
JOB_CONTAINER_NAME,
['sh', '-c', listDirAllCommand(containerPath)]
)
const got = await localCalculateOutputHashSorted([
'sh',
'-c',

View File

@@ -15,12 +15,17 @@ export const ENV_USE_KUBE_SCHEDULER = 'ACTIONS_RUNNER_USE_KUBE_SCHEDULER'
export const EXTERNALS_VOLUME_NAME = 'externals'
export const GITHUB_VOLUME_NAME = 'github'
export const WORK_VOLUME = 'work'
export const CONTAINER_VOLUMES: k8s.V1VolumeMount[] = [
{
name: EXTERNALS_VOLUME_NAME,
mountPath: '/__e'
},
{
name: WORK_VOLUME,
mountPath: '/__w'
},
{
name: GITHUB_VOLUME_NAME,
mountPath: '/github'
@@ -102,7 +107,7 @@ export function writeContainerStepScript(
rm "$0" # remove script after running
mv /__w/_temp/_github_home /github/home && \
mv /__w/_temp/_github_workflow /github/workflow && \
mv /__w/_temp/_runner_file_commands /github/file_commands && \
mv /__w/_temp/_runner_file_commands /github/file_commands || true && \
mv /__w/${parts.join('/')}/ /github/workspace && \
cd /github/workspace && \
exec ${environmentPrefix} ${entryPoint} ${
@@ -291,5 +296,5 @@ export async function sleep(ms: number): Promise<void> {
}
export function listDirAllCommand(dir: string): string {
return `cd ${shlex.quote(dir)} && find . -not -path '*/_runner_hook_responses*' -exec stat -c '%b %n' {} \\;`
return `cd ${shlex.quote(dir)} && find . -not -path '*/_runner_hook_responses*' -exec stat -c '%s %n' {} \\;`
}

View File

@@ -26,6 +26,7 @@ describe('e2e', () => {
afterEach(async () => {
await testHelper.cleanup()
})
it('should prepare job, run script step, run container step then cleanup without errors', async () => {
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)

View File

@@ -231,4 +231,20 @@ describe('Prepare job', () => {
expect(() => content.context.services[0].image).not.toThrow()
}
)
it('should prepare job with container with non-root user', async () => {
prepareJobData.args!.container!.image =
'ghcr.io/actions/actions-runner:latest' // known to use user 1001
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
expect(content.state.jobPod).toBeTruthy()
expect(content.context.container.image).toBe(
'ghcr.io/actions/actions-runner:latest'
)
})
})