
1 Commits

Author SHA1 Message Date
4c490fc450 feat: explicit list of source files 2024-04-22 15:59:00 +00:00
44 changed files with 414 additions and 1094 deletions

View File

@ -2,19 +2,13 @@ name: E2E
on:
workflow_dispatch:
schedule:
- cron: '0 0 * * 0'
push:
branches:
- master
- dev
paths-ignore:
- ".github/workflows/images-*"
- ".github/workflows/ubuntu.yml"
- ".github/workflows/macos.yml"
- ".github/workflows/debian-cache.yml"
- "docs/**"
- 'tools/**'
- ".readthedocs.yaml"
- "README.md"
@ -37,7 +31,6 @@ jobs:
{ distro: Ubuntu, release: 18.04 },
{ distro: Ubuntu, release: 20.04 },
{ distro: Ubuntu, release: 22.04 },
{ distro: Ubuntu, release: 24.04 },
{ distro: CentOS, release: 6 },
{ distro: CentOS, release: 7 },
{ distro: CentOS, release: 8 },
@ -126,7 +119,7 @@ jobs:
echo 'distro = { id = "${{ matrix.os.distro }}", release = "${{ matrix.os.release }}" }' >> examples/kernel-module/.out-of-tree.toml
echo 'kernel = { regex = ".*" }' >> examples/kernel-module/.out-of-tree.toml
echo '[qemu]' >> examples/kernel-module/.out-of-tree.toml
echo 'timeout = "5m"' >> examples/kernel-module/.out-of-tree.toml
echo 'timeout = "10m"' >> examples/kernel-module/.out-of-tree.toml
echo 'after_start_timeout = "10s"' >> examples/kernel-module/.out-of-tree.toml
echo 'modprobe uio || modprobe 9p || modprobe xfs' >> examples/kernel-module/test.sh
@ -142,8 +135,8 @@ jobs:
echo 'Type=oneshot' >> test.service
echo 'WorkingDirectory=/root/test' >> test.service
echo 'TimeoutStopSec=1' >> test.service
echo 'ExecStart=/usr/local/bin/out-of-tree kernel --no-prebuilt-containers autogen --threads=8 --max=64 --shuffle' >> test.service
echo 'ExecStart=/usr/local/bin/out-of-tree pew --threads=4 --include-internal-errors' >> test.service
echo 'ExecStart=/usr/local/bin/out-of-tree kernel --no-container-cache autogen --threads=8 --max=128 --shuffle' >> test.service
echo 'ExecStart=/usr/local/bin/out-of-tree pew --qemu-timeout=10m --threads=4 --include-internal-errors' >> test.service
scp test.service root@$IP:/etc/systemd/system/test.service
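For reference, the echo lines above assemble the following fragment of examples/kernel-module/.out-of-tree.toml (a sketch with one matrix entry, Ubuntu 22.04, substituted; the qemu timeout is whichever of the two values this side of the diff keeps):

    distro = { id = "Ubuntu", release = "22.04" }
    kernel = { regex = ".*" }

    [qemu]
    timeout = "10m"          # or "5m", per the other side of the hunk
    after_start_timeout = "10s"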

View File

@ -1,87 +0,0 @@
name: CentOS images
on:
workflow_dispatch:
push:
paths:
- 'tools/qemu-centos-img/**'
- '.github/workflows/images-centos.yml'
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
images-centos:
name: Qemu Images
runs-on: ubuntu-latest
steps:
- uses: digitalocean/action-doctl@v2
with:
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
- uses: webfactory/ssh-agent@v0.8.0
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: create droplet
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c' --tag-name=github-actions ga-out-of-tree-images-centos-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
# TODO Move to common script
- name: generate images
shell: bash
run: |
sleep 1m
IP=$(doctl compute droplet list --tag-name=github-actions --format "Name,Public IPv4" | grep -v ID | grep ga-out-of-tree-images-centos-$GITHUB_SHA | awk '{print $2}')
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
do
sleep 1s
done
ssh root@$IP "cloud-init status --wait | grep done"
ssh root@$IP apt-get update
ssh root@$IP apt-get install -y git podman s3cmd
ssh root@$IP git clone https://github.com/out-of-tree/out-of-tree
ssh root@$IP "cd out-of-tree && git checkout $GITHUB_SHA"
ssh root@$IP "echo -e '[Unit]\nDescription=CentOS image generator and uploader\n[Service]\nRemainAfterExit=yes\nStandardError=append:/var/log/images-centos.log\nStandardOutput=append:/var/log/images-centos.log\nType=oneshot' >> /etc/systemd/system/images-centos.service"
ssh root@$IP "echo Environment=HOST=fra1.digitaloceanspaces.com >> /etc/systemd/system/images-centos.service"
ssh root@$IP "echo Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com >> /etc/systemd/system/images-centos.service"
ssh root@$IP "echo Environment=ACCESS_KEY=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} >> /etc/systemd/system/images-centos.service"
ssh root@$IP "echo Environment=SECRET_KEY=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} >> /etc/systemd/system/images-centos.service"
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-centos-img/6/generate.sh' >> /etc/systemd/system/images-centos.service"
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-centos-img/7/generate.sh' >> /etc/systemd/system/images-centos.service"
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-centos-img/8/generate.sh' >> /etc/systemd/system/images-centos.service"
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-centos-img/*/*.tar.gz s3://out-of-tree/3.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-centos.service'
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-centos.service"
ssh root@$IP systemctl daemon-reload
ssh root@$IP systemctl start images-centos --no-block
while ! ssh root@$IP systemctl show images-centos -p SubState --value | grep -E '(failed|exited)'
do
sleep 3m
done
scp root@$IP:/var/log/images-centos.log .
ssh root@$IP systemctl is-active images-centos
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: images-centos-log
path: images-centos.log
- name: delete droplet
if: always()
run: doctl compute droplet delete -f ga-out-of-tree-images-centos-$GITHUB_SHA
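Pieced together, the echo commands in this job write roughly the following /etc/systemd/system/images-centos.service unit (a reconstruction; ACCESS_KEY and SECRET_KEY come from repository secrets and are elided here):

    [Unit]
    Description=CentOS image generator and uploader
    [Service]
    RemainAfterExit=yes
    StandardError=append:/var/log/images-centos.log
    StandardOutput=append:/var/log/images-centos.log
    Type=oneshot
    Environment=HOST=fra1.digitaloceanspaces.com
    Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com
    Environment=ACCESS_KEY=<secret>
    Environment=SECRET_KEY=<secret>
    ExecStart=/root/out-of-tree/tools/qemu-centos-img/6/generate.sh
    ExecStart=/root/out-of-tree/tools/qemu-centos-img/7/generate.sh
    ExecStart=/root/out-of-tree/tools/qemu-centos-img/8/generate.sh
    ExecStart=/bin/sh -c "s3cmd put --acl-public /root/out-of-tree/tools/qemu-centos-img/*/*.tar.gz s3://out-of-tree/3.0.0/ --host=$HOST --host-bucket=$HOST_BUCKET --access_key=$ACCESS_KEY --secret_key=$SECRET_KEY"
    TimeoutStopSec=1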

View File

@ -1,4 +1,4 @@
name: Debian images
name: Debian
on:
workflow_dispatch:
@ -25,7 +25,7 @@ jobs:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: create droplet
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c' --tag-name=github-actions ga-out-of-tree-images-debian-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94' --tag-name=github-actions ga-out-of-tree-images-debian-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
# TODO Move to common script
- name: generate images
@ -40,7 +40,8 @@ jobs:
sleep 1s
done
ssh root@$IP "cloud-init status --wait | grep done"
sleep 5m
ssh root@$IP pkill apt-get || true
ssh root@$IP apt-get update
ssh root@$IP apt-get install -y git podman s3cmd
@ -56,7 +57,7 @@ jobs:
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-debian-img/generate-images.sh' >> /etc/systemd/system/images-debian.service"
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-debian-img/*.tar.gz s3://out-of-tree/3.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-debian.service'
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-debian-img/*.tar.gz s3://out-of-tree/1.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-debian.service'
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-debian.service"

View File

@ -1,4 +1,4 @@
name: Oracle Linux images
name: Oracle Linux
on:
workflow_dispatch:
@ -25,7 +25,7 @@ jobs:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: create droplet
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c' --tag-name=github-actions ga-out-of-tree-images-oraclelinux-$GITHUB_SHA --size s-1vcpu-2gb --image ubuntu-22-04-x64 --wait
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94' --tag-name=github-actions ga-out-of-tree-images-oraclelinux-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
# TODO Move to common script
- name: generate images
@ -40,7 +40,8 @@ jobs:
sleep 1s
done
ssh root@$IP "cloud-init status --wait | grep done"
sleep 5m
ssh root@$IP pkill apt-get || true
ssh root@$IP apt-get update
ssh root@$IP apt-get install -y git podman s3cmd
@ -56,7 +57,7 @@ jobs:
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-oraclelinux-img/generate-images.sh' >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-oraclelinux-img/*.tar.gz s3://out-of-tree/3.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-oraclelinux.service'
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-oraclelinux-img/*.tar.gz s3://out-of-tree/1.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-oraclelinux.service'
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-oraclelinux.service"
@ -73,13 +74,6 @@ jobs:
ssh root@$IP systemctl is-active images-oraclelinux
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: images-oraclelinux-log
path: images-oraclelinux.log
- name: delete droplet
if: always()
run: doctl compute droplet delete -f ga-out-of-tree-images-oraclelinux-$GITHUB_SHA

View File

@ -1,89 +0,0 @@
name: Ubuntu images
on:
workflow_dispatch:
push:
paths:
- 'tools/qemu-ubuntu-img/**'
- '.github/workflows/images-ubuntu.yml'
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
images:
name: Qemu Images
runs-on: ubuntu-latest
steps:
- uses: digitalocean/action-doctl@v2
with:
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
- uses: webfactory/ssh-agent@v0.8.0
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: create droplet
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c' --tag-name=github-actions ga-out-of-tree-images-ubuntu-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
# TODO Move to common script
- name: generate images
shell: bash
run: |
set -x
sleep 1m
IP=$(doctl compute droplet list --tag-name=github-actions --format "Name,Public IPv4" | grep -v ID | grep ga-out-of-tree-images-ubuntu-$GITHUB_SHA | awk '{print $2}')
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
do
sleep 1s
done
ssh root@$IP "cloud-init status --wait | grep done"
ssh root@$IP apt-get update
ssh root@$IP apt-get install -y git podman s3cmd
ssh root@$IP git clone https://github.com/out-of-tree/out-of-tree
ssh root@$IP "cd out-of-tree && git checkout $GITHUB_SHA"
ssh root@$IP "echo -e '[Unit]\nDescription=Ubuntu image generator and uploader\n[Service]\nRemainAfterExit=yes\nStandardError=append:/var/log/images-ubuntu.log\nStandardOutput=append:/var/log/images-ubuntu.log\nType=oneshot' >> /etc/systemd/system/images-ubuntu.service"
ssh root@$IP "echo Environment=HOST=fra1.digitaloceanspaces.com >> /etc/systemd/system/images-ubuntu.service"
ssh root@$IP "echo Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com >> /etc/systemd/system/images-ubuntu.service"
ssh root@$IP "echo Environment=ACCESS_KEY=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} >> /etc/systemd/system/images-ubuntu.service"
ssh root@$IP "echo Environment=SECRET_KEY=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} >> /etc/systemd/system/images-ubuntu.service"
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-ubuntu-img/generate-images.py' >> /etc/systemd/system/images-ubuntu.service"
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-ubuntu-img/*.tar.gz s3://out-of-tree/3.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-ubuntu.service'
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-ubuntu.service"
ssh root@$IP systemctl daemon-reload
ssh root@$IP systemctl start images-ubuntu --no-block
while ! ssh root@$IP systemctl show images-ubuntu -p SubState --value | grep -E '(failed|exited)'
do
sleep 1m
done
scp root@$IP:/var/log/images-ubuntu.log .
cat images-ubuntu.log
ssh root@$IP systemctl is-active images-ubuntu
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: images-ubuntu-log
path: images-ubuntu.log
- name: delete droplet
if: always()
run: doctl compute droplet delete -f ga-out-of-tree-images-ubuntu-$GITHUB_SHA
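The image workflows in this commit all repeat the same droplet provisioning dance that the "# TODO Move to common script" comments point at. Roughly, as a sketch (DISTRO is a placeholder for centos/debian/oraclelinux/ubuntu, not an existing variable):

    # hypothetical common script, condensed from the jobs above
    while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo; do sleep 1s; done
    ssh root@$IP "cloud-init status --wait | grep done"
    ssh root@$IP systemctl start images-$DISTRO --no-block
    while ! ssh root@$IP systemctl show images-$DISTRO -p SubState --value | grep -E '(failed|exited)'
    do
        sleep 1m
    done
    scp root@$IP:/var/log/images-$DISTRO.log .
    ssh root@$IP systemctl is-active images-$DISTRO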

View File

@ -18,7 +18,7 @@ concurrency:
jobs:
build:
name: Build
runs-on: macOS-12
runs-on: macOS-latest
steps:
- uses: actions/checkout@v1

View File

@ -2,8 +2,6 @@ name: Ubuntu
on:
workflow_dispatch:
schedule:
- cron: '0 0 * * 0'
push:
paths-ignore:
- ".github/workflows/images-*"
@ -115,7 +113,6 @@ jobs:
{ distro: Ubuntu, release: 18.04 },
{ distro: Ubuntu, release: 20.04 },
{ distro: Ubuntu, release: 22.04 },
{ distro: Ubuntu, release: 24.04 },
{ distro: CentOS, release: 6 },
{ distro: CentOS, release: 7 },
{ distro: CentOS, release: 8 },
@ -203,7 +200,7 @@ jobs:
cp ../examples/kernel-module/{module.c,Makefile,test.sh} .
../out-of-tree --log-level=debug kernel list-remote --distro-id=${{ matrix.os.distro }} --distro-release=${{ matrix.os.release }}
../out-of-tree --log-level=debug kernel list-remote --distro=${{ matrix.os.distro }} --ver=${{ matrix.os.release }}
../out-of-tree --log-level=debug kernel autogen --max=1 --shuffle
../out-of-tree --log-level=debug pew --qemu-timeout=20m --include-internal-errors

View File

@ -8,6 +8,8 @@
*out-of-tree* was created to reduce the complexity of the environment for developing, testing and debugging Linux kernel exploits and out-of-tree kernel modules (hence the name "out-of-tree").
![Screenshot](https://cloudflare-ipfs.com/ipfs/Qmb88fgdDjbWkxz91sWsgmoZZNfVThnCtj37u3mF2s3T3T)
## Installation
### GNU/Linux (with [Nix](https://nixos.org/nix/))
@ -40,9 +42,9 @@ Read [documentation](https://out-of-tree.readthedocs.io) for further info.
## Examples
Download all Ubuntu 24.04 kernels:
Generate all Ubuntu 22.04 kernels:
$ out-of-tree kernel genall --distro-id=Ubuntu --distro-release=24.04
$ out-of-tree kernel genall --distro=Ubuntu --ver=22.04
Run tests based on .out-of-tree.toml definitions:
@ -50,8 +52,8 @@ Run tests based on .out-of-tree.toml definitions:
Test with a specific kernel:
$ out-of-tree pew --realtime-output --distro-id=ubuntu --kernel-regex=6.8.0-41-generic
$ out-of-tree pew --kernel='Ubuntu:5.4.0-29-generic'
Run debug environment:
$ out-of-tree debug --distro-id=ubuntu --distro-release=24.04 --kernel-regex=6.8.0-41-generic
$ out-of-tree debug --kernel='Ubuntu:5.4.0-29-generic'

View File

@ -56,8 +56,6 @@ type Job struct {
RepoName string
Commit string
Description string
Artifact artifact.Artifact
Target distro.KernelInfo

View File

@ -12,6 +12,7 @@ import (
"github.com/naoina/toml"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
@ -240,9 +241,8 @@ func (ka Artifact) Supported(ki distro.KernelInfo) (supported bool, err error) {
return
}
// TODO too many parameters
func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
outputOnSuccess, realtimeOutput, endless bool, cBinary,
endless bool, cBinary,
cEndlessStress string, cEndlessTimeout time.Duration,
dump func(q *qemu.System, ka Artifact, ki distro.KernelInfo,
result *Result)) {
@ -334,22 +334,12 @@ func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
// TODO: build should return structure
start := time.Now()
result.BuildDir, result.BuildArtifact, result.Build.Output, err =
Build(slog, tmp, ka, ki, ka.Docker.Timeout.Duration, realtimeOutput)
Build(slog, tmp, ka, ki, ka.Docker.Timeout.Duration)
slog.Debug().Str("duration", time.Since(start).String()).
Msg("build done")
if err != nil {
if !realtimeOutput {
slog.Error().Err(err).Msgf("build failure\n%v\n", result.Build.Output)
} else {
slog.Error().Err(err).Msg("build failure")
}
log.Error().Err(err).Msg("build")
return
} else {
if outputOnSuccess && !realtimeOutput {
slog.Info().Msgf("build success\n%v\n", result.Build.Output)
} else {
slog.Info().Msg("build success")
}
}
result.Build.Ok = true
} else {
@ -371,8 +361,6 @@ func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
ka.Qemu.Timeout.Duration = time.Minute
}
slog.Info().Msg("wait for vm initialisation")
err = q.WaitForSSH(ka.Qemu.Timeout.Duration)
if err != nil {
result.InternalError = err
@ -409,40 +397,11 @@ func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
return
}
var qemuTestOutput string
q.SetQemuOutputHandler(func(s string) {
if realtimeOutput {
fmt.Printf("kmsg: %s\n", s)
} else {
qemuTestOutput += s + "\n"
}
})
if realtimeOutput {
q.SetCommandsOutputHandler(func(s string) {
fmt.Printf("test: %s\n", s)
})
}
start := time.Now()
slog.Info().Msg("copy artifact and run test")
copyArtifactAndTest(slog, q, ka, &result, remoteTest, outputOnSuccess, realtimeOutput)
copyArtifactAndTest(slog, q, ka, &result, remoteTest)
slog.Debug().Str("duration", time.Since(start).String()).
Msgf("test completed (success: %v)", result.Test.Ok)
if result.Build.Ok && !realtimeOutput {
if !result.Run.Ok || !result.Test.Ok {
slog.Error().Msgf("qemu output\n%v\n", qemuTestOutput)
} else if outputOnSuccess {
slog.Info().Msgf("qemu output\n%v\n", qemuTestOutput)
}
}
if realtimeOutput {
q.CloseCommandsOutputHandler()
}
q.CloseQemuOutputHandler()
if !endless {
return
}

View File

@ -37,7 +37,7 @@ func preload(q *qemu.System, ki distro.KernelInfo, pm PreloadModule,
var workPath, cache string
if pm.Path != "" {
log.Debug().Msg("Use non-git path for preload module (no cache)")
log.Print("Use non-git path for preload module (no cache)")
workPath = pm.Path
} else if pm.Repo != "" {
workPath, cache, err = cloneOrPull(pm.Repo, ki)
@ -85,7 +85,7 @@ func buildAndInsmod(workPath string, q *qemu.System, ki distro.KernelInfo,
output, err := q.CopyAndInsmod(af)
if err != nil {
log.Error().Err(err).Msg(output)
log.Print(output)
return
}
return
@ -111,7 +111,7 @@ func buildPreload(workPath, tmp string, ki distro.KernelInfo,
dockerTimeout = ka.Docker.Timeout.Duration
}
_, af, _, err = Build(log.Logger, tmp, ka, ki, dockerTimeout, false)
_, af, _, err = Build(log.Logger, tmp, ka, ki, dockerTimeout)
return
}
@ -147,7 +147,7 @@ func cloneOrPull(repo string, ki distro.KernelInfo) (workPath, cache string,
err = w.Pull(&git.PullOptions{})
if err != nil && err != git.NoErrAlreadyUpToDate {
log.Error().Err(err).Msgf("pull %s error", repo)
log.Print(repo, "pull error:", err)
}
} else {
r, err = git.PlainClone(workPath, false, &git.CloneOptions{URL: repo})

View File

@ -103,7 +103,7 @@ func applyPatches(src string, ka Artifact) (err error) {
}
func Build(flog zerolog.Logger, tmp string, ka Artifact,
ki distro.KernelInfo, dockerTimeout time.Duration, realtimeOutput bool) (
ki distro.KernelInfo, dockerTimeout time.Duration) (
outdir, outpath, output string, err error) {
target := strings.Replace(ka.Name, " ", "_", -1)
@ -155,21 +155,9 @@ func Build(flog zerolog.Logger, tmp string, ka Artifact,
log.Fatal().Err(err).Msg("container creation failure")
}
c.Args = append(c.Args, "--network", "none")
if realtimeOutput {
c.SetCommandsOutputHandler(func(s string) {
fmt.Printf("%s\n", s)
})
}
output, err = c.Run(outdir, []string{
buildCommand + " && chmod -R 777 /work",
})
if realtimeOutput {
c.CloseCommandsOutputHandler()
}
} else {
cmd := exec.Command("bash", "-c", "cd "+outdir+" && "+
buildCommand)
@ -291,7 +279,7 @@ func CopyFile(sourcePath, destinationPath string) (err error) {
}
func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka Artifact,
res *Result, remoteTest string, outputOnSuccess, realtimeOutput bool) (err error) {
res *Result, remoteTest string) (err error) {
// Copy all test files to the remote machine
for _, f := range ka.TestFiles {
@ -323,7 +311,8 @@ func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka Artifact,
res.Test.Output, err = testKernelModule(q, ka, remoteTest)
if err != nil {
break
slog.Error().Err(err).Msg(res.Test.Output)
return
}
res.Test.Ok = true
case KernelExploit:
@ -336,36 +325,24 @@ func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka Artifact,
res.Test.Output, err = testKernelExploit(q, ka, remoteTest,
remoteExploit)
if err != nil {
break
slog.Error().Err(err).Msg(res.Test.Output)
return
}
res.Run.Ok = true // not really used
res.Test.Ok = true
case Script:
res.Test.Output, err = runScript(q, remoteTest)
if err != nil {
break
slog.Error().Err(err).Msg(res.Test.Output)
return
}
slog.Info().Msgf("\n%v\n", res.Test.Output)
res.Run.Ok = true
res.Test.Ok = true
default:
slog.Fatal().Msg("Unsupported artifact type")
}
if err != nil || !res.Test.Ok {
if !realtimeOutput {
slog.Error().Err(err).Msgf("test failure\n%v\n", res.Test.Output)
} else {
slog.Error().Err(err).Msg("test failure")
}
return
}
if outputOnSuccess && !realtimeOutput {
slog.Info().Msgf("test success\n%v\n", res.Test.Output)
} else {
slog.Info().Msg("test success")
}
_, err = q.Command("root", "echo")
if err != nil {
slog.Error().Err(err).Msg("after-test ssh reconnect")

View File

@ -198,11 +198,9 @@ func (c Client) PushRepo(repo api.Repo) (err error) {
remote := fmt.Sprintf("git://%s/%s", addr, repo.Name)
log.Debug().Msgf("git proxy remote: %v", remote)
raw, err := exec.Command("git", "-c", "push.default=current",
"--work-tree", repo.Path, "push", "--force", remote).
raw, err := exec.Command("git", "--work-tree", repo.Path, "push", "--force", remote).
CombinedOutput()
if err != nil {
log.Error().Msgf("push repo %v\n%v", repo, string(raw))
return
}

View File

@ -1,74 +1,39 @@
// Copyright 2024 Mikhail Klementev. All rights reserved.
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os/exec"
"path/filepath"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
)
type ContainerCmd struct {
DistroID string `help:"filter by distribution"`
DistroRelease string `help:"filter by distribution release"`
Filter string `help:"filter by name"`
List ContainerListCmd `cmd:"" help:"list containers"`
Update ContainerUpdateCmd `cmd:"" help:"update containers"`
Save ContainerSaveCmd `cmd:"" help:"save containers"`
Cleanup ContainerCleanupCmd `cmd:"" help:"cleanup containers"`
RealtimeOutput RealtimeContainerOutputFlag `help:"show realtime output"`
}
type RealtimeContainerOutputFlag bool
func (f RealtimeContainerOutputFlag) AfterApply() (err error) {
container.Stdout = bool(f)
return
}
func (cmd ContainerCmd) Containers() (diis []container.Image, err error) {
func (cmd ContainerCmd) Containers() (names []string) {
images, err := container.Images()
if err != nil {
return
}
var dt distro.Distro
if cmd.DistroID != "" {
dt.ID, err = distro.NewID(cmd.DistroID)
if err != nil {
return
}
if cmd.DistroRelease != "" {
dt.Release = cmd.DistroRelease
}
} else if cmd.DistroRelease != "" {
err = errors.New("--distro-release has no use on its own")
return
log.Fatal().Err(err).Msg("")
}
for _, img := range images {
if dt.ID != distro.None && dt.ID != img.Distro.ID {
log.Debug().Msgf("skip %s", img.Name)
if cmd.Filter != "" && !strings.Contains(img.Name, cmd.Filter) {
continue
}
if dt.Release != "" && dt.Release != img.Distro.Release {
log.Debug().Msgf("skip %s", img.Name)
continue
}
log.Debug().Msgf("append %s", img.Name)
diis = append(diis, img)
names = append(names, img.Name)
}
return
}
@ -76,40 +41,9 @@ func (cmd ContainerCmd) Containers() (diis []container.Image, err error) {
type ContainerListCmd struct{}
func (cmd ContainerListCmd) Run(containerCmd *ContainerCmd) (err error) {
images, err := containerCmd.Containers()
if err != nil {
return
for _, name := range containerCmd.Containers() {
fmt.Println(name)
}
for _, img := range images {
fmt.Printf("%s\n", img.Distro.String())
}
return
}
type ContainerUpdateCmd struct{}
func (cmd ContainerUpdateCmd) Run(g *Globals, containerCmd *ContainerCmd) (err error) {
images, err := containerCmd.Containers()
if err != nil {
return
}
container.UseCache = false
container.UsePrebuilt = false
// TODO move from all commands to main command line handler
container.Commands = g.Config.Docker.Commands
container.Registry = g.Config.Docker.Registry
container.Timeout = g.Config.Docker.Timeout.Duration
for _, img := range images {
_, err = img.Distro.Packages()
if err != nil {
return
}
}
return
}
@ -118,18 +52,13 @@ type ContainerSaveCmd struct {
}
func (cmd ContainerSaveCmd) Run(containerCmd *ContainerCmd) (err error) {
images, err := containerCmd.Containers()
if err != nil {
return
}
for _, name := range containerCmd.Containers() {
nlog := log.With().Str("name", name).Logger()
for _, img := range images {
nlog := log.With().Str("name", img.Name).Logger()
output := filepath.Join(cmd.OutDir, img.Name+".tar")
output := filepath.Join(cmd.OutDir, name+".tar")
nlog.Info().Msgf("saving to %v", output)
err = container.Save(img.Name, output)
err = container.Save(name, output)
if err != nil {
return
}
@ -152,14 +81,9 @@ func (cmd ContainerSaveCmd) Run(containerCmd *ContainerCmd) (err error) {
type ContainerCleanupCmd struct{}
func (cmd ContainerCleanupCmd) Run(containerCmd *ContainerCmd) (err error) {
images, err := containerCmd.Containers()
if err != nil {
return
}
var output []byte
for _, img := range images {
output, err = exec.Command(container.Runtime, "image", "rm", img.Name).
for _, name := range containerCmd.Containers() {
output, err = exec.Command(container.Runtime, "image", "rm", name).
CombinedOutput()
if err != nil {
log.Error().Err(err).Str("output", string(output)).Msg("")

View File

@ -1,4 +1,4 @@
// Copyright 2024 Mikhail Klementev. All rights reserved.
// Copyright 2018 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
@ -22,11 +22,8 @@ import (
)
type DebugCmd struct {
KernelRegex string `required:"" help:"set kernel regex"`
DistroID string `required:"" help:"set distribution"`
DistroRelease string `required:"" help:"set distribution release"`
Gdb string `help:"gdb listen address" default:"tcp::1234"`
Kernel string `help:"regexp (first match)" required:""`
Gdb string `help:"gdb listen address" default:"tcp::1234"`
SshAddr string `help:"ssh address to listen" default:"127.0.0.1"`
SshPort int `help:"ssh port to listen" default:"50022"`
@ -48,7 +45,7 @@ type DebugCmd struct {
func (cmd *DebugCmd) Run(g *Globals) (err error) {
kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
if err != nil {
log.Error().Err(err).Msg("read kernel config")
log.Print(err)
}
var configPath string
@ -66,17 +63,7 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
ka.SourcePath = g.WorkDir
}
var km artifact.Target
km.Distro.ID, err = distro.NewID(cmd.DistroID)
if err != nil {
return
}
km.Distro.Release = cmd.DistroRelease
km.Kernel.Regex = cmd.KernelRegex
ka.Targets = []artifact.Target{km}
ki, err := firstSupported(kcfg, ka)
ki, err := firstSupported(kcfg, ka, cmd.Kernel)
if err != nil {
return
}
@ -174,14 +161,14 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
// Module depends on one of the standard modules
err = artifact.CopyStandardModules(q, ki)
if err != nil {
log.Error().Err(err).Msg("copy standard modules")
log.Print(err)
return
}
}
err = artifact.PreloadModules(q, ka, ki, g.Config.Docker.Timeout.Duration)
if err != nil {
log.Error().Err(err).Msg("preload modules")
log.Print(err)
return
}
@ -193,10 +180,9 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
return
}
} else {
buildDir, outFile, output, err = artifact.Build(
log.Logger, tmp, ka, ki, g.Config.Docker.Timeout.Duration, false)
buildDir, outFile, output, err = artifact.Build(log.Logger, tmp, ka, ki, g.Config.Docker.Timeout.Duration)
if err != nil {
log.Error().Err(err).Msg(output)
log.Print(err, output)
return
}
@ -220,7 +206,7 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
}
err = q.CopyFile(f.User, f.Local, f.Remote)
if err != nil {
log.Error().Err(err).Msgf("copy %s -> %s", f.Local, f.Remote)
log.Print("error copy err:", err, f.Local, f.Remote)
return
}
}
@ -237,7 +223,15 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
return
}
func firstSupported(kcfg config.KernelConfig, ka artifact.Artifact) (ki distro.KernelInfo, err error) {
func firstSupported(kcfg config.KernelConfig, ka artifact.Artifact,
kernel string) (ki distro.KernelInfo, err error) {
km, err := kernelMask(kernel)
if err != nil {
return
}
ka.Targets = []artifact.Target{km}
for _, ki = range kcfg.Kernels {
var supported bool

View File

@ -11,36 +11,37 @@ import (
"strings"
"time"
"github.com/cavaliergopher/grab/v3"
"github.com/naoina/toml"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/kernel"
)
type KernelCmd struct {
NoDownload bool `help:"do not download qemu image while kernel generation"`
UseHost bool `help:"also use host kernels"`
Force bool `help:"force reinstall kernel"`
NoHeaders bool `help:"do not install kernel headers"`
Shuffle bool `help:"randomize kernels installation order"`
Retries int `help:"amount of tries for each kernel" default:"2"`
Threads int `help:"threads for parallel installation" default:"1"`
Update bool `help:"update container"`
PrebuiltContainers bool `help:"try prebuilt container images first" default:"true" negatable:""`
Max int `help:"maximum kernels to download" default:"100500"`
NoPrune bool `help:"do not remove dangling or unused images from local storage after build"`
NoCfgRegen bool `help:"do not update kernels.toml"`
NoDownload bool `help:"do not download qemu image while kernel generation"`
UseHost bool `help:"also use host kernels"`
Force bool `help:"force reinstall kernel"`
NoHeaders bool `help:"do not install kernel headers"`
Shuffle bool `help:"randomize kernels installation order"`
Retries int `help:"amount of tries for each kernel" default:"2"`
Threads int `help:"threads for parallel installation" default:"1"`
Update bool `help:"update container"`
ContainerCache bool `help:"try prebuilt container images first" default:"true" negatable:""`
Max int `help:"maximum kernels to download" default:"100500"`
NoPrune bool `help:"do not remove dangling or unused images from local storage after build"`
NoCfgRegen bool `help:"do not update kernels.toml"`
ContainerTimeout time.Duration `help:"container timeout"`
RealtimeOutput RealtimeContainerOutputFlag `help:"show realtime output"`
List KernelListCmd `cmd:"" help:"list kernels"`
ListRemote KernelListRemoteCmd `cmd:"" help:"list remote kernels"`
Autogen KernelAutogenCmd `cmd:"" help:"generate kernels based on the current config"`
@ -167,6 +168,33 @@ func (cmd *KernelCmd) GenKernel(km artifact.Target, pkg string) {
}
}
func (cmd *KernelCmd) fetchContainerCache(c container.Container) {
if !cmd.ContainerCache {
return
}
if c.Exist() {
return
}
tmp, err := fs.TempDir()
if err != nil {
return
}
defer os.RemoveAll(tmp)
resp, err := grab.Get(tmp, cache.ContainerURL(c.Name()))
if err != nil {
return
}
defer os.Remove(resp.Filename)
err = container.Load(resp.Filename, c.Name())
if err == nil {
log.Info().Msgf("use prebuilt container %s", c.Name())
}
}
func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
defer func() {
if err != nil {
@ -183,8 +211,6 @@ func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
container.Prune = false
}
container.UsePrebuilt = cmd.PrebuiltContainers
cmd.kcfg, err = config.ReadKernelConfig(g.Config.Kernels)
if err != nil {
log.Debug().Err(err).Msg("read kernels config")
@ -204,6 +230,13 @@ func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
return
}
c, err := container.New(km.Distro)
if err != nil || cmd.shutdown {
return
}
cmd.fetchContainerCache(c)
pkgs, err := kernel.MatchPackages(km)
if err != nil || cmd.shutdown {
return
@ -230,7 +263,7 @@ func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
}
if cmd.stats.success >= cmd.Max {
log.Info().Msg("Max is reached")
log.Print("Max is reached")
swg.Done()
break
}
@ -267,8 +300,8 @@ func (cmd *KernelListCmd) Run(g *Globals) (err error) {
}
type KernelListRemoteCmd struct {
DistroID string `required:"" help:"distribution"`
DistroRelease string `help:"distro version"`
Distro string `required:"" help:"distribution"`
Ver string `help:"distro version"`
}
func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
@ -279,15 +312,13 @@ func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error
container.Prune = false
}
container.UsePrebuilt = kernelCmd.PrebuiltContainers
distroType, err := distro.NewID(cmd.DistroID)
distroType, err := distro.NewID(cmd.Distro)
if err != nil {
return
}
km := artifact.Target{
Distro: distro.Distro{ID: distroType, Release: cmd.DistroRelease},
Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
Kernel: artifact.Kernel{Regex: ".*"},
}
@ -299,6 +330,13 @@ func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error
container.Registry = g.Config.Docker.Registry
container.Commands = g.Config.Docker.Commands
c, err := container.New(km.Distro)
if err != nil {
return
}
kernelCmd.fetchContainerCache(c)
pkgs, err := kernel.MatchPackages(km)
// error check skipped on purpose
@ -338,12 +376,12 @@ func (cmd *KernelAutogenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
}
type KernelGenallCmd struct {
DistroID string `help:"distribution"`
DistroRelease string `help:"distro version"`
Distro string `help:"distribution"`
Ver string `help:"distro version"`
}
func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
distroType, err := distro.NewID(cmd.DistroID)
distroType, err := distro.NewID(cmd.Distro)
if err != nil {
return
}
@ -359,7 +397,7 @@ func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
continue
}
if cmd.DistroRelease != "" && dist.Release != cmd.DistroRelease {
if cmd.Ver != "" && dist.Release != cmd.Ver {
continue
}
@ -378,13 +416,13 @@ func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
}
type KernelInstallCmd struct {
DistroID string `required:"" help:"distribution"`
DistroRelease string `required:"" help:"distro version"`
KernelRegex string `required:"" help:"kernel release mask"`
Distro string `required:"" help:"distribution"`
Ver string `required:"" help:"distro version"`
Kernel string `required:"" help:"kernel release mask"`
}
func (cmd *KernelInstallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
distroType, err := distro.NewID(cmd.DistroID)
distroType, err := distro.NewID(cmd.Distro)
if err != nil {
return
}
@ -392,8 +430,8 @@ func (cmd *KernelInstallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
kernel.SetSigintHandler(&kernelCmd.shutdown)
km := artifact.Target{
Distro: distro.Distro{ID: distroType, Release: cmd.DistroRelease},
Kernel: artifact.Kernel{Regex: cmd.KernelRegex},
Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
Kernel: artifact.Kernel{Regex: cmd.Kernel},
}
err = kernelCmd.Generate(g, km)
if err != nil {

View File

@ -1,4 +1,4 @@
// Copyright 2024 Mikhail Klementev. All rights reserved.
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
@ -42,7 +42,7 @@ func (cmd *LogQueryCmd) Run(g *Globals) (err error) {
ka, kaErr := artifact.Artifact{}.Read(g.WorkDir + "/.out-of-tree.toml")
if kaErr == nil {
log.Debug().Msg(".out-of-tree.toml found, filter by artifact name")
log.Print(".out-of-tree.toml found, filter by artifact name")
les, err = getAllArtifactLogs(db, cmd.Tag, cmd.Num, ka)
} else {
les, err = getAllLogs(db, cmd.Tag, cmd.Num)
@ -212,12 +212,7 @@ func center(s string, w int) string {
}
func genOkFailCentered(name string, ok bool) (aurv aurora.Value) {
if ok {
name += " OK"
} else {
name += " FAIL"
}
name = center(name, 14)
name = center(name, 10)
if ok {
aurv = aurora.BgGreen(aurora.Black(name))
} else {
@ -230,7 +225,7 @@ func logLogEntry(l logEntry) {
distroInfo := fmt.Sprintf("%s-%s {%s}", l.Distro.ID,
l.Distro.Release, l.KernelRelease)
artifactInfo := fmt.Sprintf("%s", l.Name)
artifactInfo := fmt.Sprintf("{[%s] %s}", l.Type, l.Name)
timestamp := l.Timestamp.Format("2006-01-02 15:04")
@ -262,10 +257,7 @@ func logLogEntry(l logEntry) {
additional = "(timeout)"
}
if len(distroInfo) > 40 {
distroInfo = distroInfo[:40]
}
colored := aurora.Sprintf("[%4d %4s] [%s] %s %-40s: %s %s",
colored := aurora.Sprintf("[%4d %4s] [%s] %s %-70s: %s %s",
l.ID, l.Tag, timestamp, artifactInfo, distroInfo, status,
additional)

View File

@ -33,7 +33,7 @@ type PackCmd struct {
func (cmd *PackCmd) Run(g *Globals) (err error) {
tag := fmt.Sprintf("pack_run_%d", time.Now().Unix())
log.Info().Msgf("Tag: %s", tag)
log.Print("Tag:", tag)
files, err := os.ReadDir(g.WorkDir)
if err != nil {
@ -65,7 +65,7 @@ func (cmd *PackCmd) Run(g *Globals) (err error) {
}
}
log.Info().Msg(f.Name())
log.Print(f.Name())
pew := PewCmd{
Max: cmd.KernelRuns,

View File

@ -1,4 +1,4 @@
// Copyright 2024 Mikhail Klementev. All rights reserved.
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
@ -12,7 +12,6 @@ import (
"math/rand"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
@ -65,6 +64,7 @@ func successRate(state runstate) float64 {
type PewCmd struct {
Max int64 `help:"test no more than X kernels" default:"100500"`
Runs int64 `help:"runs per each kernel" default:"1"`
Kernel string `help:"override kernel regex"`
RootFS string `help:"override rootfs image" type:"existingfile"`
Guess bool `help:"try all defined kernels"`
Shuffle bool `help:"randomize kernels test order"`
@ -75,10 +75,6 @@ type PewCmd struct {
Tag string `help:"log tagging"`
Timeout time.Duration `help:"timeout after tool will not spawn new tests"`
KernelRegex string `help:"set kernel regex"`
DistroID string `help:"set distribution"`
DistroRelease string `help:"set distribution release"`
ArtifactConfig string `help:"path to artifact config" type:"path"`
QemuTimeout time.Duration `help:"timeout for qemu"`
@ -87,12 +83,6 @@ type PewCmd struct {
Threshold float64 `help:"reliability threshold for exit code" default:"1.00"`
IncludeInternalErrors bool `help:"count internal errors as part of the success rate"`
InternalErrorsRetries int `help:"amount of retries on internal errors" default:"3"`
OutputOnSuccess bool `help:"show output on success"`
RealtimeOutput bool `help:"show realtime output"`
LogDir string `help:"write logs to directory"`
Endless bool `help:"endless tests"`
EndlessTimeout time.Duration `help:"timeout between tests" default:"1m"`
@ -168,11 +158,6 @@ func (cmd *PewCmd) Run(g *Globals) (err error) {
cmd.useRemote = g.Remote
cmd.remoteAddr = g.RemoteAddr
if cmd.RealtimeOutput && cmd.Threads != 1 {
log.Warn().Msg("realtime output disables multithreading")
cmd.Threads = 1
}
if cmd.useRemote {
c := client.Client{RemoteAddr: cmd.remoteAddr}
cmd.Kcfg.Kernels, err = c.Kernels()
@ -235,68 +220,32 @@ func (cmd *PewCmd) Run(g *Globals) (err error) {
ka.SourcePath = g.WorkDir
}
if cmd.KernelRegex != "" {
if cmd.Kernel != "" {
var km artifact.Target
km.Kernel.Regex = cmd.KernelRegex
if cmd.DistroID == "" {
err = errors.New("--distro-id is required")
return
}
var dt distro.ID
dt, err = distro.NewID(cmd.DistroID)
km, err = kernelMask(cmd.Kernel)
if err != nil {
return
}
km.Distro.ID = dt
if cmd.DistroRelease != "" {
km.Distro.Release = cmd.DistroRelease
}
ka.Targets = []artifact.Target{km}
} else if cmd.DistroID != "" {
var km artifact.Target
var dt distro.ID
dt, err = distro.NewID(cmd.DistroID)
if err != nil {
return
}
km.Distro.ID = dt
if cmd.DistroRelease != "" {
km.Distro.Release = cmd.DistroRelease
}
ka.Targets = []artifact.Target{km}
} else if cmd.DistroRelease != "" {
err = errors.New("--distro-release has no use on its own")
return
}
if ka.Qemu.Timeout.Duration == 0 {
ka.Qemu.Timeout.Duration = g.Config.Qemu.Timeout.Duration
}
if ka.Docker.Timeout.Duration == 0 {
ka.Docker.Timeout.Duration = g.Config.Docker.Timeout.Duration
}
// TODO there was a lib for merge structures
ka.Qemu.Timeout.Duration = g.Config.Qemu.Timeout.Duration
ka.Docker.Timeout.Duration = g.Config.Docker.Timeout.Duration
if cmd.QemuTimeout != 0 {
log.Info().Msgf("Set qemu timeout to %s", cmd.QemuTimeout)
g.Config.Qemu.Timeout.Duration = cmd.QemuTimeout
ka.Qemu.Timeout.Duration = cmd.QemuTimeout
}
if cmd.DockerTimeout != 0 {
log.Info().Msgf("Set docker timeout to %s", cmd.DockerTimeout)
g.Config.Docker.Timeout.Duration = cmd.DockerTimeout
ka.Docker.Timeout.Duration = cmd.DockerTimeout
}
log.Info().Msgf("Qemu timeout: %s", ka.Qemu.Timeout.Duration)
log.Info().Msgf("Docker timeout: %s", ka.Docker.Timeout.Duration)
if cmd.Tag == "" {
cmd.Tag = fmt.Sprintf("%d", time.Now().Unix())
}
@ -412,54 +361,39 @@ func (cmd PewCmd) testArtifact(swg *sizedwaitgroup.SizedWaitGroup,
defer swg.Done()
var logDirWriter *zerolog.ConsoleWriter
if cmd.LogDir != "" {
logdir := filepath.Join(cmd.LogDir, cmd.Tag)
err := os.MkdirAll(logdir, os.ModePerm)
if err != nil {
log.Error().Err(err).Msgf("mkdir %s", logdir)
return
}
logdir := "logs/" + cmd.Tag
err := os.MkdirAll(logdir, os.ModePerm)
if err != nil {
log.Error().Err(err).Msgf("mkdir %s", logdir)
return
}
logfile := fmt.Sprintf("logs/%s/%s-%s-%s.log",
cmd.Tag,
ki.Distro.ID.String(),
ki.Distro.Release,
ki.KernelRelease,
)
f, err := os.Create(logfile)
if err != nil {
log.Error().Err(err).Msgf("create %s", logfile)
return
}
defer f.Close()
logfile := fmt.Sprintf("logs/%s/%s-%s-%s.log",
cmd.Tag,
ki.Distro.ID.String(),
ki.Distro.Release,
ki.KernelRelease,
)
f, err := os.Create(logfile)
if err != nil {
log.Error().Err(err).Msgf("create %s", logfile)
return
}
defer f.Close()
logDirWriter = &zerolog.ConsoleWriter{
slog := zerolog.New(zerolog.MultiLevelWriter(
&ConsoleWriter,
&FileWriter,
&zerolog.ConsoleWriter{
Out: f,
FieldsExclude: []string{
"distro_release",
"distro_type",
"kernel",
"command",
"workdir",
},
NoColor: true,
}
}
var slog zerolog.Logger
if logDirWriter != nil {
slog = zerolog.New(zerolog.MultiLevelWriter(
&ConsoleWriter,
&FileWriter,
logDirWriter,
))
} else {
slog = zerolog.New(zerolog.MultiLevelWriter(
&ConsoleWriter,
&FileWriter,
))
}
},
))
switch LogLevel {
case zerolog.TraceLevel, zerolog.DebugLevel:
@ -472,33 +406,12 @@ func (cmd PewCmd) testArtifact(swg *sizedwaitgroup.SizedWaitGroup,
Str("kernel", ki.KernelRelease).
Logger()
retriesLeft := cmd.InternalErrorsRetries
var stop bool
for !stop {
ka.Process(slog, ki, cmd.OutputOnSuccess, cmd.RealtimeOutput,
cmd.Endless, cmd.Binary, cmd.EndlessStress, cmd.EndlessTimeout,
func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo, res *artifact.Result) {
if res.InternalError == nil {
cmd.dumpResult(q, ka, ki, res)
stop = true
return
}
q.Log.Warn().Err(res.InternalError).
Str("panic", fmt.Sprintf("%v", q.KernelPanic)).
Str("timeout", fmt.Sprintf("%v", q.KilledByTimeout)).
Int("retries_left", retriesLeft).
Msg("internal")
if retriesLeft == 0 {
state.InternalErrors += 1
stop = true
return
}
retriesLeft -= 1
},
)
}
ka.Process(slog, ki,
cmd.Endless, cmd.Binary, cmd.EndlessStress, cmd.EndlessTimeout,
func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo, result *artifact.Result) {
dumpResult(q, ka, ki, result, cmd.Dist, cmd.Tag, cmd.Binary, cmd.DB)
},
)
}
func shuffleKernels(a []distro.KernelInfo) []distro.KernelInfo {
@ -579,6 +492,25 @@ func (cmd PewCmd) performCI(ka artifact.Artifact) (err error) {
return
}
func kernelMask(kernel string) (km artifact.Target, err error) {
parts := strings.Split(kernel, ":")
if len(parts) != 2 {
err = errors.New("kernel is not 'distroType:regex'")
return
}
dt, err := distro.NewID(parts[0])
if err != nil {
return
}
km = artifact.Target{
Distro: distro.Distro{ID: dt},
Kernel: artifact.Kernel{Regex: parts[1]},
}
return
}
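As a usage sketch (not part of the diff), the --kernel value from the README hunk would be parsed by kernelMask like this; variable names are illustrative:

    // illustrative only: turn "Ubuntu:5.4.0-29-generic" into an artifact.Target
    km, err := kernelMask("Ubuntu:5.4.0-29-generic")
    if err != nil {
            return
    }
    // km.Distro.ID is the parsed Ubuntu ID; km.Kernel.Regex is "5.4.0-29-generic"
    ka.Targets = []artifact.Target{km}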
func genOkFail(name string, ok bool) (aurv aurora.Value) {
s := " " + name
if name == "" {
@ -594,70 +526,76 @@ func genOkFail(name string, ok bool) (aurv aurora.Value) {
return
}
func (cmd PewCmd) dumpResult(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo, res *artifact.Result) {
state.Overall += 1
func dumpResult(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
res *artifact.Result, dist, tag, binary string, db *sql.DB) {
if res.Test.Ok {
state.Success += 1
}
// TODO refactor
colored := ""
switch ka.Type {
case artifact.KernelExploit:
colored = aurora.Sprintf("%s %s",
genOkFail("BUILD", res.Build.Ok),
genOkFail("LPE", res.Test.Ok))
case artifact.KernelModule:
colored = aurora.Sprintf("%s %s %s",
genOkFail("BUILD", res.Build.Ok),
genOkFail("INSMOD", res.Run.Ok),
genOkFail("TEST", res.Test.Ok))
case artifact.Script:
colored = aurora.Sprintf("%s",
genOkFail("", res.Test.Ok))
}
additional := ""
if q.KernelPanic {
additional = "(panic)"
} else if q.KilledByTimeout {
additional = "(timeout)"
}
if additional != "" {
q.Log.Info().Msgf("%v %v", colored, additional)
if res.InternalError != nil {
q.Log.Warn().Err(res.InternalError).
Str("panic", fmt.Sprintf("%v", q.KernelPanic)).
Str("timeout", fmt.Sprintf("%v", q.KilledByTimeout)).
Msg("internal")
res.InternalErrorString = res.InternalError.Error()
state.InternalErrors += 1
} else {
q.Log.Info().Msgf("%v", colored)
colored := ""
state.Overall += 1
if res.Test.Ok {
state.Success += 1
}
switch ka.Type {
case artifact.KernelExploit:
colored = aurora.Sprintf("%s %s",
genOkFail("BUILD", res.Build.Ok),
genOkFail("LPE", res.Test.Ok))
case artifact.KernelModule:
colored = aurora.Sprintf("%s %s %s",
genOkFail("BUILD", res.Build.Ok),
genOkFail("INSMOD", res.Run.Ok),
genOkFail("TEST", res.Test.Ok))
case artifact.Script:
colored = aurora.Sprintf("%s",
genOkFail("", res.Test.Ok))
}
additional := ""
if q.KernelPanic {
additional = "(panic)"
} else if q.KilledByTimeout {
additional = "(timeout)"
}
if additional != "" {
q.Log.Info().Msgf("%v %v", colored, additional)
} else {
q.Log.Info().Msgf("%v", colored)
}
}
err := addToLog(cmd.DB, q, ka, ki, res, cmd.Tag)
err := addToLog(db, q, ka, ki, res, tag)
if err != nil {
q.Log.Error().Err(err).Msgf("[db] addToLog (%v)", ka)
q.Log.Warn().Err(err).Msgf("[db] addToLog (%v)", ka)
}
if cmd.Binary != "" {
return
}
if binary == "" && dist != pathDevNull {
err = os.MkdirAll(dist, os.ModePerm)
if err != nil {
log.Warn().Err(err).Msgf("os.MkdirAll (%v)", ka)
}
if cmd.Dist == pathDevNull { // why?
return
}
path := fmt.Sprintf("%s/%s-%s-%s", dist, ki.Distro.ID,
ki.Distro.Release, ki.KernelRelease)
if ka.Type != artifact.KernelExploit {
path += ".ko"
}
err = os.MkdirAll(cmd.Dist, os.ModePerm)
if err != nil {
log.Error().Err(err).Msgf("os.MkdirAll (%v)", ka)
return
}
path := fmt.Sprintf("%s/%s-%s-%s", cmd.Dist, ki.Distro.ID,
ki.Distro.Release, ki.KernelRelease)
if ka.Type != artifact.KernelExploit {
path += ".ko"
}
err = artifact.CopyFile(res.BuildArtifact, path)
if err != nil {
log.Error().Err(err).Msgf("copy file (%v)", ka)
return
err = artifact.CopyFile(res.BuildArtifact, path)
if err != nil {
log.Warn().Err(err).Msgf("copy file (%v)", ka)
}
}
}

View File

@ -35,12 +35,9 @@ type OutOfTree struct {
Timeout artifact.Duration
Registry string
// Commands that are executed before (prepend) and after (append) the
// base layer of the Dockerfile.
Commands struct {
Prepend []distro.Command
Append []distro.Command
}
// Commands that will be executed before
// the base layer of Dockerfile
Commands []distro.Command
}
}

View File

@ -14,17 +14,13 @@ import (
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/cavaliergopher/grab/v3"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
)
var Runtime = "docker"
@ -33,21 +29,12 @@ var Registry = ""
var Timeout time.Duration
// Commands that are executed before (prepend) and after (append) the
// base layer of the Dockerfile.
var Commands struct {
Prepend []distro.Command
Append []distro.Command
}
var Commands []distro.Command
var UseCache = true
var UsePrebuilt = true
var Prune = true
var Stdout = false
type Image struct {
Name string
Distro distro.Distro
@ -105,25 +92,6 @@ func Load(localpath string, name string) (err error) {
return
}
if strings.Contains(Runtime, "docker") {
var err2 error
cmd = exec.Command(Runtime, "tag", "localhost/"+name, name)
log.Debug().Msgf("%v", cmd)
raw, err2 = cmd.CombinedOutput()
if err2 != nil {
log.Debug().Err(err2).Msg(string(raw))
}
cmd = exec.Command(Runtime, "rmi", "localhost/"+name)
log.Debug().Msgf("%v", cmd)
raw, err2 = cmd.CombinedOutput()
if err2 != nil {
log.Debug().Err(err2).Msg(string(raw))
}
}
return
}
@ -178,15 +146,7 @@ type Container struct {
// Additional arguments
Args []string
// Base of container is local-only
LocalBase bool
Log zerolog.Logger
commandsOutput struct {
listener chan string
mu sync.Mutex
}
}
func New(dist distro.Distro) (c Container, err error) {
@ -245,43 +205,6 @@ func NewFromKernelInfo(ki distro.KernelInfo) (
return
}
// c.SetCommandsOutputHandler(func(s string) { fmt.Println(s) })
// defer c.CloseCommandsOutputHandler()
func (c *Container) SetCommandsOutputHandler(handler func(s string)) {
c.commandsOutput.mu.Lock()
defer c.commandsOutput.mu.Unlock()
c.commandsOutput.listener = make(chan string)
go func(l chan string) {
for m := range l {
if m != "" {
handler(m)
}
}
}(c.commandsOutput.listener)
}
func (c *Container) CloseCommandsOutputHandler() {
c.commandsOutput.mu.Lock()
defer c.commandsOutput.mu.Unlock()
close(c.commandsOutput.listener)
c.commandsOutput.listener = nil
}
func (c *Container) handleCommandsOutput(m string) {
if c.commandsOutput.listener == nil {
return
}
c.commandsOutput.mu.Lock()
defer c.commandsOutput.mu.Unlock()
if c.commandsOutput.listener != nil {
c.commandsOutput.listener <- m
}
}
func (c Container) Name() string {
return c.name
}
@ -308,38 +231,7 @@ func (c Container) Exist() (yes bool) {
return
}
func (c Container) loadPrebuilt() (err error) {
if c.Exist() && UseCache {
return
}
tmp, err := fs.TempDir()
if err != nil {
return
}
defer os.RemoveAll(tmp)
log.Info().Msgf("download prebuilt container %s", c.Name())
resp, err := grab.Get(tmp, cache.ContainerURL(c.Name()))
if err != nil {
return
}
defer os.Remove(resp.Filename)
err = Load(resp.Filename, c.Name())
if err == nil {
log.Info().Msgf("use prebuilt container %s", c.Name())
}
return
}
func (c Container) Build(image string, envs, runs []string) (err error) {
if c.Exist() && UseCache {
return
}
cdir := dotfiles.Dir("containers", c.name)
cfile := filepath.Join(cdir, "Dockerfile")
@ -349,15 +241,9 @@ func (c Container) Build(image string, envs, runs []string) (err error) {
}
cf += image + "\n"
for _, cmd := range Commands.Prepend {
if cmd.Distro.ID != distro.None && cmd.Distro.ID != c.dist.ID {
continue
}
if cmd.Distro.Release != "" && cmd.Distro.Release != c.dist.Release {
continue
}
cf += "RUN " + cmd.Command + "\n"
for _, c := range Commands {
// TODO check for distro type
cf += "RUN " + c.Command + "\n"
}
for _, e := range envs {
@ -368,17 +254,6 @@ func (c Container) Build(image string, envs, runs []string) (err error) {
cf += "RUN " + c + "\n"
}
for _, cmd := range Commands.Append {
if cmd.Distro.ID != distro.None && cmd.Distro.ID != c.dist.ID {
continue
}
if cmd.Distro.Release != "" && cmd.Distro.Release != c.dist.Release {
continue
}
cf += "RUN " + cmd.Command + "\n"
}
buf, err := os.ReadFile(cfile)
if err != nil {
err = os.WriteFile(cfile, []byte(cf), os.ModePerm)
@ -402,17 +277,10 @@ func (c Container) Build(image string, envs, runs []string) (err error) {
c.Log.Info().Msg("build")
}
if UsePrebuilt {
err = c.loadPrebuilt()
}
if err != nil || !UsePrebuilt {
var output string
output, err = c.build(cdir)
if err != nil {
c.Log.Error().Err(err).Msg(output)
return
}
output, err := c.build(cdir)
if err != nil {
c.Log.Error().Err(err).Msg(output)
return
}
c.Log.Info().Msg("success")
@ -431,10 +299,7 @@ func (c Container) build(imagePath string) (output string, err error) {
args := []string{"build"}
if !UseCache {
if !c.LocalBase {
args = append(args, "--pull")
}
args = append(args, "--no-cache")
args = append(args, "--pull", "--no-cache")
}
args = append(args, "-t", c.name, imagePath)
@ -459,10 +324,6 @@ func (c Container) build(imagePath string) (output string, err error) {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
if Stdout {
fmt.Println(m)
}
c.handleCommandsOutput(m)
output += m + "\n"
flog.Trace().Str("stdout", m).Msg("")
}
@ -472,7 +333,7 @@ func (c Container) build(imagePath string) (output string, err error) {
return
}
func (c *Container) Run(workdir string, cmds []string) (out string, err error) {
func (c Container) Run(workdir string, cmds []string) (out string, err error) {
flog := c.Log.With().
Str("workdir", workdir).
Str("command", fmt.Sprintf("%v", cmds)).
@ -536,17 +397,16 @@ func (c *Container) Run(workdir string, cmds []string) (out string, err error) {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
if Stdout {
fmt.Println(m)
}
c.handleCommandsOutput(m)
out += m + "\n"
flog.Trace().Str("container stdout", m).Msg("")
flog.Trace().Str("stdout", m).Msg("")
}
}()
err = cmd.Wait()
if err != nil {
e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
err, cmds, out)
err = errors.New(e)
return
}
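For orientation, a minimal caller of the container API touched in this file looks roughly like the following (a sketch: dist, outdir and buildCommand are assumed to exist, as in artifact.Build above):

    // sketch only: create a per-distro container and run the build inside it
    c, err := container.New(dist)
    if err != nil {
            return
    }
    c.Args = append(c.Args, "--network", "none")
    out, err := c.Run(outdir, []string{buildCommand + " && chmod -R 777 /work"})
    if err != nil {
            c.Log.Error().Err(err).Msg(out)
            return
    }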

View File

@ -18,7 +18,6 @@ func createJobTable(db *sql.DB) (err error) {
group_uuid TEXT,
repo TEXT,
"commit" TEXT,
description TEXT,
config TEXT,
target TEXT,
created INT,
@ -31,8 +30,8 @@ func createJobTable(db *sql.DB) (err error) {
func AddJob(db *sql.DB, job *api.Job) (err error) {
stmt, err := db.Prepare(`INSERT INTO job (updated, uuid, group_uuid, repo, "commit", ` +
`description, config, target, created, started, finished) ` +
`VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);`)
`config, target, created, started, finished) ` +
`VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10);`)
if err != nil {
return
}
@ -54,7 +53,7 @@ func AddJob(db *sql.DB, job *api.Job) (err error) {
target := tbuf.Bytes()
res, err := stmt.Exec(time.Now().Unix(), job.UUID, job.Group,
job.RepoName, job.Commit, job.Description, config, target,
job.RepoName, job.Commit, config, target,
job.Created.Unix(), job.Started.Unix(),
job.Finished.Unix(),
)
@ -69,10 +68,10 @@ func AddJob(db *sql.DB, job *api.Job) (err error) {
func UpdateJob(db *sql.DB, job *api.Job) (err error) {
stmt, err := db.Prepare(`UPDATE job ` +
`SET updated=$1, uuid=$2, group_uuid=$3, repo=$4, ` +
`"commit"=$5, description=$6, config=$7, target=$8, ` +
`created=$9, started=$10, finished=$11, ` +
`status=$12 ` +
`WHERE id=$13`)
`"commit"=$5, config=$6, target=$7, ` +
`created=$8, started=$9, finished=$10, ` +
`status=$11 ` +
`WHERE id=$12`)
if err != nil {
return
}
@ -93,7 +92,7 @@ func UpdateJob(db *sql.DB, job *api.Job) (err error) {
target := tbuf.Bytes()
_, err = stmt.Exec(time.Now().Unix(), job.UUID, job.Group,
job.RepoName, job.Commit, job.Description,
job.RepoName, job.Commit,
config, target,
job.Created.Unix(), job.Started.Unix(),
job.Finished.Unix(), job.Status, job.ID)
@ -104,8 +103,7 @@ func scanJob(scan func(dest ...any) error) (job api.Job, err error) {
var config, target []byte
var updated, created, started, finished int64
err = scan(&job.ID, &updated, &job.UUID, &job.Group,
&job.RepoName, &job.Commit, &job.Description,
&config, &target,
&job.RepoName, &job.Commit, &config, &target,
&created, &started, &finished, &job.Status)
if err != nil {
return
@ -132,7 +130,7 @@ func scanJob(scan func(dest ...any) error) (job api.Job, err error) {
func Jobs(db *sql.DB, where string, args ...any) (jobs []api.Job, err error) {
q := `SELECT id, updated, uuid, group_uuid, ` +
`repo, "commit", description, config, target, created, ` +
`repo, "commit", config, target, created, ` +
`started, finished, status FROM job`
if len(where) != 0 {
q += ` WHERE ` + where
@ -165,7 +163,7 @@ func Jobs(db *sql.DB, where string, args ...any) (jobs []api.Job, err error) {
func Job(db *sql.DB, uuid string) (job api.Job, err error) {
stmt, err := db.Prepare(`SELECT id, updated, uuid, ` +
`group_uuid, ` +
`repo, "commit", description, config, target, ` +
`repo, "commit", config, target, ` +
`created, started, finished, status ` +
`FROM job WHERE uuid=$1`)
if err != nil {

View File

@ -131,7 +131,7 @@ func (pj *jobProcessor) Process(res *Resources) (err error) {
var result *artifact.Result
var dq *qemu.System
pj.job.Artifact.Process(pj.log, pj.job.Target, false, false, false, "", "", 0,
pj.job.Artifact.Process(pj.log, pj.job.Target, false, "", "", 0,
func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
res *artifact.Result) {

View File

@ -37,9 +37,12 @@ func (centos CentOS) Packages() (pkgs []string, err error) {
return
}
err = c.Build("centos:"+centos.release, centos.envs(), centos.runs())
if err != nil {
return
if !c.Exist() {
err = c.Build("centos:"+centos.release,
centos.envs(), centos.runs())
if err != nil {
return
}
}
cmd := "yum search kernel --showduplicates 2>/dev/null " +
@ -79,41 +82,36 @@ func (centos CentOS) runs() (commands []string) {
// TODO refactor
switch centos.release {
case "6":
repofmt := "[6.%d-%s]\\n" +
"name=CentOS-6.%d - %s\\n" +
"baseurl=https://vault.centos.org/6.%d/%s/$basearch/\\n" +
"gpgcheck=0"
repofmt := "[6.%d-%s]\\nbaseurl=https://vault.centos.org/6.%d/%s/$basearch/\\ngpgcheck=0"
for i := 0; i <= 10; i++ {
repos = append(repos, fmt.Sprintf(repofmt, i, "os", i, "os", i, "os"))
repos = append(repos, fmt.Sprintf(repofmt, i, "updates", i, "updates", i, "updates"))
repos = append(repos, fmt.Sprintf(repofmt, i, "os", i, "os"))
repos = append(repos, fmt.Sprintf(repofmt, i, "updates", i, "updates"))
}
cmdf("rm /etc/yum.repos.d/*")
case "7":
repofmt := "[%s-%s]\\n" +
"name=CentOS-%s - %s\\n" +
"baseurl=https://vault.centos.org/%s/%s/$basearch/\\n" +
"gpgcheck=0"
repofmt := "[%s-%s]\\nbaseurl=https://vault.centos.org/%s/%s/$basearch/\\ngpgcheck=0"
for _, ver := range []string{
"7.0.1406", "7.1.1503", "7.2.1511",
"7.3.1611", "7.4.1708", "7.5.1804",
"7.6.1810", "7.7.1908", "7.8.2003",
"7.9.2009",
} {
repos = append(repos, fmt.Sprintf(repofmt, ver, "os", ver, "os", ver, "os"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "updates", ver, "updates", ver, "updates"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "os", ver, "os"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "updates", ver, "updates"))
}
// FIXME http/gpgcheck=0
repofmt = "[%s-%s]\\nbaseurl=http://mirror.centos.org/centos-7/%s/%s/$basearch/\\ngpgcheck=0"
repos = append(repos, fmt.Sprintf(repofmt, "7.9.2009", "os", "7.9.2009", "os"))
repos = append(repos, fmt.Sprintf(repofmt, "7.9.2009", "updates", "7.9.2009", "updates"))
case "8":
repofmt := "[%s-%s]\\n" +
"name=CentOS-%s - %s\\n" +
"baseurl=https://vault.centos.org/%s/%s/$basearch/os/\\n" +
"gpgcheck=0"
repofmt := "[%s-%s]\\nbaseurl=https://vault.centos.org/%s/%s/$basearch/os/\\ngpgcheck=0"
for _, ver := range []string{
"8.0.1905", "8.1.1911", "8.2.2004",
"8.3.2011", "8.4.2105", "8.5.2111",
} {
repos = append(repos, fmt.Sprintf(repofmt, ver, "baseos", ver, "baseos", ver, "BaseOS"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "appstream", ver, "appstream", ver, "AppStream"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "baseos", ver, "BaseOS"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "appstream", ver, "AppStream"))
}
default:
log.Fatal().Msgf("no support for centos %s", centos.release)
@ -121,7 +119,6 @@ func (centos CentOS) runs() (commands []string) {
}
cmdf("sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/* || true")
cmdf("sed -i 's/name/enabled=0\\nname/' /etc/yum.repos.d/* || true")
for _, repo := range repos {
cmdf("echo -e '%s' >> /etc/yum.repos.d/oot.repo\n", repo)

View File

@ -54,9 +54,11 @@ func (d Debian) Packages() (packages []string, err error) {
return
}
err = c.Build(d.image(), d.envs(), d.runs())
if err != nil {
return
if !c.Exist() {
err = c.Build(d.image(), d.envs(), d.runs())
if err != nil {
return
}
}
kernels, err := GetKernels()

View File

@ -57,8 +57,6 @@ func (suse OpenSUSE) Packages() (pkgs []string, err error) {
if err != nil {
return
}
c.LocalBase = true
} else if strings.HasPrefix(suse.release, "13") {
name = "opensuse:13"
cnturl := cache.ContainerURL("openSUSE-13.2")
@ -66,17 +64,17 @@ func (suse OpenSUSE) Packages() (pkgs []string, err error) {
if err != nil {
return
}
c.LocalBase = true
} else if strings.HasPrefix(suse.release, "42") {
name = "opensuse/leap:42"
} else if strings.HasPrefix(suse.release, "15") {
name = "opensuse/leap:" + suse.release
}
err = c.Build(name, suse.envs(), suse.runs())
if err != nil {
return
if !c.Exist() {
err = c.Build(name, suse.envs(), suse.runs())
if err != nil {
return
}
}
cmd := "zypper search -s --match-exact kernel-default | grep x86_64 " +
@ -87,32 +85,7 @@ func (suse OpenSUSE) Packages() (pkgs []string, err error) {
return
}
// TODO Find a way for non-interactive installation of
// retracted kernels
retracted := []string{
"5.14.21-150400.24.49.3",
"5.14.21-150400.24.84.1",
"5.14.21-150500.55.22.1",
"5.3.18-150300.59.81.1",
"5.3.18-59.30.1",
"5.3.18-lp152.98.1",
}
for _, k := range strings.Fields(output) {
skip := false
for _, rk := range retracted {
if rk == k {
skip = true
break
}
}
if skip {
continue
}
pkgs = append(pkgs, k)
}
pkgs = append(pkgs, strings.Fields(output)...)
return
}
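
The removed OpenSUSE block filters the zypper output against a short list of retracted kernel versions using two nested loops. A map-backed skip set expresses the same idea more directly; the sample output string below is made up for the demo, while the retracted versions are the ones visible in the hunk:

package main

import (
    "fmt"
    "strings"
)

func main() {
    output := "5.14.21-150400.24.49.3 5.3.18-59.27.1 5.3.18-59.30.1"

    // Retracted kernel package versions that must not be offered for install.
    retracted := map[string]bool{
        "5.14.21-150400.24.49.3": true,
        "5.14.21-150400.24.84.1": true,
        "5.14.21-150500.55.22.1": true,
        "5.3.18-150300.59.81.1":  true,
        "5.3.18-59.30.1":         true,
        "5.3.18-lp152.98.1":      true,
    }

    var pkgs []string
    for _, k := range strings.Fields(output) {
        if retracted[k] {
            continue
        }
        pkgs = append(pkgs, k)
    }
    fmt.Println(pkgs) // [5.3.18-59.27.1]
}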

View File

@ -38,9 +38,12 @@ func (ol OracleLinux) Packages() (pkgs []string, err error) {
return
}
err = c.Build("oraclelinux:"+ol.release, ol.envs(), ol.runs())
if err != nil {
return
if !c.Exist() {
err = c.Build("oraclelinux:"+ol.release,
ol.envs(), ol.runs())
if err != nil {
return
}
}
if ol.release == "8" {

View File

@ -17,7 +17,6 @@ func init() {
"18.04",
"20.04",
"22.04",
"24.04",
}
for _, release := range releases {
@ -43,9 +42,11 @@ func (u Ubuntu) Packages() (pkgs []string, err error) {
return
}
err = c.Build("ubuntu:"+u.release, u.envs(), u.runs())
if err != nil {
return
if !c.Exist() {
err = c.Build("ubuntu:"+u.release, u.envs(), u.runs())
if err != nil {
return
}
}
cmd := "apt-cache search " +

View File

@ -34,7 +34,7 @@ func GenHostKernels(download bool) (kernels []distro.KernelInfo, err error) {
rawOutput, err := cmd.CombinedOutput()
if err != nil {
log.Error().Err(err).Msg(string(rawOutput))
log.Print(string(rawOutput), err)
return
}
@ -86,7 +86,7 @@ func GenHostKernels(download bool) (kernels []distro.KernelInfo, err error) {
}
vmlinux := "/usr/lib/debug/boot/vmlinux-" + krel
log.Info().Msgf("vmlinux %s", vmlinux)
log.Print("vmlinux", vmlinux)
if fs.PathExists(vmlinux) {
ki.VmlinuxPath = vmlinux
}
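
These host-kernel hunks trade structured zerolog calls for plain log.Print. For reference, the zerolog form carries the error as a field instead of folding it into the message; a small sketch using the global logger from github.com/rs/zerolog/log:

package main

import (
    "os/exec"

    "github.com/rs/zerolog/log"
)

func main() {
    cmd := exec.Command("uname", "-r")
    rawOutput, err := cmd.CombinedOutput()
    if err != nil {
        // the error goes into its own field, the captured output into the message
        log.Error().Err(err).Msg(string(rawOutput))
        return
    }
    log.Info().Msgf("host kernel release: %s", rawOutput)
}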

View File

@ -35,13 +35,13 @@ type CLI struct {
cmd.Globals
Pew cmd.PewCmd `cmd:"" help:"build, run, and test module/exploit"`
Kernel cmd.KernelCmd `cmd:"" aliases:"kernels" help:"manipulate kernels"`
Kernel cmd.KernelCmd `cmd:"" help:"manipulate kernels"`
Debug cmd.DebugCmd `cmd:"" help:"debug environment"`
Log cmd.LogCmd `cmd:"" help:"query logs"`
Pack cmd.PackCmd `cmd:"" help:"exploit pack test"`
Gen cmd.GenCmd `cmd:"" help:"generate .out-of-tree.toml skeleton"`
Image cmd.ImageCmd `cmd:"" aliases:"images" help:"manage images"`
Container cmd.ContainerCmd `cmd:"" aliases:"containers" help:"manage containers"`
Image cmd.ImageCmd `cmd:"" help:"manage images"`
Container cmd.ContainerCmd `cmd:"" help:"manage containers"`
Distro cmd.DistroCmd `cmd:"" help:"distro-related helpers"`
Daemon cmd.DaemonCmd `cmd:"" help:"run daemon"`

View File

@ -101,16 +101,6 @@ type System struct {
Stdout, Stderr string
qemuOutput struct {
listener chan string
mu sync.Mutex
}
commandsOutput struct {
listener chan string
mu sync.Mutex
}
// accessible after qemu is closed
exitErr error
@ -148,80 +138,6 @@ func NewSystem(arch arch, kernel Kernel, drivePath string) (q *System, err error
return
}
// q.SetQemuOutputHandler(func(s string) { fmt.Println(s) })
// defer q.CloseQemuOutputHandler()
func (q *System) SetQemuOutputHandler(handler func(s string)) {
q.qemuOutput.mu.Lock()
defer q.qemuOutput.mu.Unlock()
q.qemuOutput.listener = make(chan string)
go func(l chan string) {
for m := range l {
if m != "" {
handler(m)
}
}
}(q.qemuOutput.listener)
}
func (q *System) CloseQemuOutputHandler() {
q.qemuOutput.mu.Lock()
defer q.qemuOutput.mu.Unlock()
close(q.qemuOutput.listener)
q.qemuOutput.listener = nil
}
func (q *System) handleQemuOutput(m string) {
if q.qemuOutput.listener == nil {
return
}
q.qemuOutput.mu.Lock()
defer q.qemuOutput.mu.Unlock()
if q.qemuOutput.listener != nil {
q.qemuOutput.listener <- m
}
}
// q.SetCommandsOutputHandler(func(s string) { fmt.Println(s) })
// defer q.CloseCommandsOutputHandler()
func (q *System) SetCommandsOutputHandler(handler func(s string)) {
q.commandsOutput.mu.Lock()
defer q.commandsOutput.mu.Unlock()
q.commandsOutput.listener = make(chan string)
go func(l chan string) {
for m := range l {
if m != "" {
handler(m)
}
}
}(q.commandsOutput.listener)
}
func (q *System) CloseCommandsOutputHandler() {
q.commandsOutput.mu.Lock()
defer q.commandsOutput.mu.Unlock()
close(q.commandsOutput.listener)
q.commandsOutput.listener = nil
}
func (q *System) handleCommandsOutput(m string) {
if q.commandsOutput.listener == nil {
return
}
q.commandsOutput.mu.Lock()
defer q.commandsOutput.mu.Unlock()
if q.commandsOutput.listener != nil {
q.commandsOutput.listener <- m
}
}
func (q *System) SetSSHAddrPort(addr string, port int) (err error) {
// TODO validate
q.SSH.AddrPort = fmt.Sprintf("%s:%d", addr, port)
@ -396,8 +312,7 @@ func (q *System) Start() (err error) {
go func() {
scanner := bufio.NewScanner(q.pipe.stdout)
for scanner.Scan() {
m := strings.TrimSpace(scanner.Text())
q.handleQemuOutput(m)
m := scanner.Text()
q.Stdout += m + "\n"
q.Log.Trace().Str("stdout", m).Msg("qemu")
go q.checkOopsPanic(m)
@ -407,8 +322,7 @@ func (q *System) Start() (err error) {
go func() {
scanner := bufio.NewScanner(q.pipe.stderr)
for scanner.Scan() {
m := strings.TrimSpace(scanner.Text())
q.handleQemuOutput(m)
m := scanner.Text()
q.Stderr += m + "\n"
q.Log.Trace().Str("stderr", m).Msg("qemu")
}
@ -428,7 +342,6 @@ func (q *System) Start() (err error) {
if q.Timeout != 0 {
go func() {
q.Log.Debug().Msgf("qemu wait for %s before kill", q.Timeout)
time.Sleep(q.Timeout)
q.KilledByTimeout = true
q.Stop()
@ -560,8 +473,7 @@ func (q System) Command(user, cmd string) (output string, err error) {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := strings.TrimSpace(scanner.Text())
q.handleCommandsOutput(m)
m := scanner.Text()
output += m + "\n"
flog.Trace().Str("stdout", m).Msg("qemu command")
}
@ -574,8 +486,7 @@ func (q System) Command(user, cmd string) (output string, err error) {
scanner := bufio.NewScanner(stderr)
for scanner.Scan() {
m := strings.TrimSpace(scanner.Text())
q.handleCommandsOutput(m)
m := scanner.Text()
output += m + "\n"
// Note: it prints stderr as stdout
flog.Trace().Str("stdout", m).Msg("qemu command")

View File

@ -28,8 +28,6 @@ RUN sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
RUN echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config
# network workaround
RUN chmod +x $TMPDIR/etc/rc.local

View File

@ -4,6 +4,6 @@ set -eux
cd "$(dirname "$0")"
sudo podman build -t gen-centos6-image .
sudo podman run --privileged -v $(pwd):/shared -t gen-centos6-image
sudo docker build -t gen-centos6-image .
sudo docker run --privileged -v $(pwd):/shared -t gen-centos6-image
tar -Szcf out_of_tree_centos_6.img.tar.gz out_of_tree_centos_6.img

View File

@ -13,11 +13,6 @@
#
FROM centos:7
RUN sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/* || true
RUN sed -i 's/name/enabled=0\nname/' /etc/yum.repos.d/* || true
RUN echo -e '[7.9.2009-os]\nbaseurl=https://vault.centos.org/7.9.2009/os/$basearch/\ngpgcheck=0' >> /etc/yum.repos.d/oot.repo
RUN echo -e '[7.9.2009-updates]\nbaseurl=https://vault.centos.org/7.9.2009/updates/$basearch/\ngpgcheck=0' >> /etc/yum.repos.d/oot.repo
RUN yum -y update
RUN yum -y groupinstall "Development Tools"
RUN yum -y install qemu-img e2fsprogs
@ -26,13 +21,13 @@ ENV TMPDIR=/tmp/centos
RUN yum --installroot=$TMPDIR \
--releasever=7 \
--disablerepo='*' \
--enablerepo=base \
-y groupinstall Base
RUN rm $TMPDIR/etc/yum.repos.d/*
RUN cp /etc/yum.repos.d/* $TMPDIR/etc/yum.repos.d/
RUN yum --installroot=$TMPDIR \
--releasever=7 \
--disablerepo='*' \
--enablerepo=base \
-y install openssh-server openssh-clients
RUN chroot $TMPDIR /bin/sh -c 'useradd -m user'
@ -42,8 +37,6 @@ RUN sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
RUN echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config
# network workaround
RUN chmod +x $TMPDIR/etc/rc.local

View File

@ -1,9 +0,0 @@
#!/bin/sh
set -eux
cd "$(dirname "$0")"
sudo podman build -t gen-centos7-image .
sudo podman run --privileged -v $(pwd):/shared -t gen-centos7-image
tar -Szcf out_of_tree_centos_7.img.tar.gz out_of_tree_centos_7.img

View File

@ -28,8 +28,6 @@ RUN sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
RUN echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config
# network workaround
RUN chmod +x $TMPDIR/etc/rc.local

View File

@ -4,6 +4,6 @@ set -eux
cd "$(dirname "$0")"
sudo podman build -t gen-centos8-image .
sudo podman run --privileged -v $(pwd):/shared -t gen-centos8-image
sudo docker build -t gen-centos8-image .
sudo docker run --privileged -v $(pwd):/shared -t gen-centos8-image
tar -Szcf out_of_tree_centos_8.img.tar.gz out_of_tree_centos_8.img

View File

@ -11,8 +11,6 @@ sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config
echo '#!/bin/sh' > $TMPDIR/etc/rc.local
echo 'dhclient' >> $TMPDIR/etc/rc.local

View File

@ -13,7 +13,6 @@ RUN yum --installroot=$TMPDIR \
--releasever=_VERSION_ \
--disablerepo='*' \
--enablerepo=ol_VERSION__baseos_latest \
--enablerepo=ol_VERSION__appstream \
-y groupinstall Base
RUN cp /etc/yum.repos.d/* $TMPDIR/etc/yum.repos.d/
@ -22,7 +21,6 @@ RUN yum --installroot=$TMPDIR \
--releasever=_VERSION_ \
--disablerepo='*' \
--enablerepo=ol_VERSION__baseos_latest \
--enablerepo=ol_VERSION__appstream \
-y install openssh-server openssh-clients dhclient yum
RUN chroot $TMPDIR /bin/sh -c 'useradd -m user'
@ -32,8 +30,6 @@ RUN sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
RUN echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config
# network workaround
RUN chmod +x $TMPDIR/etc/rc.local

View File

@ -11,11 +11,9 @@ for version in 6 7 8 9; do
if [[ $version -eq 6 ]]; then
sed -i 's/baseos_latest/u10_base/' $version/Dockerfile
sed -i '/appstream/d' $version/Dockerfile
fi
if [[ $version -eq 7 ]]; then
sed -i 's/baseos_latest/u9_base/' $version/Dockerfile
sed -i '/appstream/d' $version/Dockerfile
fi
podman build -t gen-oraclelinux${version}-image $version

View File

@ -0,0 +1,35 @@
# Copyright 2018 Mikhail Klementev. All rights reserved.
# Use of this source code is governed by an AGPLv3 license
# (or later) that can be found in the LICENSE file.
#
# Usage:
#
# $ docker build -t gen-ubuntu1404-image .
# $ docker run --privileged -v $(pwd):/shared -t gen-ubuntu1404-image
#
# out_of_tree_ubuntu_14__04.img will be created in the current directory. You can
# change $(pwd) to a different directory to use a different destination for the image.
#
FROM ubuntu:14.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update
RUN apt-get install -y debootstrap qemu
ENV TMPDIR=/tmp/ubuntu
ENV IMAGEDIR=/tmp/image
ENV IMAGE=/shared/out_of_tree_ubuntu_14__04.img
ENV REPOSITORY=http://archive.ubuntu.com/ubuntu
ENV RELEASE=trusty
RUN mkdir $IMAGEDIR
# Must be executed with --privileged because of /dev/loop
CMD debootstrap --include=openssh-server,policykit-1 \
$RELEASE $TMPDIR $REPOSITORY && \
/shared/setup.sh $TMPDIR && \
qemu-img create $IMAGE 2G && \
mkfs.ext4 -F $IMAGE && \
mount -o loop $IMAGE $IMAGEDIR && \
cp -a $TMPDIR/* $IMAGEDIR/ && \
umount $IMAGEDIR

View File

@ -0,0 +1,17 @@
#!/bin/sh -eux
# Copyright 2018 Mikhail Klementev. All rights reserved.
# Use of this source code is governed by an AGPLv3 license
# (or later) that can be found in the LICENSE file.
TMPDIR=$1
chroot $TMPDIR /bin/sh -c 'useradd -m user'
sed -i 's/root:\*:/root::/' $TMPDIR/etc/shadow
sed -i 's/user:!!:/user::/' $TMPDIR/etc/shadow
echo auth sufficient pam_permit.so > $TMPDIR/etc/pam.d/sshd
sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
echo '#!/bin/sh' > $TMPDIR/etc/rc.local
echo 'dhclient eth0' >> $TMPDIR/etc/rc.local
chmod +x $TMPDIR/etc/rc.local

View File

@ -1,73 +0,0 @@
#!/usr/bin/env python3
import os
import subprocess
script_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_dir)
releases = [
('12.04', 'precise', 'http://old-releases.ubuntu.com/ubuntu'),
('14.04', 'trusty', 'http://archive.ubuntu.com/ubuntu'),
('16.04', 'xenial', 'http://archive.ubuntu.com/ubuntu'),
('18.04', 'bionic', 'http://archive.ubuntu.com/ubuntu'),
('20.04', 'focal', 'http://archive.ubuntu.com/ubuntu'),
('22.04', 'jammy', 'http://archive.ubuntu.com/ubuntu'),
('24.04', 'noble', 'http://archive.ubuntu.com/ubuntu')
]
template = '''
FROM ubuntu:{version}
RUN sed -i 's;http://archive.ubuntu.com/ubuntu;{repository};' /etc/apt/sources.list
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update
RUN apt-get install -y debootstrap qemu-utils
RUN apt-get install -y linux-image-generic
ENV TMPDIR=/tmp/ubuntu
ENV IMAGEDIR=/tmp/image
ENV IMAGE=/shared/out_of_tree_ubuntu_{img_version}.img
ENV REPOSITORY={repository}
ENV RELEASE={codename}
RUN mkdir $IMAGEDIR
# Must be executed with --privileged because of /dev/loop
CMD debootstrap --include=openssh-server,policykit-1 \
$RELEASE $TMPDIR $REPOSITORY && \
/shared/setup.sh $TMPDIR && \
qemu-img create $IMAGE 2G && \
mkfs.ext4 -F $IMAGE && \
mount -o loop $IMAGE $IMAGEDIR && \
cp -a $TMPDIR/* $IMAGEDIR/ && \
umount $IMAGEDIR
'''
def run_cmd(cmd):
print(f"+ {cmd}")
subprocess.run(cmd, shell=True, check=True, executable='/bin/bash')
for version, codename, repository in releases:
numeric_version = version.replace('.', '')
img_version=version.replace(".","__")
dockerfile_content = template.format(
version=version,
img_version=img_version,
codename=codename,
repository=repository,
numeric_version=numeric_version)
os.makedirs(str(version), exist_ok=True)
with open(f"{version}/Dockerfile", "w") as dockerfile:
dockerfile.write(dockerfile_content)
run_cmd(f"podman build -t gen-ubuntu{numeric_version}-image {version}")
run_cmd(f"rm -rf {version}")
run_cmd(f"podman run --privileged -v {os.getcwd()}:/shared -t gen-ubuntu{numeric_version}-image")
run_cmd(f"tar -Szcf out_of_tree_ubuntu_{img_version}.img.tar.gz out_of_tree_ubuntu_{img_version}.img")

View File

@ -11,9 +11,7 @@ sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config
echo '#!/bin/sh' > $TMPDIR/etc/rc.local
echo 'dhclient || dhcpcd' >> $TMPDIR/etc/rc.local
echo 'dhclient' >> $TMPDIR/etc/rc.local
chmod +x $TMPDIR/etc/rc.local